chore(stubs): delete invalid stubs [skip ci]

Signed-off-by: aarnphm-ec2-dev <29749331+aarnphm@users.noreply.github.com>
This commit is contained in:
aarnphm-ec2-dev
2023-07-18 16:25:54 +00:00
parent b297ec1109
commit dbca689c65
87 changed files with 461 additions and 8039 deletions

337
typings/IPython/core/interactiveshell.pyi generated Normal file
View File

@@ -0,0 +1,337 @@
import abc
from ast import stmt
from pathlib import Path
from typing import Any as AnyType
from typing import Callable
from typing import List as ListType
from typing import Optional
from typing import Tuple
from _typeshed import Incomplete
from IPython.core.alias import Alias as Alias
from IPython.core.alias import AliasManager as AliasManager
from IPython.core.autocall import ExitAutocall as ExitAutocall
from IPython.core.builtin_trap import BuiltinTrap as BuiltinTrap
from IPython.core.compilerop import CachingCompiler as CachingCompiler
from IPython.core.debugger import InterruptiblePdb as InterruptiblePdb
from IPython.core.display_trap import DisplayTrap as DisplayTrap
from IPython.core.displayhook import DisplayHook as DisplayHook
from IPython.core.displaypub import DisplayPublisher as DisplayPublisher
from IPython.core.error import InputRejected as InputRejected
from IPython.core.error import UsageError as UsageError
from IPython.core.events import EventManager as EventManager
from IPython.core.events import available_events as available_events
from IPython.core.extensions import ExtensionManager as ExtensionManager
from IPython.core.formatters import DisplayFormatter as DisplayFormatter
from IPython.core.history import HistoryManager as HistoryManager
from IPython.core.inputtransformer2 import ESC_MAGIC as ESC_MAGIC
from IPython.core.inputtransformer2 import ESC_MAGIC2 as ESC_MAGIC2
from IPython.core.logger import Logger as Logger
from IPython.core.macro import Macro as Macro
from IPython.core.oinspect import OInfo as OInfo
from IPython.core.payload import PayloadManager as PayloadManager
from IPython.core.prefilter import PrefilterManager as PrefilterManager
from IPython.core.profiledir import ProfileDir as ProfileDir
from IPython.core.usage import default_banner as default_banner
from IPython.display import display as display
from IPython.paths import get_ipython_dir as get_ipython_dir
from IPython.testing.skipdoctest import skip_doctest as skip_doctest
from traitlets.config.configurable import SingletonConfigurable
# Optional hook for rendering rich/Sphinx-style docstrings (set when the
# docrepr package is available, presumably — TODO confirm against runtime).
sphinxify: Optional[Callable[..., AnyType]]

class ProvisionalWarning(DeprecationWarning):
    """Warning category for provisional (unstable, subject-to-change) APIs."""
    ...

# Module-level regex-like object used for dedent handling; exact type not
# captured by this generated stub (NOTE(review): presumably a compiled
# re.Pattern — verify against IPython.core.interactiveshell).
dedent_re: Incomplete
class ExecutionInfo:
    """Metadata describing a single cell-execution request.

    The constructor parameters mirror the arguments of
    ``InteractiveShell.run_cell`` (raw cell source plus the history,
    silencing, __future__-sharing, and cell-id flags).
    """

    raw_cell: Incomplete       # the unprocessed cell source
    store_history: bool        # whether this execution is recorded in history
    silent: bool               # whether display/output side effects are suppressed
    shell_futures: bool        # whether __future__ imports are shared with the shell
    cell_id: Incomplete        # opaque identifier for the cell (frontend-supplied)
    def __init__(self, raw_cell, store_history, silent, shell_futures, cell_id) -> None: ...
class ExecutionResult:
    """Outcome of a cell execution.

    Wraps the :class:`ExecutionInfo` it was created from and records any
    error raised before or during execution.
    """

    execution_count: Incomplete
    error_before_exec: Incomplete            # error raised before the code ran (e.g. transform/compile)
    error_in_exec: Optional[BaseException]   # exception raised while the code ran, if any
    info: Incomplete                         # the originating ExecutionInfo
    result: Incomplete                       # displayed result value, if any
    def __init__(self, info) -> None: ...
    @property
    def success(self): ...                   # presumably True when both error fields are unset — TODO confirm
    def raise_error(self) -> None: ...       # re-raises the stored error, per the name — TODO confirm
class InteractiveShell(SingletonConfigurable):
    """Type stub for IPython's main interactive shell.

    Declares the traits, sub-managers, and public methods of
    ``IPython.core.interactiveshell.InteractiveShell`` (a traitlets
    singleton). Most attribute types are ``Incomplete`` because this
    stub was auto-generated; do not rely on them for anything beyond
    name existence.
    """

    # --- configurable traits and core state ---------------------------------
    ast_transformers: Incomplete
    autocall: Incomplete
    autoindent: Incomplete
    autoawait: Incomplete
    loop_runner_map: Incomplete
    loop_runner: Incomplete
    automagic: Incomplete
    banner1: Incomplete
    banner2: Incomplete
    cache_size: Incomplete
    color_info: Incomplete
    colors: Incomplete
    debug: Incomplete
    disable_failing_post_execute: Incomplete
    display_formatter: Incomplete
    displayhook_class: Incomplete
    display_pub_class: Incomplete
    compiler_class: Incomplete
    inspector_class: Incomplete
    sphinxify_docstring: Incomplete
    enable_html_pager: Incomplete
    data_pub_class: Incomplete
    exit_now: Incomplete
    exiter: Incomplete
    execution_count: Incomplete
    filename: Incomplete
    ipython_dir: Incomplete
    input_transformer_manager: Incomplete
    @property
    def input_transformers_cleanup(self): ...
    input_transformers_post: Incomplete
    @property
    def input_splitter(self): ...
    logstart: Incomplete
    logfile: Incomplete
    logappend: Incomplete
    object_info_string_level: Incomplete
    pdb: Incomplete
    display_page: Incomplete
    show_rewritten_input: Incomplete
    quiet: Incomplete
    history_length: Incomplete
    history_load_length: Incomplete
    ast_node_interactivity: Incomplete
    warn_venv: Incomplete
    separate_in: Incomplete
    separate_out: Incomplete
    separate_out2: Incomplete
    wildcards_case_sensitive: Incomplete
    xmode: Incomplete
    # --- sub-managers (aliasing, prefiltering, history, magics, ...) --------
    alias_manager: Incomplete
    prefilter_manager: Incomplete
    builtin_trap: Incomplete
    display_trap: Incomplete
    extension_manager: Incomplete
    payload_manager: Incomplete
    history_manager: Incomplete
    magics_manager: Incomplete
    profile_dir: Incomplete
    @property
    def profile(self): ...
    pylab_gui_select: Incomplete
    last_execution_succeeded: Incomplete
    last_execution_result: Incomplete
    configurables: Incomplete
    db: Incomplete
    raw_input_original: Incomplete
    trio_runner: Incomplete
    def __init__(
        self,
        ipython_dir: Incomplete | None = ...,
        profile_dir: Incomplete | None = ...,
        user_module: Incomplete | None = ...,
        user_ns: Incomplete | None = ...,
        custom_exceptions=...,
        **kwargs,
    ) -> None: ...
    def get_ipython(self): ...
    def set_autoindent(self, value: Incomplete | None = ...) -> None: ...
    def set_trio_runner(self, tr) -> None: ...
    # --- init_* methods: one-time setup of each subsystem -------------------
    def init_ipython_dir(self, ipython_dir) -> None: ...
    def init_profile_dir(self, profile_dir) -> None: ...
    more: bool
    compile: Incomplete
    meta: Incomplete
    tempfiles: Incomplete
    tempdirs: Incomplete
    starting_dir: Incomplete
    indent_current_nsp: int
    def init_instance_attrs(self) -> None: ...
    def init_environment(self) -> None: ...
    stdin_encoding: Incomplete
    def init_encoding(self) -> None: ...
    pycolorize: Incomplete
    def init_syntax_highlighting(self, changes: Incomplete | None = ...): ...
    def refresh_style(self) -> None: ...
    home_dir: Incomplete
    dir_stack: Incomplete
    def init_pushd_popd_magic(self) -> None: ...
    logger: Incomplete
    def init_logger(self) -> None: ...
    def init_logstart(self) -> None: ...
    def init_builtins(self) -> None: ...
    inspector: Incomplete
    def init_inspector(self, changes: Incomplete | None = ...) -> None: ...
    def init_io(self) -> None: ...
    def init_prompts(self) -> None: ...
    def init_display_formatter(self) -> None: ...
    display_pub: Incomplete
    def init_display_pub(self) -> None: ...
    data_pub: Incomplete
    def init_data_pub(self) -> None: ...
    displayhook: Incomplete
    def init_displayhook(self) -> None: ...
    @staticmethod
    def get_path_links(p: Path): ...
    def init_virtualenv(self) -> None: ...
    def save_sys_module_state(self) -> None: ...
    def restore_sys_module_state(self) -> None: ...
    @property
    def banner(self): ...
    def show_banner(self, banner: Incomplete | None = ...) -> None: ...
    # --- hooks and events ---------------------------------------------------
    hooks: Incomplete
    strdispatchers: Incomplete
    def init_hooks(self) -> None: ...
    def set_hook(
        self, name, hook, priority: int = ..., str_key: Incomplete | None = ..., re_key: Incomplete | None = ...
    ) -> None: ...
    events: Incomplete
    def init_events(self) -> None: ...
    def register_post_execute(self, func) -> None: ...
    def new_main_mod(self, filename, modname): ...
    def clear_main_mod_cache(self) -> None: ...
    call_pdb: Incomplete
    def init_pdb(self) -> None: ...
    def debugger(self, force: bool = ...) -> None: ...
    # --- namespace management -----------------------------------------------
    default_user_namespaces: bool
    user_ns_hidden: Incomplete
    ns_table: Incomplete
    def init_create_namespaces(
        self, user_module: Incomplete | None = ..., user_ns: Incomplete | None = ...
    ) -> None: ...
    @property
    def user_global_ns(self): ...
    def prepare_user_module(self, user_module: Incomplete | None = ..., user_ns: Incomplete | None = ...): ...
    def init_sys_modules(self) -> None: ...
    def init_user_ns(self) -> None: ...
    @property
    def all_ns_refs(self): ...
    def reset(self, new_session: bool = ..., aggressive: bool = ...) -> None: ...
    def del_var(self, varname, by_name: bool = ...) -> None: ...
    def reset_selective(self, regex: Incomplete | None = ...) -> None: ...
    def push(self, variables, interactive: bool = ...) -> None: ...
    def drop_by_id(self, variables) -> None: ...
    # --- object introspection -----------------------------------------------
    def object_inspect(self, oname, detail_level: int = ...): ...
    def object_inspect_text(self, oname, detail_level: int = ...): ...
    def object_inspect_mime(self, oname, detail_level: int = ..., omit_sections=...): ...
    def init_history(self) -> None: ...
    # --- exception / traceback handling -------------------------------------
    debugger_cls = InterruptiblePdb
    SyntaxTB: Incomplete
    InteractiveTB: Incomplete
    sys_excepthook: Incomplete
    def init_traceback_handlers(self, custom_exceptions) -> None: ...
    CustomTB: Incomplete
    custom_exceptions: Incomplete
    def set_custom_exc(self, exc_tuple, handler): ...
    def excepthook(self, etype, value, tb) -> None: ...
    def show_usage_error(self, exc) -> None: ...
    def get_exception_only(self, exc_tuple: Incomplete | None = ...): ...
    def showtraceback(
        self,
        exc_tuple: Incomplete | None = ...,
        filename: Incomplete | None = ...,
        tb_offset: Incomplete | None = ...,
        exception_only: bool = ...,
        running_compiled_code: bool = ...,
    ) -> None: ...
    def showsyntaxerror(self, filename: Incomplete | None = ..., running_compiled_code: bool = ...) -> None: ...
    def showindentationerror(self) -> None: ...
    # --- completion ----------------------------------------------------------
    rl_next_input: Incomplete
    def set_next_input(self, s, replace: bool = ...) -> None: ...
    Completer: Incomplete
    def init_completer(self) -> None: ...
    def complete(self, text, line: Incomplete | None = ..., cursor_pos: Incomplete | None = ...): ...
    def set_custom_completer(self, completer, pos: int = ...) -> None: ...
    def set_completer_frame(self, frame: Incomplete | None = ...) -> None: ...
    # --- magics ---------------------------------------------------------------
    register_magics: Incomplete
    def init_magics(self) -> None: ...
    def register_magic_function(self, func, magic_kind: str = ..., magic_name: Incomplete | None = ...) -> None: ...
    def run_line_magic(self, magic_name: str, line, _stack_depth: int = ...): ...
    def get_local_scope(self, stack_depth): ...
    def run_cell_magic(self, magic_name, line, cell): ...
    def find_line_magic(self, magic_name): ...
    def find_cell_magic(self, magic_name): ...
    def find_magic(self, magic_name, magic_kind: str = ...): ...
    def magic(self, arg_s): ...
    def define_macro(self, name, themacro) -> None: ...
    # --- system command execution --------------------------------------------
    def system_piped(self, cmd) -> None: ...
    def system_raw(self, cmd) -> None: ...
    system = system_piped  # default `system` implementation is the piped variant
    def getoutput(self, cmd, split: bool = ..., depth: int = ...): ...
    def init_alias(self) -> None: ...
    def init_extension_manager(self) -> None: ...
    def init_payload(self) -> None: ...
    prefilter: Incomplete
    def init_prefilter(self) -> None: ...
    def auto_rewrite_input(self, cmd) -> None: ...
    def user_expressions(self, expressions): ...
    # --- code execution -------------------------------------------------------
    def ex(self, cmd) -> None: ...
    def ev(self, expr): ...
    def safe_execfile(
        self, fname, *where, exit_ignore: bool = ..., raise_exceptions: bool = ..., shell_futures: bool = ...
    ) -> None: ...
    def safe_execfile_ipy(self, fname, shell_futures: bool = ..., raise_exceptions: bool = ...) -> None: ...
    def safe_run_module(self, mod_name, where) -> None: ...
    def run_cell(
        self,
        raw_cell,
        store_history: bool = ...,
        silent: bool = ...,
        shell_futures: bool = ...,
        cell_id: Incomplete | None = ...,
    ): ...
    def should_run_async(
        self,
        raw_cell: str,
        *,
        transformed_cell: Incomplete | None = ...,
        preprocessing_exc_tuple: Incomplete | None = ...,
    ) -> bool: ...
    async def run_cell_async(
        self,
        raw_cell: str,
        store_history: bool = ...,
        silent: bool = ...,
        shell_futures: bool = ...,
        *,
        transformed_cell: Optional[str] = ...,
        preprocessing_exc_tuple: Optional[AnyType] = ...,
        cell_id: Incomplete | None = ...,
    ) -> ExecutionResult: ...
    def transform_cell(self, raw_cell): ...
    def transform_ast(self, node): ...
    async def run_ast_nodes(
        self,
        nodelist: ListType[stmt],
        cell_name: str,
        interactivity: str = ...,
        compiler=...,
        result: Incomplete | None = ...,
    ): ...
    async def run_code(self, code_obj, result: Incomplete | None = ..., *, async_: bool = ...): ...
    runcode = run_code  # legacy alias
    def check_complete(self, code: str) -> Tuple[str, str]: ...
    # --- GUI / matplotlib integration ----------------------------------------
    active_eventloop: Incomplete
    def enable_gui(self, gui: Incomplete | None = ...) -> None: ...
    def enable_matplotlib(self, gui: Incomplete | None = ...): ...
    def enable_pylab(self, gui: Incomplete | None = ..., import_all: bool = ..., welcome_message: bool = ...): ...
    # --- utilities ------------------------------------------------------------
    def var_expand(self, cmd, depth: int = ..., formatter=...): ...
    def mktempfile(self, data: Incomplete | None = ..., prefix: str = ...): ...
    def ask_yes_no(self, prompt, default: Incomplete | None = ..., interrupt: Incomplete | None = ...): ...
    def show_usage(self) -> None: ...
    def extract_input_lines(self, range_str, raw: bool = ...): ...
    def find_user_code(
        self, target, raw: bool = ..., py_only: bool = ..., skip_encoding_cookie: bool = ..., search_ns: bool = ...
    ): ...
    def atexit_operations(self) -> None: ...
    def cleanup(self) -> None: ...
    def switch_doctest_mode(self, mode) -> None: ...
class InteractiveShellABC(metaclass=abc.ABCMeta): ...

View File

@@ -1,41 +0,0 @@
"""
This type stub file was generated by pyright.
"""
from IPython.core.debugger import Pdb
PTK3 = ...
_use_simple_prompt = ...
class TerminalPdb(Pdb):
"""Standalone IPython debugger."""
def __init__(self, *args, pt_session_options=..., **kwargs) -> None: ...
def pt_init(self, pt_session_options=...): # -> None:
"""Initialize the prompt session and the prompt loop
and store them in self.pt_app and self.pt_loop.
Additional keyword arguments for the PromptSession class
can be specified in pt_session_options.
"""
...
def cmdloop(self, intro=...): # -> None:
"""Repeatedly issue a prompt, accept input, parse an initial prefix
off the received input, and dispatch to action methods, passing them
the remainder of the line as argument.
override the same methods from cmd.Cmd to provide prompt toolkit replacement.
"""
...
def do_interact(self, arg): ...
def set_trace(frame=...): # -> None:
"""
Start debugging from `frame`.
If frame is not specified, debugging starts from caller's frame.
"""
...
if __name__ == "__main__":
old_trace_dispatch = ...

View File

@@ -1,165 +0,0 @@
"""
This type stub file was generated by pyright.
"""
from typing import Set
from IPython.core import magic_arguments
from IPython.core.magic import Magics
from IPython.core.magic import line_magic
from IPython.core.magic import magics_class
from IPython.terminal.interactiveshell import TerminalInteractiveShell
"""
An embedded IPython shell.
"""
class KillEmbedded(Exception): ...
KillEmbeded = KillEmbedded
@magics_class
class EmbeddedMagics(Magics):
@line_magic
@magic_arguments.magic_arguments()
@magic_arguments.argument("-i", "--instance", action="store_true", help="Kill instance instead of call location")
@magic_arguments.argument("-x", "--exit", action="store_true", help="Also exit the current session")
@magic_arguments.argument("-y", "--yes", action="store_true", help="Do not ask confirmation")
def kill_embedded(self, parameter_s=...): # -> None:
"""%kill_embedded : deactivate for good the current embedded IPython
This function (after asking for confirmation) sets an internal flag so
that an embedded IPython will never activate again for the given call
location. This is useful to permanently disable a shell that is being
called inside a loop: once you've figured out what you needed from it,
you may then kill it and the program will then continue to run without
the interactive shell interfering again.
Kill Instance Option:
If for some reasons you need to kill the location where the instance
is created and not called, for example if you create a single
instance in one place and debug in many locations, you can use the
``--instance`` option to kill this specific instance. Like for the
``call location`` killing an "instance" should work even if it is
recreated within a loop.
.. note::
This was the default behavior before IPython 5.2
"""
...
@line_magic
def exit_raise(self, parameter_s=...): # -> None:
"""%exit_raise Make the current embedded kernel exit and raise and exception.
This function sets an internal flag so that an embedded IPython will
raise a `IPython.terminal.embed.KillEmbedded` Exception on exit, and then exit the current I. This is
useful to permanently exit a loop that create IPython embed instance.
"""
...
class _Sentinel:
def __init__(self, repr) -> None: ...
def __repr__(self): ...
class InteractiveShellEmbed(TerminalInteractiveShell):
dummy_mode = ...
exit_msg = ...
embedded = ...
should_raise = ...
display_banner = ...
exit_msg = ...
term_title = ...
_inactive_locations: Set[str] = ...
@property
def embedded_active(self): ...
@embedded_active.setter
def embedded_active(self, value): ...
def __init__(self, **kw) -> None: ...
def init_sys_modules(self): # -> None:
"""
Explicitly overwrite :mod:`IPython.core.interactiveshell` to do nothing.
"""
...
def init_magics(self): ...
def __call__(
self, header=..., local_ns=..., module=..., dummy=..., stack_depth=..., compile_flags=..., **kw
): # -> None:
"""Activate the interactive interpreter.
__call__(self,header='',local_ns=None,module=None,dummy=None) -> Start
the interpreter shell with the given local and global namespaces, and
optionally print a header string at startup.
The shell can be globally activated/deactivated using the
dummy_mode attribute. This allows you to turn off a shell used
for debugging globally.
However, *each* time you call the shell you can override the current
state of dummy_mode with the optional keyword parameter 'dummy'. For
example, if you set dummy mode on with IPShell.dummy_mode = True, you
can still have a specific call work by making it as IPShell(dummy=False).
"""
...
def mainloop(self, local_ns=..., module=..., stack_depth=..., compile_flags=...): # -> None:
"""Embeds IPython into a running python program.
Parameters
----------
local_ns, module
Working local namespace (a dict) and module (a module or similar
object). If given as None, they are automatically taken from the scope
where the shell was called, so that program variables become visible.
stack_depth : int
How many levels in the stack to go to looking for namespaces (when
local_ns or module is None). This allows an intermediate caller to
make sure that this function gets the namespace from the intended
level in the stack. By default (0) it will get its locals and globals
from the immediate caller.
compile_flags
A bit field identifying the __future__ features
that are enabled, as passed to the builtin :func:`compile` function.
If given as None, they are automatically taken from the scope where
the shell was called.
"""
...
def embed(*, header=..., compile_flags=..., **kwargs): # -> None:
"""Call this to embed IPython at the current point in your program.
The first invocation of this will create a :class:`terminal.embed.InteractiveShellEmbed`
instance and then call it. Consecutive calls just call the already
created instance.
If you don't want the kernel to initialize the namespace
from the scope of the surrounding function,
and/or you want to load full IPython configuration,
you probably want `IPython.start_ipython()` instead.
Here is a simple example::
from IPython import embed
a = 10
b = 20
embed(header='First time')
c = 30
d = 40
embed()
Parameters
----------
header : str
Optional header string to print at startup.
compile_flags
Passed to the `compile_flags` parameter of :py:meth:`terminal.embed.InteractiveShellEmbed.mainloop()`,
which is called when the :class:`terminal.embed.InteractiveShellEmbed` instance is called.
**kwargs : various, optional
Any other kwargs will be passed to the :class:`terminal.embed.InteractiveShellEmbed` constructor.
Full customization can be done by passing a traitlets :class:`Config` in as the
`config` argument (see :ref:`configure_start_ipython` and :ref:`terminal_options`).
"""
...

View File

@@ -1,7 +1,3 @@
"""
This type stub file was generated by pyright.
"""
from typing import Union as UnionType
from IPython.core.interactiveshell import InteractiveShell
@@ -12,47 +8,11 @@ from pygments.style import Style
from traitlets import Integer
from traitlets import observe
from .shortcuts.auto_suggest import NavigableAutoSuggestFromHistory
"""IPython terminal interface using prompt_toolkit"""
PTK3 = ...
class _NoStyle(Style): ...
_style_overrides_light_bg = ...
_style_overrides_linux = ...
def get_default_editor(): ...
_use_simple_prompt = ...
def black_reformat_handler(text_before_cursor): # -> str:
"""
We do not need to protect against error,
this is taken care at a higher level where any reformat error is ignored.
Indeed we may call reformatting on incomplete code.
"""
...
def yapf_reformat_handler(text_before_cursor): ...
class PtkHistoryAdapter(History):
"""
Prompt toolkit has it's own way of handling history, Where it assumes it can
Push/pull from history.
"""
def __init__(self, shell) -> None: ...
def append_string(self, string): ...
def load_history_strings(self): ...
def store_string(self, string: str) -> None: ...
class TerminalInteractiveShell(InteractiveShell):
mime_renderers = ...
space_for_menu = Integer(6, help=...).tag(config=True)
pt_app: UnionType[PromptSession, None] = ...
auto_suggest: UnionType[AutoSuggestFromHistory, NavigableAutoSuggestFromHistory, None] = ...
auto_suggest: UnionType[AutoSuggestFromHistory, None] = ...
debugger_history = ...
debugger_history_file = ...
simple_prompt = ...
@@ -121,5 +81,3 @@ class TerminalInteractiveShell(InteractiveShell):
def switch_doctest_mode(self, mode): # -> None:
"""Switch prompts to classic for %doctest_mode"""
...
if __name__ == "__main__": ...

View File

@@ -1,70 +0,0 @@
"""
This type stub file was generated by pyright.
"""
from IPython.core.application import BaseIPythonApplication
from IPython.core.crashhandler import CrashHandler
from IPython.core.shellapp import InteractiveShellApp
from traitlets.config.application import catch_config_error
"""
The :class:`~traitlets.config.application.Application` object for the command
line :command:`ipython` program.
"""
_examples = ...
class IPAppCrashHandler(CrashHandler):
"""sys.excepthook for IPython itself, leaves a detailed report on disk."""
def __init__(self, app) -> None: ...
def make_report(self, traceback): # -> str:
"""Return a string containing a crash report."""
...
flags = ...
frontend_flags = ...
addflag = ...
classic_config = ...
aliases = ...
class LocateIPythonApp(BaseIPythonApplication):
description = ...
subcommands = ...
def start(self): ...
class TerminalIPythonApp(BaseIPythonApplication, InteractiveShellApp):
name = ...
description = ...
crash_handler_class = IPAppCrashHandler
examples = ...
flags = ...
aliases = ...
classes = ...
interactive_shell_class = ...
subcommands = ...
auto_create = ...
quick = ...
display_banner = ...
force_interact = ...
something_to_run = ...
@catch_config_error
def initialize(self, argv=...): # -> None:
"""Do actions after construct, but before starting the app."""
...
def init_shell(self): # -> None:
"""initialize the InteractiveShell instance"""
...
def init_banner(self): # -> None:
"""optionally display the banner"""
...
def start(self): ...
def load_default_config(ipython_dir=...): # -> Instance | Any:
"""Load the default config file from the default ipython_dir.
This is useful for embedded shells.
"""
...
launch_new_instance = ...
if __name__ == "__main__": ...

View File

@@ -1,119 +0,0 @@
"""
This type stub file was generated by pyright.
"""
import sys
from IPython.core.magic import Magics
from IPython.core.magic import line_magic
from IPython.core.magic import magics_class
from IPython.testing.skipdoctest import skip_doctest
"""Extra magics for terminal use."""
def get_pasted_lines(sentinel, l_input=..., quiet=...): # -> Generator[Unknown, Any, None]:
"""Yield pasted lines until the user enters the given sentinel value."""
...
@magics_class
class TerminalMagics(Magics):
def __init__(self, shell) -> None: ...
def store_or_execute(self, block, name, store_history=...): # -> None:
"""Execute a block, or store it in a variable, per the user's request."""
...
def preclean_input(self, block): ...
def rerun_pasted(self, name=...): # -> None:
"""Rerun a previously pasted command."""
...
@line_magic
def autoindent(self, parameter_s=...): # -> None:
"""Toggle autoindent on/off (deprecated)"""
...
@skip_doctest
@line_magic
def cpaste(self, parameter_s=...): # -> None:
"""Paste & execute a pre-formatted code block from clipboard.
You must terminate the block with '--' (two minus-signs) or Ctrl-D
alone on the line. You can also provide your own sentinel with '%paste
-s %%' ('%%' is the new sentinel for this operation).
The block is dedented prior to execution to enable execution of method
definitions. '>' and '+' characters at the beginning of a line are
ignored, to allow pasting directly from e-mails, diff files and
doctests (the '...' continuation prompt is also stripped). The
executed block is also assigned to variable named 'pasted_block' for
later editing with '%edit pasted_block'.
You can also pass a variable name as an argument, e.g. '%cpaste foo'.
This assigns the pasted block to variable 'foo' as string, without
dedenting or executing it (preceding >>> and + is still stripped)
'%cpaste -r' re-executes the block previously entered by cpaste.
'%cpaste -q' suppresses any additional output messages.
Do not be alarmed by garbled output on Windows (it's a readline bug).
Just press enter and type -- (and press enter again) and the block
will be what was just pasted.
Shell escapes are not supported (yet).
See Also
--------
paste : automatically pull code from clipboard.
Examples
--------
::
In [8]: %cpaste
Pasting code; enter '--' alone on the line to stop.
:>>> a = ["world!", "Hello"]
:>>> print(" ".join(sorted(a)))
:--
Hello world!
::
In [8]: %cpaste
Pasting code; enter '--' alone on the line to stop.
:>>> %alias_magic t timeit
:>>> %t -n1 pass
:--
Created `%t` as an alias for `%timeit`.
Created `%%t` as an alias for `%%timeit`.
354 ns ± 224 ns per loop (mean ± std. dev. of 7 runs, 1 loop each)
"""
...
@line_magic
def paste(self, parameter_s=...): # -> None:
"""Paste & execute a pre-formatted code block from clipboard.
The text is pulled directly from the clipboard without user
intervention and printed back on the screen before execution (unless
the -q flag is given to force quiet mode).
The block is dedented prior to execution to enable execution of method
definitions. '>' and '+' characters at the beginning of a line are
ignored, to allow pasting directly from e-mails, diff files and
doctests (the '...' continuation prompt is also stripped). The
executed block is also assigned to variable named 'pasted_block' for
later editing with '%edit pasted_block'.
You can also pass a variable name as an argument, e.g. '%paste foo'.
This assigns the pasted block to variable 'foo' as string, without
executing it (preceding >>> and + is still stripped).
Options:
-r: re-executes the block previously entered by cpaste.
-q: quiet mode: do not echo the pasted text back to the terminal.
IPython statements (magics, shell escapes) are not supported (yet).
See Also
--------
cpaste : manually paste code into terminal until you mark its end.
"""
...
if sys.platform == "win32": ...

View File

@@ -1,27 +0,0 @@
"""
This type stub file was generated by pyright.
"""
from IPython.core.displayhook import DisplayHook
"""Terminal input and output prompts."""
class Prompts:
def __init__(self, shell) -> None: ...
def vi_mode(self): ...
def in_prompt_tokens(self): ...
def continuation_prompt_tokens(self, width=...): ...
def rewrite_prompt_tokens(self): ...
def out_prompt_tokens(self): ...
class ClassicPrompts(Prompts):
def in_prompt_tokens(self): ...
def continuation_prompt_tokens(self, width=...): ...
def rewrite_prompt_tokens(self): ...
def out_prompt_tokens(self): ...
class RichPromptDisplayHook(DisplayHook):
"""Subclass of base display hook using coloured prompt"""
def write_output_prompt(self): ...
def write_format_data(self, format_dict, md_dict=...) -> None: ...

View File

@@ -1,23 +0,0 @@
"""
This type stub file was generated by pyright.
"""
import importlib
import os
aliases = ...
backends = ...
registered = ...
def register(name, inputhook): # -> None:
"""Register the function *inputhook* as an event loop integration."""
...
class UnknownBackend(KeyError):
def __init__(self, name) -> None: ...
def set_qt_api(gui): # -> str | None:
"""Sets the `QT_API` environment variable if it isn't already set."""
...
def get_inputhook_name_and_func(gui): ...

View File

@@ -1,29 +0,0 @@
"""
This type stub file was generated by pyright.
"""
from prompt_toolkit.completion import Completer
from prompt_toolkit.lexers import Lexer
"""prompt-toolkit utilities
Everything in this module is a private API,
not to be used outside IPython.
"""
_completion_sentinel = ...
class IPythonPTCompleter(Completer):
"""Adaptor to provide IPython completions to prompt_toolkit"""
def __init__(self, ipy_completer=..., shell=...) -> None: ...
@property
def ipy_completer(self): ...
def get_completions(self, document, complete_event): ...
class IPythonPTLexer(Lexer):
"""
Wrapper around PythonLexer and BashLexer.
"""
def __init__(self) -> None: ...
def lex_document(self, document): ...

View File

@@ -1,149 +0,0 @@
"""
This type stub file was generated by pyright.
"""
import os
import signal
import sys
import warnings
from dataclasses import dataclass
from typing import Any
from typing import Callable
from typing import List
from typing import Optional
from IPython.core.getipython import get_ipython
from IPython.terminal.shortcuts import auto_match as match
from IPython.terminal.shortcuts import auto_suggest
from IPython.terminal.shortcuts.filters import filter_from_string
from IPython.utils.decorators import undoc
from prompt_toolkit.application.current import get_app
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.filters import Condition
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.key_binding.bindings import named_commands as nc
from prompt_toolkit.key_binding.bindings.completion import display_completions_like_readline
from prompt_toolkit.key_binding.key_processor import KeyPressEvent
from prompt_toolkit.key_binding.vi_state import InputMode
from prompt_toolkit.key_binding.vi_state import ViState
"""
Module to define and register Terminal IPython shortcuts with
:mod:`prompt_toolkit`
"""
__all__ = ["create_ipython_shortcuts"]
@dataclass
class BaseBinding:
command: Callable[[KeyPressEvent], Any]
keys: List[str]
@dataclass
class RuntimeBinding(BaseBinding):
filter: Condition
@dataclass
class Binding(BaseBinding):
condition: Optional[str] = ...
def __post_init__(self): ...
def create_identifier(handler: Callable): ...
AUTO_MATCH_BINDINGS = ...
AUTO_SUGGEST_BINDINGS = ...
SIMPLE_CONTROL_BINDINGS = ...
ALT_AND_COMOBO_CONTROL_BINDINGS = ...
def add_binding(bindings: KeyBindings, binding: Binding): ...
def create_ipython_shortcuts(shell, skip=...) -> KeyBindings:
"""Set up the prompt_toolkit keyboard shortcuts for IPython.
Parameters
----------
shell: InteractiveShell
The current IPython shell Instance
skip: List[Binding]
Bindings to skip.
Returns
-------
KeyBindings
the keybinding instance for prompt toolkit.
"""
...
def reformat_and_execute(event): # -> None:
"""Reformat code and execute it"""
...
def reformat_text_before_cursor(buffer, document, shell): ...
def handle_return_or_newline_or_execute(event): ...
def newline_or_execute_outer(shell): ...
def previous_history_or_previous_completion(event): # -> None:
"""
Control-P in vi edit mode on readline is history next, unlike default prompt toolkit.
If completer is open this still select previous completion.
"""
...
def next_history_or_next_completion(event): # -> None:
"""
Control-N in vi edit mode on readline is history previous, unlike default prompt toolkit.
If completer is open this still select next completion.
"""
...
def dismiss_completion(event): # -> None:
"""Dismiss completion"""
...
def reset_buffer(event): # -> None:
"""Reset buffer"""
...
def reset_search_buffer(event): # -> None:
"""Reset search buffer"""
...
def suspend_to_bg(event): # -> None:
"""Suspend to background"""
...
def quit(event): # -> None:
"""
Quit application with ``SIGQUIT`` if supported or ``sys.exit`` otherwise.
On platforms that support SIGQUIT, send SIGQUIT to the current process.
On other platforms, just exit the process with a message.
"""
...
def indent_buffer(event): # -> None:
"""Indent buffer"""
...
def newline_autoindent(event) -> None:
    """Insert a newline after the cursor indented appropriately.

    Fancier version of former ``newline_with_copy_margin`` which should
    compute the correct indentation of the inserted line. That is to say, indent
    by 4 extra space after a function definition, class definition, context
    manager... And dedent by 4 space after ``pass``, ``return``, ``raise ...``.
    """
    ...
def open_input_in_editor(event) -> None:
    """Open code from input in external editor"""
    ...
# On Windows the real module defines a working clipboard-paste handler;
# elsewhere only the no-op stub below exists.
if sys.platform == "win32": ...
else:
    @undoc
    def win_paste(event) -> None:
        """Stub used on other platforms"""
        ...
KEY_BINDINGS = ...

View File

@@ -1,65 +0,0 @@
"""
This type stub file was generated by pyright.
"""
from prompt_toolkit.key_binding import KeyPressEvent
"""
Utilities function for keybinding with prompt toolkit.
This will be bound to specific key press and filter modes,
like whether we are in edit mode, and whether the completer is open.
"""
def parenthesis(event: KeyPressEvent) -> None:
    """Auto-close parenthesis"""
    ...
def brackets(event: KeyPressEvent) -> None:
    """Auto-close brackets"""
    ...
def braces(event: KeyPressEvent) -> None:
    """Auto-close braces"""
    ...
def double_quote(event: KeyPressEvent) -> None:
    """Auto-close double quotes"""
    ...
def single_quote(event: KeyPressEvent) -> None:
    """Auto-close single quotes"""
    ...
def docstring_double_quotes(event: KeyPressEvent) -> None:
    """Auto-close docstring (double quotes)"""
    ...
def docstring_single_quotes(event: KeyPressEvent) -> None:
    """Auto-close docstring (single quotes)"""
    ...
def raw_string_parenthesis(event: KeyPressEvent) -> None:
    """Auto-close parenthesis in raw strings"""
    ...
def raw_string_bracket(event: KeyPressEvent) -> None:
    """Auto-close bracket in raw strings"""
    ...
def raw_string_braces(event: KeyPressEvent) -> None:
    """Auto-close braces in raw strings"""
    ...
def skip_over(event: KeyPressEvent) -> None:
    """Skip over automatically added parenthesis/quote.
    (rather than adding another parenthesis/quote)"""
    ...
def delete_pair(event: KeyPressEvent) -> None:
    """Delete auto-closed parenthesis"""
    ...
# Mappings of opening characters to their auto-match handlers; values
# elided by the stub generator.
auto_match_parens = ...
auto_match_parens_raw_string = ...

View File

@@ -1,101 +0,0 @@
"""
This type stub file was generated by pyright.
"""
from typing import Optional
from typing import Union
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.auto_suggest import Suggestion
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.document import Document
from prompt_toolkit.history import History
from prompt_toolkit.key_binding import KeyPressEvent
from prompt_toolkit.layout.processors import Processor
from prompt_toolkit.layout.processors import Transformation
from prompt_toolkit.layout.processors import TransformationInput
from prompt_toolkit.shortcuts import PromptSession
class AppendAutoSuggestionInAnyLine(Processor):
    """
    Append the auto suggestion to lines other than the last (appending to the
    last line is natively supported by the prompt toolkit).
    """
    # style: prompt_toolkit style string applied to the rendered suggestion.
    def __init__(self, style: str = ...) -> None: ...
    def apply_transformation(self, ti: TransformationInput) -> Transformation: ...
class NavigableAutoSuggestFromHistory(AutoSuggestFromHistory):
    """
    A subclass of AutoSuggestFromHistory that allows navigation to next/previous
    suggestion from history. To do so it remembers the current position, but its
    state needs to carefully be cleared on the right events.
    """
    def __init__(self) -> None: ...
    # Buffer-event callback clearing the remembered history position.
    def reset_history_position(self, _: Buffer): ...
    # Detach from the previously connected PromptSession, if any.
    def disconnect(self): ...
    # Attach to *pt_app* so buffer events can clear stale navigation state.
    def connect(self, pt_app: PromptSession): ...
    def get_suggestion(self, buffer: Buffer, document: Document) -> Optional[Suggestion]: ...
    # Move to the previous/next history entry matching *query*.
    def up(self, query: str, other_than: str, history: History) -> None: ...
    def down(self, query: str, other_than: str, history: History) -> None: ...
def accept_or_jump_to_end(event: KeyPressEvent) -> None:
    """Apply autosuggestion or jump to end of line."""
    ...
def accept(event: KeyPressEvent) -> None:
    """Accept autosuggestion"""
    ...
def discard(event: KeyPressEvent) -> None:
    """Discard autosuggestion"""
    ...
def accept_word(event: KeyPressEvent) -> None:
    """Fill partial autosuggestion by word"""
    ...
def accept_character(event: KeyPressEvent) -> None:
    """Fill partial autosuggestion by character"""
    ...
def accept_and_keep_cursor(event: KeyPressEvent) -> None:
    """Accept autosuggestion and keep cursor in place"""
    ...
def accept_and_move_cursor_left(event: KeyPressEvent) -> None:
    """Accept autosuggestion and move cursor left in place"""
    ...
def backspace_and_resume_hint(event: KeyPressEvent) -> None:
    """Resume autosuggestions after deleting last character"""
    ...
def resume_hinting(event: KeyPressEvent) -> None:
    """Resume autosuggestions"""
    ...
def up_and_update_hint(event: KeyPressEvent) -> None:
    """Go up and update hint"""
    ...
def down_and_update_hint(event: KeyPressEvent) -> None:
    """Go down and update hint"""
    ...
def accept_token(event: KeyPressEvent) -> None:
    """Fill partial autosuggestion by token"""
    ...
Provider = Union[AutoSuggestFromHistory, NavigableAutoSuggestFromHistory, None]
def swap_autosuggestion_up(event: KeyPressEvent) -> None:
    """Get next autosuggestion from history."""
    ...
def swap_autosuggestion_down(event: KeyPressEvent) -> None:
    """Get previous autosuggestion from history."""
    ...
def __getattr__(key): ...

View File

@@ -1,81 +0,0 @@
"""
This type stub file was generated by pyright.
"""
import ast
from typing import Callable
from typing import Dict
from typing import Union
from IPython.utils.decorators import undoc
from prompt_toolkit.filters import Condition
from prompt_toolkit.filters import Filter
from prompt_toolkit.key_binding import KeyPressEvent
from prompt_toolkit.layout.layout import FocusableElement
"""
Filters restricting scope of IPython Terminal shortcuts.
"""
# Condition: cursor sits in the leading whitespace of the current line
# (per name; body elided by the stub generator).
@undoc
@Condition
def cursor_in_leading_ws(): ...
def has_focus(value: FocusableElement) -> Condition:
    """Wrapper around has_focus adding a nice `__name__` to tester function"""
    ...
# Condition: there is at least one line below the cursor (per name).
@undoc
@Condition
def has_line_below() -> bool: ...
# Condition: cursor is at the end of the current line (per name).
@undoc
@Condition
def is_cursor_at_the_end_of_line() -> bool: ...
# Condition: there is at least one line above the cursor (per name).
@undoc
@Condition
def has_line_above() -> bool: ...
# Condition gating emacs-bindings-in-vi-insert-mode behaviour — name is
# "emacs bindings in vim" reversed; confirm semantics in the implementation.
@Condition
def ebivim(): ...
# Condition: the platform supports suspending the process (per name).
@Condition
def supports_suspend(): ...
# Condition: auto-matching of brackets/quotes is enabled (per name).
@Condition
def auto_match(): ...
def all_quotes_paired(quote, buf): ...
# Memoization caches for the Condition factories below; keyed by the
# pattern (string or callable) passed to preceding_text/following_text.
_preceding_text_cache: Dict[Union[str, Callable], Condition] = ...
_following_text_cache: Dict[Union[str, Callable], Condition] = ...
def preceding_text(pattern: Union[str, Callable]): ...
def following_text(pattern): ...
# Condition: cursor is not inside an unterminated string literal (per name).
@Condition
def not_inside_unclosed_string(): ...
# Condition: the active suggester supports history navigation (per name).
@Condition
def navigable_suggestions(): ...
# Condition: completions are displayed readline-style (per name).
@Condition
def readline_like_completions(): ...
# Condition: running on Windows (per name).
@Condition
def is_windows_os(): ...
class PassThrough(Filter):
    """A filter allowing to implement pass-through behaviour of keybindings.

    Prompt toolkit key processor dispatches only one event per binding match,
    which means that adding a new shortcut will suppress the old shortcut
    if the keybindings are the same (unless one is filtered out).

    To stop a shortcut binding from suppressing other shortcuts:
    - add the `pass_through` filter to list of filter, and
    - call `pass_through.reply(event)` in the shortcut handler.
    """
    def __init__(self) -> None: ...
    # Mark *event* as handled-but-pass-through so sibling bindings still fire.
    def reply(self, event: KeyPressEvent): ...
    # Filter protocol: evaluate whether the binding is currently active.
    def __call__(self): ...
# Shared filter instances and the name->filter registry used when parsing
# filter expressions; values elided by the stub generator.
pass_through = ...
default_buffer_focused = ...
KEYBINDING_FILTERS = ...
def eval_node(node: Union[ast.AST, None]): ...
def filter_from_string(code: str): ...
__all__ = ["KEYBINDING_FILTERS", "filter_from_string"]

View File

@@ -1,2 +0,0 @@
from .client import DockerClient as DockerClient
from .client import from_env as from_env

View File

@@ -1 +0,0 @@
from .client import APIClient as APIClient

View File

@@ -1,150 +0,0 @@
"""This type stub file was generated by pyright."""
from .. import utils
log = ...
class BuildApiMixin:
    """Stub of docker-py's low-level image-build mixin for ``APIClient``.

    All defaults are elided as ``...`` by the stub generator.
    """
    def build(
        self,
        path=...,
        tag=...,
        quiet=...,
        fileobj=...,
        nocache=...,
        rm=...,
        timeout=...,
        custom_context=...,
        encoding=...,
        pull=...,
        forcerm=...,
        dockerfile=...,
        container_limits=...,
        decode=...,
        buildargs=...,
        gzip=...,
        shmsize=...,
        labels=...,
        cache_from=...,
        target=...,
        network_mode=...,
        squash=...,
        extra_hosts=...,
        platform=...,
        isolation=...,
        use_config_proxy=...,
    ):
        """Similar to the ``docker build`` command. Either ``path`` or ``fileobj``
        needs to be set. ``path`` can be a local path (to a directory
        containing a Dockerfile) or a remote URL. ``fileobj`` must be a
        readable file-like object to a Dockerfile.

        If you have a tar file for the Docker build context (including a
        Dockerfile) already, pass a readable file-like object to ``fileobj``
        and also pass ``custom_context=True``. If the stream is compressed
        also, set ``encoding`` to the correct value (e.g ``gzip``).
        Example:
            >>> from io import BytesIO
            >>> from docker import APIClient
            >>> dockerfile = '''
            ... # Shared Volume
            ... FROM busybox:buildroot-2014.02
            ... VOLUME /data
            ... CMD ["/bin/sh"]
            ... '''
            >>> f = BytesIO(dockerfile.encode('utf-8'))
            >>> cli = APIClient(base_url='tcp://127.0.0.1:2375')
            >>> response = [line for line in cli.build(
            ...     fileobj=f, rm=True, tag='yourname/volume'
            ... )]
            >>> response
            ['{"stream":" ---\\u003e a9eb17255234\\n"}',
            '{"stream":"Step 1 : VOLUME /data\\n"}',
            '{"stream":" ---\\u003e Running in abdc1e6896c6\\n"}',
            '{"stream":" ---\\u003e 713bca62012e\\n"}',
            '{"stream":"Removing intermediate container abdc1e6896c6\\n"}',
            '{"stream":"Step 2 : CMD [\\"/bin/sh\\"]\\n"}',
            '{"stream":" ---\\u003e Running in dba30f2a1a7e\\n"}',
            '{"stream":" ---\\u003e 032b8b2855fc\\n"}',
            '{"stream":"Removing intermediate container dba30f2a1a7e\\n"}',
            '{"stream":"Successfully built 032b8b2855fc\\n"}']
        Args:
            path (str): Path to the directory containing the Dockerfile
            fileobj: A file object to use as the Dockerfile. (Or a file-like
                object)
            tag (str): A tag to add to the final image
            quiet (bool): Whether to return the status
            nocache (bool): Don't use the cache when set to ``True``
            rm (bool): Remove intermediate containers. The ``docker build``
                command now defaults to ``--rm=true``, but we have kept the old
                default of `False` to preserve backward compatibility
            timeout (int): HTTP timeout
            custom_context (bool): Optional if using ``fileobj``
            encoding (str): The encoding for a stream. Set to ``gzip`` for
                compressing
            pull (bool): Downloads any updates to the FROM image in Dockerfiles
            forcerm (bool): Always remove intermediate containers, even after
                unsuccessful builds
            dockerfile (str): path within the build context to the Dockerfile
            gzip (bool): If set to ``True``, gzip compression/encoding is used
            buildargs (dict): A dictionary of build arguments
            container_limits (dict): A dictionary of limits applied to each
                container created by the build process. Valid keys:
                - memory (int): set memory limit for build
                - memswap (int): Total memory (memory + swap), -1 to disable
                    swap
                - cpushares (int): CPU shares (relative weight)
                - cpusetcpus (str): CPUs in which to allow execution, e.g.,
                    ``"0-3"``, ``"0,1"``
            decode (bool): If set to ``True``, the returned stream will be
                decoded into dicts on the fly. Default ``False``
            shmsize (int): Size of `/dev/shm` in bytes. The size must be
                greater than 0. If omitted the system uses 64MB
            labels (dict): A dictionary of labels to set on the image
            cache_from (:py:class:`list`): A list of images used for build
                cache resolution
            target (str): Name of the build-stage to build in a multi-stage
                Dockerfile
            network_mode (str): networking mode for the run commands during
                build
            squash (bool): Squash the resulting images layers into a
                single layer.
            extra_hosts (dict): Extra hosts to add to /etc/hosts in building
                containers, as a mapping of hostname to IP address.
            platform (str): Platform in the format ``os[/arch[/variant]]``
            isolation (str): Isolation technology used during build.
                Default: `None`.
            use_config_proxy (bool): If ``True``, and if the docker client
                configuration file (``~/.docker/config.json`` by default)
                contains a proxy configuration, the corresponding environment
                variables will be set in the container being built.
        Returns:
            A generator for the build output.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
            ``TypeError``
                If neither ``path`` nor ``fileobj`` is specified.
        """
        ...
    @utils.minimum_version("1.31")
    def prune_builds(self):
        """Delete the builder cache.

        Returns:
            (dict): A dictionary containing information about the operation's
                    result. The ``SpaceReclaimed`` key indicates the amount of
                    bytes of disk space reclaimed.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
def process_dockerfile(dockerfile, path): ...

View File

@@ -1,96 +0,0 @@
"""This type stub file was generated by pyright."""
import requests
from .build import BuildApiMixin
from .config import ConfigApiMixin
from .container import ContainerApiMixin
from .daemon import DaemonApiMixin
from .exec_api import ExecApiMixin
from .image import ImageApiMixin
from .network import NetworkApiMixin
from .plugin import PluginApiMixin
from .secret import SecretApiMixin
from .service import ServiceApiMixin
from .swarm import SwarmApiMixin
from .volume import VolumeApiMixin
class APIClient(
    requests.Session,
    BuildApiMixin,
    ConfigApiMixin,
    ContainerApiMixin,
    DaemonApiMixin,
    ExecApiMixin,
    ImageApiMixin,
    NetworkApiMixin,
    PluginApiMixin,
    SecretApiMixin,
    ServiceApiMixin,
    SwarmApiMixin,
    VolumeApiMixin,
):
    """A low-level client for the Docker Engine API.

    Example:
        >>> import docker
        >>> client = docker.APIClient(base_url='unix://var/run/docker.sock')
        >>> client.version()
        {u'ApiVersion': u'1.33',
        u'Arch': u'amd64',
        u'BuildTime': u'2017-11-19T18:46:37.000000000+00:00',
        u'GitCommit': u'f4ffd2511c',
        u'GoVersion': u'go1.9.2',
        u'KernelVersion': u'4.14.3-1-ARCH',
        u'MinAPIVersion': u'1.12',
        u'Os': u'linux',
        u'Version': u'17.10.0-ce'}
    Args:
        base_url (str): URL to the Docker server. For example,
            ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
        version (str): The version of the API to use. Set to ``auto`` to
            automatically detect the server's version. Default: ``1.35``
        timeout (int): Default timeout for API calls, in seconds.
        tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
            ``True`` to enable it with default options, or pass a
            :py:class:`~docker.tls.TLSConfig` object to use custom
            configuration.
        user_agent (str): Set a custom user agent for requests to the server.
        credstore_env (dict): Override environment variables when calling the
            credential store process.
        use_ssh_client (bool): If set to `True`, an ssh connection is made
            via shelling out to the ssh client. Ensure the ssh client is
            installed and configured on the host.
        max_pool_size (int): The maximum number of connections
            to save in the pool.
    """
    # requests.Session attribute list copied on pickling (value elided).
    __attrs__ = ...
    def __init__(
        self,
        base_url=...,
        version=...,
        timeout=...,
        tls=...,
        user_agent=...,
        num_pools=...,
        credstore_env=...,
        use_ssh_client=...,
        max_pool_size=...,
    ) -> None: ...
    # Transport-adapter lookup for *url*; overrides requests.Session.
    def get_adapter(self, url): ...
    @property
    def api_version(self): ...
    def reload_config(self, dockercfg_path=...) -> None:
        """Force a reload of the auth configuration.

        Args:
            dockercfg_path (str): Use a custom path for the Docker config file
                (default ``$HOME/.docker/config.json`` if present,
                otherwise ``$HOME/.dockercfg``)
        Returns:
            None
        """
        ...

View File

@@ -1,61 +0,0 @@
"""This type stub file was generated by pyright."""
from .. import utils
class ConfigApiMixin:
    """Stub of docker-py's low-level config API mixin for ``APIClient``."""
    @utils.minimum_version("1.30")
    def create_config(self, name, data, labels=..., templating=...):
        """Create a config.

        Args:
            name (string): Name of the config
            data (bytes): Config data to be stored
            labels (dict): A mapping of labels to assign to the config
            templating (dict): dictionary containing the name of the
                templating driver to be used expressed as
                { name: <templating_driver_name>}
        Returns (dict): ID of the newly created config
        """
        ...
    @utils.minimum_version("1.30")
    @utils.check_resource("id")
    def inspect_config(self, id):
        """Retrieve config metadata.

        Args:
            id (string): Full ID of the config to inspect
        Returns (dict): A dictionary of metadata
        Raises:
            :py:class:`docker.errors.NotFound`
                if no config with that ID exists
        """
        ...
    @utils.minimum_version("1.30")
    @utils.check_resource("id")
    def remove_config(self, id): # -> Literal[True]:
        """Remove a config.

        Args:
            id (string): Full ID of the config to remove
        Returns (boolean): True if successful
        Raises:
            :py:class:`docker.errors.NotFound`
                if no config with that ID exists
        """
        ...
    @utils.minimum_version("1.30")
    def configs(self, filters=...):
        """List configs.

        Args:
            filters (dict): A map of filters to process on the configs
                list. Available filters: ``names``
        Returns (list): A list of configs
        """
        ...

View File

@@ -1,962 +0,0 @@
"""This type stub file was generated by pyright."""
from .. import utils
class ContainerApiMixin:
    @utils.check_resource("container")
    def attach(self, container, stdout=..., stderr=..., stream=..., logs=..., demux=...): # -> CancellableStream:
        """Attach to a container.

        The ``.logs()`` function is a wrapper around this method, which you can
        use instead if you want to fetch/stream container output without first
        retrieving the entire backlog.
        Args:
            container (str): The container to attach to.
            stdout (bool): Include stdout.
            stderr (bool): Include stderr.
            stream (bool): Return container output progressively as an iterator
                of strings, rather than a single string.
            logs (bool): Include the container's previous output.
            demux (bool): Keep stdout and stderr separate.
        Returns:
            By default, the container's output as a single string (two if
            ``demux=True``: one for stdout and one for stderr).
            If ``stream=True``, an iterator of output strings. If
            ``demux=True``, two iterators are returned: one for stdout and one
            for stderr.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    @utils.check_resource("container")
    def attach_socket(self, container, params=..., ws=...):
        """Like ``attach``, but returns the underlying socket-like object for the
        HTTP request.

        Args:
            container (str): The container to attach to.
            params (dict): Dictionary of request parameters (e.g. ``stdout``,
                ``stderr``, ``stream``).
                For ``detachKeys``, ~/.docker/config.json is used by default.
            ws (bool): Use websockets instead of raw HTTP.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    @utils.check_resource("container")
    def commit(self, container, repository=..., tag=..., message=..., author=..., changes=..., conf=...):
        """Commit a container to an image. Similar to the ``docker commit``
        command.

        Args:
            container (str): The image hash of the container
            repository (str): The repository to push the image to
            tag (str): The tag to push
            message (str): A commit message
            author (str): The name of the author
            changes (str): Dockerfile instructions to apply while committing
            conf (dict): The configuration for the container. See the
                `Engine API documentation
                <https://docs.docker.com/reference/api/docker_remote_api/>`_
                for full details.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def containers(
        self, quiet=..., all=..., trunc=..., latest=..., since=..., before=..., limit=..., size=..., filters=...
    ): # -> list[dict[str, Unknown]]:
        """List containers. Similar to the ``docker ps`` command.

        Args:
            quiet (bool): Only display numeric Ids
            all (bool): Show all containers. Only running containers are shown
                by default
            trunc (bool): Truncate output
            latest (bool): Show only the latest created container, include
                non-running ones.
            since (str): Show only containers created since Id or Name, include
                non-running ones
            before (str): Show only container created before Id or Name,
                include non-running ones
            limit (int): Show `limit` last created containers, include
                non-running ones
            size (bool): Display sizes
            filters (dict): Filters to be processed on the image list.
                Available filters:
                - `exited` (int): Only containers with specified exit code
                - `status` (str): One of ``restarting``, ``running``,
                    ``paused``, ``exited``
                - `label` (str|list): format either ``"key"``, ``"key=value"``
                    or a list of such.
                - `id` (str): The id of the container.
                - `name` (str): The name of the container.
                - `ancestor` (str): Filter by container ancestor. Format of
                    ``<image-name>[:tag]``, ``<image-id>``, or
                    ``<image@digest>``.
                - `before` (str): Only containers created before a particular
                    container. Give the container name or id.
                - `since` (str): Only containers created after a particular
                    container. Give container name or id.
                A comprehensive list can be found in the documentation for
                `docker ps
                <https://docs.docker.com/engine/reference/commandline/ps>`_.
        Returns:
            A list of dicts, one per container
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def create_container(
        self,
        image,
        command=...,
        hostname=...,
        user=...,
        detach=...,
        stdin_open=...,
        tty=...,
        ports=...,
        environment=...,
        volumes=...,
        network_disabled=...,
        name=...,
        entrypoint=...,
        working_dir=...,
        domainname=...,
        host_config=...,
        mac_address=...,
        labels=...,
        stop_signal=...,
        networking_config=...,
        healthcheck=...,
        stop_timeout=...,
        runtime=...,
        use_config_proxy=...,
        platform=...,
    ):
        """Creates a container. Parameters are similar to those for the ``docker
        run`` command except it doesn't support the attach options (``-a``).

        The arguments that are passed directly to this function are
        host-independent configuration options. Host-specific configuration
        is passed with the `host_config` argument. You'll normally want to
        use this method in combination with the :py:meth:`create_host_config`
        method to generate ``host_config``.
        **Port bindings**
        Port binding is done in two parts: first, provide a list of ports to
        open inside the container with the ``ports`` parameter, then declare
        bindings with the ``host_config`` parameter. For example:
        .. code-block:: python
            container_id = client.api.create_container(
                'busybox', 'ls', ports=[1111, 2222],
                host_config=client.api.create_host_config(port_bindings={
                    1111: 4567,
                    2222: None
                })
            )
        You can limit the host address on which the port will be exposed like
        such:
        .. code-block:: python
            client.api.create_host_config(
                port_bindings={1111: ('127.0.0.1', 4567)}
            )
        Or without host port assignment:
        .. code-block:: python
            client.api.create_host_config(port_bindings={1111: ('127.0.0.1',)})
        If you wish to use UDP instead of TCP (default), you need to declare
        ports as such in both the config and host config:
        .. code-block:: python
            container_id = client.api.create_container(
                'busybox', 'ls', ports=[(1111, 'udp'), 2222],
                host_config=client.api.create_host_config(port_bindings={
                    '1111/udp': 4567, 2222: None
                })
            )
        To bind multiple host ports to a single container port, use the
        following syntax:
        .. code-block:: python
            client.api.create_host_config(port_bindings={
                1111: [1234, 4567]
            })
        You can also bind multiple IPs to a single container port:
        .. code-block:: python
            client.api.create_host_config(port_bindings={
                1111: [
                    ('192.168.0.100', 1234),
                    ('192.168.0.101', 1234)
                ]
            })
        **Using volumes**
        Volume declaration is done in two parts. Provide a list of
        paths to use as mountpoints inside the container with the
        ``volumes`` parameter, and declare mappings from paths on the host
        in the ``host_config`` section.
        .. code-block:: python
            container_id = client.api.create_container(
                'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
                host_config=client.api.create_host_config(binds={
                    '/home/user1/': {
                        'bind': '/mnt/vol2',
                        'mode': 'rw',
                    },
                    '/var/www': {
                        'bind': '/mnt/vol1',
                        'mode': 'ro',
                    }
                })
            )
        You can alternatively specify binds as a list. This code is equivalent
        to the example above:
        .. code-block:: python
            container_id = client.api.create_container(
                'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
                host_config=client.api.create_host_config(binds=[
                    '/home/user1/:/mnt/vol2',
                    '/var/www:/mnt/vol1:ro',
                ])
            )
        **Networking**
        You can specify networks to connect the container to by using the
        ``networking_config`` parameter. At the time of creation, you can
        only connect a container to a single networking, but you
        can create more connections by using
        :py:meth:`~connect_container_to_network`.
        For example:
        .. code-block:: python
            networking_config = client.api.create_networking_config({
                'network1': client.api.create_endpoint_config(
                    ipv4_address='172.28.0.124',
                    aliases=['foo', 'bar'],
                    links=['container2']
                )
            })
            ctnr = client.api.create_container(
                img, command, networking_config=networking_config
            )
        Args:
            image (str): The image to run
            command (str or list): The command to be run in the container
            hostname (str): Optional hostname for the container
            user (str or int): Username or UID
            detach (bool): Detached mode: run container in the background and
                return container ID
            stdin_open (bool): Keep STDIN open even if not attached
            tty (bool): Allocate a pseudo-TTY
            ports (list of ints): A list of port numbers
            environment (dict or list): A dictionary or a list of strings in
                the following format ``["PASSWORD=xxx"]`` or
                ``{"PASSWORD": "xxx"}``.
            volumes (str or list): List of paths inside the container to use
                as volumes.
            network_disabled (bool): Disable networking
            name (str): A name for the container
            entrypoint (str or list): An entrypoint
            working_dir (str): Path to the working directory
            domainname (str): The domain name to use for the container
            host_config (dict): A dictionary created with
                :py:meth:`create_host_config`.
            mac_address (str): The Mac Address to assign the container
            labels (dict or list): A dictionary of name-value labels (e.g.
                ``{"label1": "value1", "label2": "value2"}``) or a list of
                names of labels to set with empty values (e.g.
                ``["label1", "label2"]``)
            stop_signal (str): The stop signal to use to stop the container
                (e.g. ``SIGINT``).
            stop_timeout (int): Timeout to stop the container, in seconds.
                Default: 10
            networking_config (dict): A networking configuration generated
                by :py:meth:`create_networking_config`.
            runtime (str): Runtime to use with this container.
            healthcheck (dict): Specify a test to perform to check that the
                container is healthy.
            use_config_proxy (bool): If ``True``, and if the docker client
                configuration file (``~/.docker/config.json`` by default)
                contains a proxy configuration, the corresponding environment
                variables will be set in the container being created.
            platform (str): Platform in the format ``os[/arch[/variant]]``.
        Returns:
            A dictionary with an image 'Id' key and a 'Warnings' key.
        Raises:
            :py:class:`docker.errors.ImageNotFound`
                If the specified image does not exist.
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
def create_container_config(self, *args, **kwargs): ...
def create_container_from_config(self, config, name=..., platform=...): ...
    def create_host_config(self, *args, **kwargs): # -> HostConfig:
        """Create a dictionary for the ``host_config`` argument to
        :py:meth:`create_container`.

        Args:
            auto_remove (bool): enable auto-removal of the container on daemon
                side when the container's process exits.
            binds (dict): Volumes to bind. See :py:meth:`create_container`
                for more information.
            blkio_weight_device: Block IO weight (relative device weight) in
                the form of: ``[{"Path": "device_path", "Weight": weight}]``.
            blkio_weight: Block IO weight (relative weight), accepts a weight
                value between 10 and 1000.
            cap_add (list of str): Add kernel capabilities. For example,
                ``["SYS_ADMIN", "MKNOD"]``.
            cap_drop (list of str): Drop kernel capabilities.
            cpu_period (int): The length of a CPU period in microseconds.
            cpu_quota (int): Microseconds of CPU time that the container can
                get in a CPU period.
            cpu_shares (int): CPU shares (relative weight).
            cpuset_cpus (str): CPUs in which to allow execution (``0-3``,
                ``0,1``).
            cpuset_mems (str): Memory nodes (MEMs) in which to allow execution
                (``0-3``, ``0,1``). Only effective on NUMA systems.
            device_cgroup_rules (:py:class:`list`): A list of cgroup rules to
                apply to the container.
            device_read_bps: Limit read rate (bytes per second) from a device
                in the form of: `[{"Path": "device_path", "Rate": rate}]`
            device_read_iops: Limit read rate (IO per second) from a device.
            device_write_bps: Limit write rate (bytes per second) from a
                device.
            device_write_iops: Limit write rate (IO per second) from a device.
            devices (:py:class:`list`): Expose host devices to the container,
                as a list of strings in the form
                ``<path_on_host>:<path_in_container>:<cgroup_permissions>``.
                For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
                to have read-write access to the host's ``/dev/sda`` via a
                node named ``/dev/xvda`` inside the container.
            device_requests (:py:class:`list`): Expose host resources such as
                GPUs to the container, as a list of
                :py:class:`docker.types.DeviceRequest` instances.
            dns (:py:class:`list`): Set custom DNS servers.
            dns_opt (:py:class:`list`): Additional options to be added to the
                container's ``resolv.conf`` file
            dns_search (:py:class:`list`): DNS search domains.
            extra_hosts (dict): Additional hostnames to resolve inside the
                container, as a mapping of hostname to IP address.
            group_add (:py:class:`list`): List of additional group names and/or
                IDs that the container process will run as.
            init (bool): Run an init inside the container that forwards
                signals and reaps processes
            ipc_mode (str): Set the IPC mode for the container.
            isolation (str): Isolation technology to use. Default: ``None``.
            links (dict): Mapping of links using the
                ``{'container': 'alias'}`` format. The alias is optional.
                Containers declared in this dict will be linked to the new
                container using the provided alias. Default: ``None``.
            log_config (LogConfig): Logging configuration
            lxc_conf (dict): LXC config.
            mem_limit (float or str): Memory limit. Accepts float values
                (which represent the memory limit of the created container in
                bytes) or a string with a units identification char
                (``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
                specified without a units character, bytes are assumed as an
            mem_reservation (float or str): Memory soft limit.
            mem_swappiness (int): Tune a container's memory swappiness
                behavior. Accepts number between 0 and 100.
            memswap_limit (str or int): Maximum amount of memory + swap a
                container is allowed to consume.
            mounts (:py:class:`list`): Specification for mounts to be added to
                the container. More powerful alternative to ``binds``. Each
                item in the list is expected to be a
                :py:class:`docker.types.Mount` object.
            network_mode (str): One of:
                - ``bridge`` Create a new network stack for the container on
                    the bridge network.
                - ``none`` No networking for this container.
                - ``container:<name|id>`` Reuse another container's network
                    stack.
                - ``host`` Use the host network stack.
                    This mode is incompatible with ``port_bindings``.
            oom_kill_disable (bool): Whether to disable OOM killer.
            oom_score_adj (int): An integer value containing the score given
                to the container in order to tune OOM killer preferences.
            pid_mode (str): If set to ``host``, use the host PID namespace
                inside the container.
            pids_limit (int): Tune a container's pids limit. Set ``-1`` for
                unlimited.
            port_bindings (dict): See :py:meth:`create_container`
                for more information.
                Imcompatible with ``host`` in ``network_mode``.
            privileged (bool): Give extended privileges to this container.
            publish_all_ports (bool): Publish all ports to the host.
            read_only (bool): Mount the container's root filesystem as read
                only.
            restart_policy (dict): Restart the container when it exits.
                Configured as a dictionary with keys:
                - ``Name`` One of ``on-failure``, or ``always``.
                - ``MaximumRetryCount`` Number of times to restart the
                    container on failure.
            security_opt (:py:class:`list`): A list of string values to
                customize labels for MLS systems, such as SELinux.
            shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
            storage_opt (dict): Storage driver options per container as a
                key-value mapping.
            sysctls (dict): Kernel parameters to set in the container.
            tmpfs (dict): Temporary filesystems to mount, as a dictionary
                mapping a path inside the container to options for that path.
                For example:
                .. code-block:: python
                    {
                        '/mnt/vol2': '',
                        '/mnt/vol1': 'size=3G,uid=1000'
                    }
            ulimits (:py:class:`list`): Ulimits to set inside the container,
                as a list of :py:class:`docker.types.Ulimit` instances.
            userns_mode (str): Sets the user namespace mode for the container
                when user namespace remapping option is enabled. Supported
                values are: ``host``
            uts_mode (str): Sets the UTS namespace mode for the container.
                Supported values are: ``host``
            volumes_from (:py:class:`list`): List of container names or IDs to
                get volumes from.
            runtime (str): Runtime to use with this container.
        Returns:
            (dict) A dictionary which can be passed to the ``host_config``
            argument to :py:meth:`create_container`.
        Example:
            >>> client.api.create_host_config(
            ...     privileged=True,
            ...     cap_drop=['MKNOD'],
            ...     volumes_from=['nostalgic_newton'],
            ... )
            {'CapDrop': ['MKNOD'], 'LxcConf': None, 'Privileged': True,
            'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False}
        """
        ...
def create_networking_config(self, *args, **kwargs):  # -> NetworkingConfig:
    """Create a networking config dictionary to be used as the
    ``networking_config`` parameter in :py:meth:`create_container`.

    Args:
        endpoints_config (dict): A dictionary mapping network names to
            endpoint configurations generated by
            :py:meth:`create_endpoint_config`.

    Returns:
        (dict) A networking config.

    Example:
        >>> client.api.create_network('network1')
        >>> networking_config = client.api.create_networking_config({
            'network1': client.api.create_endpoint_config()
        })
        >>> container = client.api.create_container(
            img, command, networking_config=networking_config
        )
    """
    ...
def create_endpoint_config(self, *args, **kwargs):  # -> EndpointConfig:
    """Create an endpoint config dictionary to be used with
    :py:meth:`create_networking_config`.

    Args:
        aliases (:py:class:`list`): A list of aliases for this endpoint.
            Names in that list can be used within the network to reach the
            container. Defaults to ``None``.
        links (dict): Mapping of links for this endpoint using the
            ``{'container': 'alias'}`` format. The alias is optional.
            Containers declared in this dict will be linked to this
            container using the provided alias. Defaults to ``None``.
        ipv4_address (str): The IP address of this container on the
            network, using the IPv4 protocol. Defaults to ``None``.
        ipv6_address (str): The IP address of this container on the
            network, using the IPv6 protocol. Defaults to ``None``.
        link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
            addresses.
        driver_opt (dict): A dictionary of options to provide to the
            network driver. Defaults to ``None``.

    Returns:
        (dict) An endpoint config.

    Example:
        >>> endpoint_config = client.api.create_endpoint_config(
            aliases=['web', 'app'],
            links={'app_db': 'db', 'another': None},
            ipv4_address='132.65.0.123'
        )
    """
    ...
@utils.check_resource("container")
def diff(self, container):
    """Inspect changes on a container's filesystem.

    Args:
        container (str): The container to diff

    Returns:
        (list) A list of dictionaries containing the attributes `Path`
            and `Kind`.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    ...
@utils.check_resource("container")
def export(self, container, chunk_size=...):
    """Export the contents of a filesystem as a tar archive.

    Args:
        container (str): The container to export
        chunk_size (int): The number of bytes returned by each iteration
            of the generator. If ``None``, data will be streamed as it is
            received. Default: 2 MB

    Returns:
        (generator): The archived filesystem data stream

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    ...
@utils.check_resource("container")
def get_archive(self, container, path, chunk_size=..., encode_stream=...):  # -> tuple[Unknown, Any | None]:
    """Retrieve a file or folder from a container in the form of a tar
    archive.

    Args:
        container (str): The container where the file is located
        path (str): Path to the file or folder to retrieve
        chunk_size (int): The number of bytes returned by each iteration
            of the generator. If ``None``, data will be streamed as it is
            received. Default: 2 MB
        encode_stream (bool): Determines if data should be encoded
            (gzip-compressed) during transmission. Default: False

    Returns:
        (tuple): First element is a raw tar data stream. Second element is
            a dict containing ``stat`` information on the specified ``path``.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.

    Example:
        >>> c = docker.APIClient()
        >>> f = open('./sh_bin.tar', 'wb')
        >>> bits, stat = c.api.get_archive(container, '/bin/sh')
        >>> print(stat)
        {'name': 'sh', 'size': 1075464, 'mode': 493,
         'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''}
        >>> for chunk in bits:
        ...    f.write(chunk)
        >>> f.close()
    """
    ...
@utils.check_resource("container")
def inspect_container(self, container):
    """Identical to the `docker inspect` command, but only for containers.

    Args:
        container (str): The container to inspect

    Returns:
        (dict): Similar to the output of `docker inspect`, but as a
            single dict

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    ...
@utils.check_resource("container")
def kill(self, container, signal=...):  # -> None:
    """Kill a container or send a signal to a container.

    Args:
        container (str): The container to kill
        signal (str or int): The signal to send. Defaults to ``SIGKILL``

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    ...
@utils.check_resource("container")
def logs(
    self, container, stdout=..., stderr=..., stream=..., timestamps=..., tail=..., since=..., follow=..., until=...
):  # -> CancellableStream:
    """Get logs from a container. Similar to the ``docker logs`` command.

    The ``stream`` parameter makes the ``logs`` function return a blocking
    generator you can iterate over to retrieve log output as it happens.

    Args:
        container (str): The container to get logs from
        stdout (bool): Get ``STDOUT``. Default ``True``
        stderr (bool): Get ``STDERR``. Default ``True``
        stream (bool): Stream the response. Default ``False``
        timestamps (bool): Show timestamps. Default ``False``
        tail (str or int): Output specified number of lines at the end of
            logs. Either an integer of number of lines or the string
            ``all``. Default ``all``
        since (datetime, int, or float): Show logs since a given datetime,
            integer epoch (in seconds) or float (in fractional seconds)
        follow (bool): Follow log output. Default ``False``
        until (datetime, int, or float): Show logs that occurred before
            the given datetime, integer epoch (in seconds), or
            float (in fractional seconds)

    Returns:
        (generator or str)

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    ...
@utils.check_resource("container")
def pause(self, container):  # -> None:
    """Pauses all processes within a container.

    Args:
        container (str): The container to pause

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    ...
@utils.check_resource("container")
def port(self, container, private_port):  # -> None:
    """Lookup the public-facing port that is NAT-ed to ``private_port``.
    Identical to the ``docker port`` command.

    Args:
        container (str): The container to look up
        private_port (int): The private port to inspect

    Returns:
        (list of dict): The mapping for the host ports

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.

    Example:
        .. code-block:: bash

            $ docker run -d -p 80:80 ubuntu:14.04 /bin/sleep 30
            7174d6347063a83f412fad6124c99cffd25ffe1a0807eb4b7f9cec76ac8cb43b

        .. code-block:: python

            >>> client.api.port('7174d6347063', 80)
            [{'HostIp': '0.0.0.0', 'HostPort': '80'}]
    """
    ...
@utils.check_resource("container")
def put_archive(self, container, path, data):
    """Insert a file or folder in an existing container using a tar archive as
    source.

    Args:
        container (str): The container where the file(s) will be extracted
        path (str): Path inside the container where the file(s) will be
            extracted. Must exist.
        data (bytes or stream): tar data to be extracted

    Returns:
        (bool): True if the call succeeds.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    ...
@utils.minimum_version("1.25")
def prune_containers(self, filters=...):
    """Delete stopped containers.

    Args:
        filters (dict): Filters to process on the prune list.

    Returns:
        (dict): A dict containing a list of deleted container IDs and
            the amount of disk space reclaimed in bytes.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    ...
@utils.check_resource("container")
def remove_container(self, container, v=..., link=..., force=...):  # -> None:
    """Remove a container. Similar to the ``docker rm`` command.

    Args:
        container (str): The container to remove
        v (bool): Remove the volumes associated with the container
        link (bool): Remove the specified link and not the underlying
            container
        force (bool): Force the removal of a running container (uses
            ``SIGKILL``)

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    ...
@utils.check_resource("container")
def rename(self, container, name):  # -> None:
    """Rename a container. Similar to the ``docker rename`` command.

    Args:
        container (str): ID of the container to rename
        name (str): New name for the container

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    ...
@utils.check_resource("container")
def resize(self, container, height, width):  # -> None:
    """Resize the tty session.

    Args:
        container (str or dict): The container to resize
        height (int): Height of tty session
        width (int): Width of tty session

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    ...
@utils.check_resource("container")
def restart(self, container, timeout=...):  # -> None:
    """Restart a container. Similar to the ``docker restart`` command.

    Args:
        container (str or dict): The container to restart. If a dict, the
            ``Id`` key is used.
        timeout (int): Number of seconds to try to stop for before killing
            the container. Once killed it will then be restarted. Default
            is 10 seconds.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    ...
@utils.check_resource("container")
def start(self, container, *args, **kwargs):  # -> None:
    """Start a container. Similar to the ``docker start`` command, but
    doesn't support attach options.

    **Deprecation warning:** Passing configuration options in ``start`` is
    no longer supported. Users are expected to provide host config options
    in the ``host_config`` parameter of
    :py:meth:`~ContainerApiMixin.create_container`.

    Args:
        container (str): The container to start

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
        :py:class:`docker.errors.DeprecatedMethod`
            If any argument besides ``container`` are provided.

    Example:
        >>> container = client.api.create_container(
        ...     image='busybox:latest',
        ...     command='/bin/sleep 30')
        >>> client.api.start(container=container.get('Id'))
    """
    ...
@utils.check_resource("container")
def stats(self, container, decode=..., stream=..., one_shot=...):
    """Stream statistics for a specific container. Similar to the
    ``docker stats`` command.

    Args:
        container (str): The container to stream statistics from
        decode (bool): If set to true, stream will be decoded into dicts
            on the fly. Only applicable if ``stream`` is True.
            False by default.
        stream (bool): If set to false, only the current stats will be
            returned instead of a stream. True by default.
        one_shot (bool): If set to true, Only get a single stat instead of
            waiting for 2 cycles. Must be used with stream=false. False by
            default.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    ...
@utils.check_resource("container")
def stop(self, container, timeout=...):  # -> None:
    """Stops a container. Similar to the ``docker stop`` command.

    Args:
        container (str): The container to stop
        timeout (int): Timeout in seconds to wait for the container to
            stop before sending a ``SIGKILL``. If None, then the
            StopTimeout value of the container will be used.
            Default: None

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    ...
@utils.check_resource("container")
def top(self, container, ps_args=...):
    """Display the running processes of a container.

    Args:
        container (str): The container to inspect
        ps_args (str): Optional arguments passed to ps (e.g. ``aux``)

    Returns:
        (str): The output of the top

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    ...
@utils.check_resource("container")
def unpause(self, container):  # -> None:
    """Unpause all processes within a container.

    Args:
        container (str): The container to unpause
    """
    ...
@utils.minimum_version("1.22")
@utils.check_resource("container")
def update_container(
    self,
    container,
    blkio_weight=...,
    cpu_period=...,
    cpu_quota=...,
    cpu_shares=...,
    cpuset_cpus=...,
    cpuset_mems=...,
    mem_limit=...,
    mem_reservation=...,
    memswap_limit=...,
    kernel_memory=...,
    restart_policy=...,
):
    """Update resource configs of one or more containers.

    Args:
        container (str): The container to inspect
        blkio_weight (int): Block IO (relative weight), between 10 and 1000
        cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period
        cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota
        cpu_shares (int): CPU shares (relative weight)
        cpuset_cpus (str): CPUs in which to allow execution
        cpuset_mems (str): MEMs in which to allow execution
        mem_limit (float or str): Memory limit
        mem_reservation (float or str): Memory soft limit
        memswap_limit (int or str): Total memory (memory + swap), -1 to
            disable swap
        kernel_memory (int or str): Kernel memory limit
        restart_policy (dict): Restart policy dictionary

    Returns:
        (dict): Dictionary containing a ``Warnings`` key.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    ...
@utils.check_resource("container")
def wait(self, container, timeout=..., condition=...):
    """Block until a container stops, then return its exit code. Similar to
    the ``docker wait`` command.

    Args:
        container (str or dict): The container to wait on. If a dict, the
            ``Id`` key is used.
        timeout (int): Request timeout
        condition (str): Wait until a container state reaches the given
            condition, either ``not-running`` (default), ``next-exit``,
            or ``removed``

    Returns:
        (dict): The API's response as a Python dictionary, including
            the container's exit code under the ``StatusCode`` attribute.

    Raises:
        :py:class:`requests.exceptions.ReadTimeout`
            If the timeout is exceeded.
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    ...

View File

@@ -1,115 +0,0 @@
"""This type stub file was generated by pyright."""
from .. import utils
class DaemonApiMixin:
    """API endpoints for daemon-level operations: data usage, events,
    system info, registry login, ping, and version."""

    @utils.minimum_version("1.25")
    def df(self):
        """Get data usage information.

        Returns:
            (dict): A dictionary representing different resource categories
                and their respective data usage.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def events(self, since=..., until=..., filters=..., decode=...):  # -> CancellableStream:
        """Get real-time events from the server. Similar to the ``docker events``
        command.

        Args:
            since (UTC datetime or int): Get events from this point
            until (UTC datetime or int): Get events until this point
            filters (dict): Filter the events by event time, container or image
            decode (bool): If set to true, stream will be decoded into dicts on
                the fly. False by default.

        Returns:
            A :py:class:`docker.types.daemon.CancellableStream` generator

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Example:
            >>> for event in client.events(decode=True)
            ...   print(event)
            {u'from': u'image/with:tag',
             u'id': u'container-id',
             u'status': u'start',
             u'time': 1423339459}
            ...

            or

            >>> events = client.events()
            >>> for event in events:
            ...   print(event)
            >>> # and cancel from another thread
            >>> events.close()
        """
        ...
    def info(self):
        """Display system-wide information. Identical to the ``docker info``
        command.

        Returns:
            (dict): The info as a dict

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def login(
        self, username, password=..., email=..., registry=..., reauth=..., dockercfg_path=...
    ):  # -> dict[str, str | Unknown] | Any:
        """Authenticate with a registry. Similar to the ``docker login`` command.

        Args:
            username (str): The registry username
            password (str): The plaintext password
            email (str): The email for the registry account
            registry (str): URL to the registry. E.g.
                ``https://index.docker.io/v1/``
            reauth (bool): Whether or not to refresh existing authentication on
                the Docker server.
            dockercfg_path (str): Use a custom path for the Docker config file
                (default ``$HOME/.docker/config.json`` if present,
                otherwise ``$HOME/.dockercfg``)

        Returns:
            (dict): The response from the login request

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def ping(self):
        """Checks the server is responsive. An exception will be raised if it
        isn't responding.

        Returns:
            (bool) The response from the server.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def version(self, api_version=...):
        """Returns version information from the server. Similar to the ``docker
        version`` command.

        Returns:
            (dict): The server version information

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...

View File

@@ -1,100 +0,0 @@
"""This type stub file was generated by pyright."""
from .. import utils
class ExecApiMixin:
    """API endpoints for exec instances: create, inspect, resize and start
    commands executed inside a running container."""

    @utils.check_resource("container")
    def exec_create(
        self,
        container,
        cmd,
        stdout=...,
        stderr=...,
        stdin=...,
        tty=...,
        privileged=...,
        user=...,
        environment=...,
        workdir=...,
        detach_keys=...,
    ):
        """Sets up an exec instance in a running container.

        Args:
            container (str): Target container where exec instance will be
                created
            cmd (str or list): Command to be executed
            stdout (bool): Attach to stdout. Default: ``True``
            stderr (bool): Attach to stderr. Default: ``True``
            stdin (bool): Attach to stdin. Default: ``False``
            tty (bool): Allocate a pseudo-TTY. Default: False
            privileged (bool): Run as privileged.
            user (str): User to execute command as. Default: root
            environment (dict or list): A dictionary or a list of strings in
                the following format ``["PASSWORD=xxx"]`` or
                ``{"PASSWORD": "xxx"}``.
            workdir (str): Path to working directory for this exec session
            detach_keys (str): Override the key sequence for detaching
                a container. Format is a single character `[a-Z]`
                or `ctrl-<value>` where `<value>` is one of:
                `a-z`, `@`, `^`, `[`, `,` or `_`.
                ~/.docker/config.json is used by default.

        Returns:
            (dict): A dictionary with an exec ``Id`` key.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def exec_inspect(self, exec_id):
        """Return low-level information about an exec command.

        Args:
            exec_id (str): ID of the exec instance

        Returns:
            (dict): Dictionary of values returned by the endpoint.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def exec_resize(self, exec_id, height=..., width=...):  # -> None:
        """Resize the tty session used by the specified exec command.

        Args:
            exec_id (str): ID of the exec instance
            height (int): Height of tty session
            width (int): Width of tty session
        """
        ...
    @utils.check_resource("exec_id")
    def exec_start(self, exec_id, detach=..., tty=..., stream=..., socket=..., demux=...):  # -> CancellableStream:
        """Start a previously set up exec instance.

        Args:
            exec_id (str): ID of the exec instance
            detach (bool): If true, detach from the exec command.
                Default: False
            tty (bool): Allocate a pseudo-TTY. Default: False
            stream (bool): Return response data progressively as an iterator
                of strings, rather than a single string.
            socket (bool): Return the connection socket to allow custom
                read/write operations. Must be closed by the caller when done.
            demux (bool): Return stdout and stderr separately

        Returns:
            (generator or str or tuple): If ``stream=True``, a generator
            yielding response chunks. If ``socket=True``, a socket object for
            the connection. A string containing response data otherwise. If
            ``demux=True``, a tuple with two elements of type byte: stdout and
            stderr.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...

View File

@@ -1,336 +0,0 @@
"""This type stub file was generated by pyright."""
from .. import utils
log = ...
class ImageApiMixin:
    """API endpoints for images: get/save, history, list, import, inspect,
    load, prune, pull, push, remove, search and tag."""

    @utils.check_resource("image")
    def get_image(self, image, chunk_size=...):
        """Get a tarball of an image. Similar to the ``docker save`` command.

        Args:
            image (str): Image name to get
            chunk_size (int): The number of bytes returned by each iteration
                of the generator. If ``None``, data will be streamed as it is
                received. Default: 2 MB

        Returns:
            (generator): A stream of raw archive data.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Example:
            >>> image = client.api.get_image("busybox:latest")
            >>> f = open('/tmp/busybox-latest.tar', 'wb')
            >>> for chunk in image:
            >>>   f.write(chunk)
            >>> f.close()
        """
        ...
    @utils.check_resource("image")
    def history(self, image):
        """Show the history of an image.

        Args:
            image (str): The image to show history for

        Returns:
            (str): The history of the image

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def images(self, name=..., quiet=..., all=..., filters=...):  # -> list[Unknown]:
        """List images. Similar to the ``docker images`` command.

        Args:
            name (str): Only show images belonging to the repository ``name``
            quiet (bool): Only return numeric IDs as a list.
            all (bool): Show intermediate image layers. By default, these are
                filtered out.
            filters (dict): Filters to be processed on the image list.
                Available filters:
                - ``dangling`` (bool)
                - `label` (str|list): format either ``"key"``, ``"key=value"``
                    or a list of such.

        Returns:
            (dict or list): A list if ``quiet=True``, otherwise a dict.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def import_image(self, src=..., repository=..., tag=..., image=..., changes=..., stream_src=...):
        """Import an image. Similar to the ``docker import`` command.

        If ``src`` is a string or unicode string, it will first be treated as a
        path to a tarball on the local system. If there is an error reading
        from that file, ``src`` will be treated as a URL instead to fetch the
        image from. You can also pass an open file handle as ``src``, in which
        case the data will be read from that file.

        If ``src`` is unset but ``image`` is set, the ``image`` parameter will
        be taken as the name of an existing image to import from.

        Args:
            src (str or file): Path to tarfile, URL, or file-like object
            repository (str): The repository to create
            tag (str): The tag to apply
            image (str): Use another image like the ``FROM`` Dockerfile
                parameter
        """
        ...
    def import_image_from_data(self, data, repository=..., tag=..., changes=...):
        """Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but
        allows importing in-memory bytes data.

        Args:
            data (bytes collection): Bytes collection containing valid tar data
            repository (str): The repository to create
            tag (str): The tag to apply
        """
        ...
    def import_image_from_file(self, filename, repository=..., tag=..., changes=...):
        """Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
        supports importing from a tar file on disk.

        Args:
            filename (str): Full path to a tar file.
            repository (str): The repository to create
            tag (str): The tag to apply

        Raises:
            IOError: File does not exist.
        """
        ...
    def import_image_from_stream(self, stream, repository=..., tag=..., changes=...): ...
    def import_image_from_url(self, url, repository=..., tag=..., changes=...):
        """Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
        supports importing from a URL.

        Args:
            url (str): A URL pointing to a tar file.
            repository (str): The repository to create
            tag (str): The tag to apply
        """
        ...
    def import_image_from_image(self, image, repository=..., tag=..., changes=...):
        """Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
        supports importing from another image, like the ``FROM`` Dockerfile
        parameter.

        Args:
            image (str): Image name to import from
            repository (str): The repository to create
            tag (str): The tag to apply
        """
        ...
    @utils.check_resource("image")
    def inspect_image(self, image):
        """Get detailed information about an image. Similar to the ``docker
        inspect`` command, but only for images.

        Args:
            image (str): The image to inspect

        Returns:
            (dict): Similar to the output of ``docker inspect``, but as a
                single dict

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    @utils.minimum_version("1.30")
    @utils.check_resource("image")
    def inspect_distribution(self, image, auth_config=...):
        """Get image digest and platform information by contacting the registry.

        Args:
            image (str): The image name to inspect
            auth_config (dict): Override the credentials that are found in the
                config for this request. ``auth_config`` should contain the
                ``username`` and ``password`` keys to be valid.

        Returns:
            (dict): A dict containing distribution data

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def load_image(self, data, quiet=...):  # -> None:
        """Load an image that was previously saved using
        :py:meth:`~docker.api.image.ImageApiMixin.get_image` (or ``docker
        save``). Similar to ``docker load``.

        Args:
            data (binary): Image data to be loaded.
            quiet (boolean): Suppress progress details in response.

        Returns:
            (generator): Progress output as JSON objects. Only available for
                API version >= 1.23

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    @utils.minimum_version("1.25")
    def prune_images(self, filters=...):
        """Delete unused images.

        Args:
            filters (dict): Filters to process on the prune list.
                Available filters:
                - dangling (bool): When set to true (or 1), prune only
                unused and untagged images.

        Returns:
            (dict): A dict containing a list of deleted image IDs and
                the amount of disk space reclaimed in bytes.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def pull(self, repository, tag=..., stream=..., auth_config=..., decode=..., platform=..., all_tags=...):
        """Pulls an image. Similar to the ``docker pull`` command.

        Args:
            repository (str): The repository to pull
            tag (str): The tag to pull. If ``tag`` is ``None`` or empty, it
                is set to ``latest``.
            stream (bool): Stream the output as a generator. Make sure to
                consume the generator, otherwise pull might get cancelled.
            auth_config (dict): Override the credentials that are found in the
                config for this request. ``auth_config`` should contain the
                ``username`` and ``password`` keys to be valid.
            decode (bool): Decode the JSON data from the server into dicts.
                Only applies with ``stream=True``
            platform (str): Platform in the format ``os[/arch[/variant]]``
            all_tags (bool): Pull all image tags, the ``tag`` parameter is
                ignored.

        Returns:
            (generator or str): The output

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Example:
            >>> resp = client.api.pull('busybox', stream=True, decode=True)
            ... for line in resp:
            ...     print(json.dumps(line, indent=4))
            {
                "status": "Pulling image (latest) from busybox",
                "progressDetail": {},
                "id": "e72ac664f4f0"
            }
            {
                "status": "Pulling image (latest) from busybox, endpoint: ...",
                "progressDetail": {},
                "id": "e72ac664f4f0"
            }
        """
        ...
    def push(self, repository, tag=..., stream=..., auth_config=..., decode=...):
        """Push an image or a repository to the registry. Similar to the ``docker
        push`` command.

        Args:
            repository (str): The repository to push to
            tag (str): An optional tag to push
            stream (bool): Stream the output as a blocking generator
            auth_config (dict): Override the credentials that are found in the
                config for this request. ``auth_config`` should contain the
                ``username`` and ``password`` keys to be valid.
            decode (bool): Decode the JSON data from the server into dicts.
                Only applies with ``stream=True``

        Returns:
            (generator or str): The output from the server.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Example:
            >>> resp = client.api.push(
            ...     'yourname/app',
            ...     stream=True,
            ...     decode=True,
            ... )
            ... for line in resp:
            ...   print(line)
            {'status': 'Pushing repository yourname/app (1 tags)'}
            {'status': 'Pushing','progressDetail': {}, 'id': '511136ea3c5a'}
            {'status': 'Image already pushed, skipping', 'progressDetail':{},
             'id': '511136ea3c5a'}
            ...
        """
        ...
    @utils.check_resource("image")
    def remove_image(self, image, force=..., noprune=...):
        """Remove an image. Similar to the ``docker rmi`` command.

        Args:
            image (str): The image to remove
            force (bool): Force removal of the image
            noprune (bool): Do not delete untagged parents
        """
        ...
    def search(self, term, limit=...):
        """Search for images on Docker Hub. Similar to the ``docker search``
        command.

        Args:
            term (str): A term to search for.
            limit (int): The maximum number of results to return.

        Returns:
            (list of dicts): The response of the search.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    @utils.check_resource("image")
    def tag(self, image, repository, tag=..., force=...):
        """Tag an image into a repository. Similar to the ``docker tag`` command.

        Args:
            image (str): The image to tag
            repository (str): The repository to set for the tag
            tag (str): The tag name
            force (bool): Force

        Returns:
            (bool): ``True`` if successful

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Example:
            >>> client.api.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
                force=True)
        """
        ...
def is_file(src): ...  # NOTE(review): stub helper — presumably tests whether *src* is an existing file path; confirm against the implementation

View File

@@ -1,174 +0,0 @@
"""This type stub file was generated by pyright."""
from ..utils import check_resource
from ..utils import minimum_version
class NetworkApiMixin:
def networks(self, names=..., ids=..., filters=...):
"""List networks. Similar to the ``docker network ls`` command.
Args:
names (:py:class:`list`): List of names to filter by
ids (:py:class:`list`): List of ids to filter by
filters (dict): Filters to be processed on the network list.
Available filters:
- ``driver=[<driver-name>]`` Matches a network's driver.
- ``label=[<key>]``, ``label=[<key>=<value>]`` or a list of
such.
- ``type=["custom"|"builtin"]`` Filters networks by type.
Returns:
(dict): List of network objects.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
...
def create_network(
self,
name,
driver=...,
options=...,
ipam=...,
check_duplicate=...,
internal=...,
labels=...,
enable_ipv6=...,
attachable=...,
scope=...,
ingress=...,
):
"""Create a network. Similar to the ``docker network create``.
Args:
name (str): Name of the network
driver (str): Name of the driver used to create the network
options (dict): Driver options as a key-value dictionary
ipam (IPAMConfig): Optional custom IP scheme for the network.
check_duplicate (bool): Request daemon to check for networks with
same name. Default: ``None``.
internal (bool): Restrict external access to the network. Default
``False``.
labels (dict): Map of labels to set on the network. Default
``None``.
enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
attachable (bool): If enabled, and the network is in the global
scope, non-service containers on worker nodes will be able to
connect to the network.
scope (str): Specify the network's scope (``local``, ``global`` or
``swarm``)
ingress (bool): If set, create an ingress network which provides
the routing-mesh in swarm mode.
Returns:
(dict): The created network reference object
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
A network using the bridge driver:
>>> client.api.create_network("network1", driver="bridge")
You can also create more advanced networks with custom IPAM
configurations. For example, setting the subnet to
``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
.. code-block:: python
>>> ipam_pool = docker.types.IPAMPool(
subnet='192.168.52.0/24',
gateway='192.168.52.254'
)
>>> ipam_config = docker.types.IPAMConfig(
pool_configs=[ipam_pool]
)
>>> client.api.create_network("network1", driver="bridge",
ipam=ipam_config)
"""
...
@minimum_version("1.25")
def prune_networks(self, filters=...):
"""Delete unused networks.
Args:
filters (dict): Filters to process on the prune list.
Returns:
(dict): A dict containing a list of deleted network names and
the amount of disk space reclaimed in bytes.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
...
@check_resource("net_id")
def remove_network(self, net_id): # -> None:
"""Remove a network. Similar to the ``docker network rm`` command.
Args:
net_id (str): The network's id
"""
...
@check_resource("net_id")
def inspect_network(self, net_id, verbose=..., scope=...):
"""Get detailed information about a network.
Args:
net_id (str): ID of network
verbose (bool): Show the service details across the cluster in
swarm mode.
scope (str): Filter the network by scope (``swarm``, ``global``
or ``local``).
"""
...
    @check_resource("container")
    def connect_container_to_network(
        self,
        container,
        net_id,
        ipv4_address=...,
        ipv6_address=...,
        aliases=...,
        links=...,
        link_local_ips=...,
        driver_opt=...,
        mac_address=...,
    ): # -> None:
        """Connect a container to a network.
        Args:
            container (str): container-id/name to be connected to the network
            net_id (str): network id
            aliases (:py:class:`list`): A list of aliases for this endpoint.
                Names in that list can be used within the network to reach the
                container. Defaults to ``None``.
            links (:py:class:`list`): A list of links for this endpoint.
                Containers declared in this list will be linked to this
                container. Defaults to ``None``.
            ipv4_address (str): The IP address of this container on the
                network, using the IPv4 protocol. Defaults to ``None``.
            ipv6_address (str): The IP address of this container on the
                network, using the IPv6 protocol. Defaults to ``None``.
            link_local_ips (:py:class:`list`): A list of link-local
                (IPv4/IPv6) addresses.
            driver_opt (dict): Driver-specific options for this endpoint --
                presumably a key/value mapping; TODO confirm against docker-py.
            mac_address (str): The MAC address of this container on the
                network. Defaults to ``None``.
        """
        ...
    @check_resource("container")
    def disconnect_container_from_network(self, container, net_id, force=...): # -> None:
        """Disconnect a container from a network.
        Args:
            container (str): container ID or name to be disconnected from the
                network
            net_id (str): network ID
            force (bool): Force the container to disconnect from a network.
                Default: ``False``
        """
        ...

View File

@@ -1,160 +0,0 @@
"""This type stub file was generated by pyright."""
from .. import utils
class PluginApiMixin:
    """Low-level API mixin exposing the Docker Engine plugin endpoints.

    Every method is gated on Docker API version 1.25 or newer (1.26 for
    :py:meth:`upgrade_plugin`) via the ``utils.minimum_version`` decorators.
    """
    @utils.minimum_version("1.25")
    @utils.check_resource("name")
    def configure_plugin(self, name, options): # -> Literal[True]:
        """Configure a plugin.
        Args:
            name (string): The name of the plugin. The ``:latest`` tag is
                optional, and is the default if omitted.
            options (dict): A key-value mapping of options
        Returns:
            ``True`` if successful
        """
        ...
    @utils.minimum_version("1.25")
    def create_plugin(self, name, plugin_data_dir, gzip=...): # -> Literal[True]:
        """Create a new plugin.
        Args:
            name (string): The name of the plugin. The ``:latest`` tag is
                optional, and is the default if omitted.
            plugin_data_dir (string): Path to the plugin data directory.
                Plugin data directory must contain the ``config.json``
                manifest file and the ``rootfs`` directory.
            gzip (bool): Compress the context using gzip. Default: False
        Returns:
            ``True`` if successful
        """
        ...
    @utils.minimum_version("1.25")
    def disable_plugin(self, name, force=...): # -> Literal[True]:
        """Disable an installed plugin.
        Args:
            name (string): The name of the plugin. The ``:latest`` tag is
                optional, and is the default if omitted.
            force (bool): To enable the force query parameter.
        Returns:
            ``True`` if successful
        """
        ...
    @utils.minimum_version("1.25")
    def enable_plugin(self, name, timeout=...): # -> Literal[True]:
        """Enable an installed plugin.
        Args:
            name (string): The name of the plugin. The ``:latest`` tag is
                optional, and is the default if omitted.
            timeout (int): Operation timeout (in seconds). Default: 0
        Returns:
            ``True`` if successful
        """
        ...
    @utils.minimum_version("1.25")
    def inspect_plugin(self, name):
        """Retrieve plugin metadata.
        Args:
            name (string): The name of the plugin. The ``:latest`` tag is
                optional, and is the default if omitted.
        Returns:
            A dict containing plugin info
        """
        ...
    @utils.minimum_version("1.25")
    def pull_plugin(self, remote, privileges, name=...):
        """Pull and install a plugin. After the plugin is installed, it can be
        enabled using :py:meth:`~enable_plugin`.
        Args:
            remote (string): Remote reference for the plugin to install.
                The ``:latest`` tag is optional, and is the default if
                omitted.
            privileges (:py:class:`list`): A list of privileges the user
                consents to grant to the plugin. Can be retrieved using
                :py:meth:`~plugin_privileges`.
            name (string): Local name for the pulled plugin. The
                ``:latest`` tag is optional, and is the default if omitted.
        Returns:
            An iterable object streaming the decoded API logs
        """
        ...
    @utils.minimum_version("1.25")
    def plugins(self):
        """Retrieve a list of installed plugins.
        Returns:
            A list of dicts, one per plugin
        """
        ...
    @utils.minimum_version("1.25")
    def plugin_privileges(self, name):
        """Retrieve list of privileges to be granted to a plugin.
        Args:
            name (string): Name of the remote plugin to examine. The
                ``:latest`` tag is optional, and is the default if omitted.
        Returns:
            A list of dictionaries representing the plugin's
            permissions
        """
        ...
    @utils.minimum_version("1.25")
    @utils.check_resource("name")
    def push_plugin(self, name):
        """Push a plugin to the registry.
        Args:
            name (string): Name of the plugin to upload. The ``:latest``
                tag is optional, and is the default if omitted.
        Returns:
            ``True`` if successful
        """
        ...
    @utils.minimum_version("1.25")
    @utils.check_resource("name")
    def remove_plugin(self, name, force=...): # -> Literal[True]:
        """Remove an installed plugin.
        Args:
            name (string): Name of the plugin to remove. The ``:latest``
                tag is optional, and is the default if omitted.
            force (bool): Disable the plugin before removing. This may
                result in issues if the plugin is in use by a container.
        Returns:
            ``True`` if successful
        """
        ...
    @utils.minimum_version("1.26")
    @utils.check_resource("name")
    def upgrade_plugin(self, name, remote, privileges):
        """Upgrade an installed plugin.
        Args:
            name (string): Name of the plugin to upgrade. The ``:latest``
                tag is optional and is the default if omitted.
            remote (string): Remote reference to upgrade to. The
                ``:latest`` tag is optional and is the default if omitted.
            privileges (:py:class:`list`): A list of privileges the user
                consents to grant to the plugin. Can be retrieved using
                :py:meth:`~plugin_privileges`.
        Returns:
            An iterable object streaming the decoded API logs
        """
        ...

View File

@@ -1,60 +0,0 @@
"""This type stub file was generated by pyright."""
from .. import utils
class SecretApiMixin:
    """Low-level API mixin exposing the Docker Engine secret endpoints.

    Every method is gated on Docker API version 1.25 or newer via the
    ``utils.minimum_version`` decorators.
    """
    @utils.minimum_version("1.25")
    def create_secret(self, name, data, labels=..., driver=...):
        """Create a secret.
        Args:
            name (string): Name of the secret
            data (bytes): Secret data to be stored
            labels (dict): A mapping of labels to assign to the secret
            driver (DriverConfig): A custom driver configuration. If
                unspecified, the default ``internal`` driver will be used
        Returns (dict): ID of the newly created secret
        """
        ...
    @utils.minimum_version("1.25")
    @utils.check_resource("id")
    def inspect_secret(self, id):
        """Retrieve secret metadata.
        Args:
            id (string): Full ID of the secret to inspect
        Returns (dict): A dictionary of metadata
        Raises:
            :py:class:`docker.errors.NotFound`
                if no secret with that ID exists
        """
        ...
    @utils.minimum_version("1.25")
    @utils.check_resource("id")
    def remove_secret(self, id): # -> Literal[True]:
        """Remove a secret.
        Args:
            id (string): Full ID of the secret to remove
        Returns (boolean): True if successful
        Raises:
            :py:class:`docker.errors.NotFound`
                if no secret with that ID exists
        """
        ...
    @utils.minimum_version("1.25")
    def secrets(self, filters=...):
        """List secrets.
        Args:
            filters (dict): A map of filters to process on the secrets
                list. Available filters: ``names``
        Returns (list): A list of secrets
        """
        ...

View File

@@ -1,217 +0,0 @@
"""This type stub file was generated by pyright."""
from .. import utils
class ServiceApiMixin:
    """Low-level API mixin exposing the Docker Engine swarm-service and
    task endpoints.

    Methods are gated on Docker API version 1.24 or newer (1.25 for
    :py:meth:`service_logs`) via the ``utils.minimum_version`` decorators.
    """
    @utils.minimum_version("1.24")
    def create_service(
        self,
        task_template,
        name=...,
        labels=...,
        mode=...,
        update_config=...,
        networks=...,
        endpoint_config=...,
        endpoint_spec=...,
        rollback_config=...,
    ):
        """Create a service.
        Args:
            task_template (TaskTemplate): Specification of the task to start as
                part of the new service.
            name (string): User-defined name for the service. Optional.
            labels (dict): A map of labels to associate with the service.
                Optional.
            mode (ServiceMode): Scheduling mode for the service (replicated
                or global). Defaults to replicated.
            update_config (UpdateConfig): Specification for the update strategy
                of the service. Default: ``None``
            rollback_config (RollbackConfig): Specification for the rollback
                strategy of the service. Default: ``None``
            networks (:py:class:`list`): List of network names or IDs or
                :py:class:`~docker.types.NetworkAttachmentConfig` to attach the
                service to. Default: ``None``.
            endpoint_spec (EndpointSpec): Properties that can be configured to
                access and load balance a service. Default: ``None``.
        Returns:
            A dictionary containing an ``ID`` key for the newly created
            service.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    @utils.minimum_version("1.24")
    @utils.check_resource("service")
    def inspect_service(self, service, insert_defaults=...):
        """Return information about a service.
        Args:
            service (str): Service name or ID.
            insert_defaults (boolean): If true, default values will be merged
                into the service inspect output.
        Returns:
            (dict): A dictionary of the server-side representation of the
                service, including all relevant properties.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    @utils.minimum_version("1.24")
    @utils.check_resource("task")
    def inspect_task(self, task):
        """Retrieve information about a task.
        Args:
            task (str): Task ID
        Returns:
            (dict): Information about the task.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    @utils.minimum_version("1.24")
    @utils.check_resource("service")
    def remove_service(self, service): # -> Literal[True]:
        """Stop and remove a service.
        Args:
            service (str): Service name or ID
        Returns:
            ``True`` if successful.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    @utils.minimum_version("1.24")
    def services(self, filters=..., status=...):
        """List services.
        Args:
            filters (dict): Filters to process on the nodes list. Valid
                filters: ``id``, ``name`` , ``label`` and ``mode``.
                Default: ``None``.
            status (bool): Include the service task count of running and
                desired tasks. Default: ``None``.
        Returns:
            A list of dictionaries containing data about each service.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    @utils.minimum_version("1.25")
    @utils.check_resource("service")
    def service_logs(
        self, service, details=..., follow=..., stdout=..., stderr=..., since=..., timestamps=..., tail=..., is_tty=...
    ):
        """Get log stream for a service.
        Note: This endpoint works only for services with the ``json-file``
        or ``journald`` logging drivers.
        Args:
            service (str): ID or name of the service
            details (bool): Show extra details provided to logs.
                Default: ``False``
            follow (bool): Keep connection open to read logs as they are
                sent by the Engine. Default: ``False``
            stdout (bool): Return logs from ``stdout``. Default: ``False``
            stderr (bool): Return logs from ``stderr``. Default: ``False``
            since (int): UNIX timestamp for the logs staring point.
                Default: 0
            timestamps (bool): Add timestamps to every log line.
            tail (string or int): Number of log lines to be returned,
                counting from the current end of the logs. Specify an
                integer or ``'all'`` to output all log lines.
                Default: ``all``
            is_tty (bool): Whether the service's :py:class:`ContainerSpec`
                enables the TTY option. If omitted, the method will query
                the Engine for the information, causing an additional
                roundtrip.
        Returns (generator): Logs for the service.
        """
        ...
    @utils.minimum_version("1.24")
    def tasks(self, filters=...):
        """Retrieve a list of tasks.
        Args:
            filters (dict): A map of filters to process on the tasks list.
                Valid filters: ``id``, ``name``, ``service``, ``node``,
                ``label`` and ``desired-state``.
        Returns:
            (:py:class:`list`): List of task dictionaries.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    @utils.minimum_version("1.24")
    @utils.check_resource("service")
    def update_service(
        self,
        service,
        version,
        task_template=...,
        name=...,
        labels=...,
        mode=...,
        update_config=...,
        networks=...,
        endpoint_config=...,
        endpoint_spec=...,
        fetch_current_spec=...,
        rollback_config=...,
    ):
        """Update a service.
        Args:
            service (string): A service identifier (either its name or service
                ID).
            version (int): The version number of the service object being
                updated. This is required to avoid conflicting writes.
            task_template (TaskTemplate): Specification of the updated task to
                start as part of the service.
            name (string): New name for the service. Optional.
            labels (dict): A map of labels to associate with the service.
                Optional.
            mode (ServiceMode): Scheduling mode for the service (replicated
                or global). Defaults to replicated.
            update_config (UpdateConfig): Specification for the update strategy
                of the service. Default: ``None``.
            rollback_config (RollbackConfig): Specification for the rollback
                strategy of the service. Default: ``None``
            networks (:py:class:`list`): List of network names or IDs or
                :py:class:`~docker.types.NetworkAttachmentConfig` to attach the
                service to. Default: ``None``.
            endpoint_spec (EndpointSpec): Properties that can be configured to
                access and load balance a service. Default: ``None``.
            fetch_current_spec (boolean): Use the undefined settings from the
                current specification of the service. Default: ``False``
        Returns:
            A dictionary containing a ``Warnings`` key.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...

View File

@@ -1,318 +0,0 @@
"""This type stub file was generated by pyright."""
from .. import utils
log = ...
class SwarmApiMixin:
    """Low-level API mixin exposing the Docker Engine swarm and node
    endpoints.

    All endpoints except :py:meth:`create_swarm_spec` are gated on Docker
    API version 1.24 or newer via the ``utils.minimum_version`` decorators.
    """
    def create_swarm_spec(self, *args, **kwargs): # -> SwarmSpec:
        """Create a :py:class:`docker.types.SwarmSpec` instance that can be used
        as the ``swarm_spec`` argument in
        :py:meth:`~docker.api.swarm.SwarmApiMixin.init_swarm`.
        Args:
            task_history_retention_limit (int): Maximum number of tasks
                history stored.
            snapshot_interval (int): Number of logs entries between snapshot.
            keep_old_snapshots (int): Number of snapshots to keep beyond the
                current snapshot.
            log_entries_for_slow_followers (int): Number of log entries to
                keep around to sync up slow followers after a snapshot is
                created.
            heartbeat_tick (int): Amount of ticks (in seconds) between each
                heartbeat.
            election_tick (int): Amount of ticks (in seconds) needed without a
                leader to trigger a new election.
            dispatcher_heartbeat_period (int): The delay for an agent to send
                a heartbeat to the dispatcher.
            node_cert_expiry (int): Automatic expiry for nodes certificates.
            external_cas (:py:class:`list`): Configuration for forwarding
                signing requests to an external certificate authority. Use
                a list of :py:class:`docker.types.SwarmExternalCA`.
            name (string): Swarm's name
            labels (dict): User-defined key/value metadata.
            signing_ca_cert (str): The desired signing CA certificate for all
                swarm node TLS leaf certificates, in PEM format.
            signing_ca_key (str): The desired signing CA key for all swarm
                node TLS leaf certificates, in PEM format.
            ca_force_rotate (int): An integer whose purpose is to force swarm
                to generate a new signing CA certificate and key, if none have
                been specified.
            autolock_managers (boolean): If set, generate a key and use it to
                lock data stored on the managers.
            log_driver (DriverConfig): The default log driver to use for tasks
                created in the orchestrator.
        Returns:
            :py:class:`docker.types.SwarmSpec`
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        Example:
            >>> spec = client.api.create_swarm_spec(
                snapshot_interval=5000, log_entries_for_slow_followers=1200
            )
            >>> client.api.init_swarm(
                advertise_addr='eth0', listen_addr='0.0.0.0:5000',
                force_new_cluster=False, swarm_spec=spec
            )
        """
        ...
    @utils.minimum_version("1.24")
    def get_unlock_key(self):
        """Get the unlock key for this Swarm manager.
        Returns:
            A ``dict`` containing an ``UnlockKey`` member
        """
        ...
    @utils.minimum_version("1.24")
    def init_swarm(
        self,
        advertise_addr=...,
        listen_addr=...,
        force_new_cluster=...,
        swarm_spec=...,
        default_addr_pool=...,
        subnet_size=...,
        data_path_addr=...,
        data_path_port=...,
    ):
        """Initialize a new Swarm using the current connected engine as the first
        node.
        Args:
            advertise_addr (string): Externally reachable address advertised
                to other nodes. This can either be an address/port combination
                in the form ``192.168.1.1:4567``, or an interface followed by a
                port number, like ``eth0:4567``. If the port number is omitted,
                the port number from the listen address is used. If
                ``advertise_addr`` is not specified, it will be automatically
                detected when possible. Default: None
            listen_addr (string): Listen address used for inter-manager
                communication, as well as determining the networking interface
                used for the VXLAN Tunnel Endpoint (VTEP). This can either be
                an address/port combination in the form ``192.168.1.1:4567``,
                or an interface followed by a port number, like ``eth0:4567``.
                If the port number is omitted, the default swarm listening port
                is used. Default: '0.0.0.0:2377'
            force_new_cluster (bool): Force creating a new Swarm, even if
                already part of one. Default: False
            swarm_spec (dict): Configuration settings of the new Swarm. Use
                ``APIClient.create_swarm_spec`` to generate a valid
                configuration. Default: None
            default_addr_pool (list of strings): Default Address Pool specifies
                default subnet pools for global scope networks. Each pool
                should be specified as a CIDR block, like '10.0.0.0/8'.
                Default: None
            subnet_size (int): SubnetSize specifies the subnet size of the
                networks created from the default subnet pool. Default: None
            data_path_addr (string): Address or interface to use for data path
                traffic. For example, 192.168.1.1, or an interface, like eth0.
            data_path_port (int): Port number to use for data path traffic.
                Acceptable port range is 1024 to 49151. If set to ``None`` or
                0, the default port 4789 will be used. Default: None
        Returns:
            (str): The ID of the created node.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    @utils.minimum_version("1.24")
    def inspect_swarm(self):
        """Retrieve low-level information about the current swarm.
        Returns:
            A dictionary containing data about the swarm.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    @utils.check_resource("node_id")
    @utils.minimum_version("1.24")
    def inspect_node(self, node_id):
        """Retrieve low-level information about a swarm node.
        Args:
            node_id (string): ID of the node to be inspected.
        Returns:
            A dictionary containing data about this node.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    @utils.minimum_version("1.24")
    def join_swarm(
        self, remote_addrs, join_token, listen_addr=..., advertise_addr=..., data_path_addr=...
    ): # -> Literal[True]:
        """Make this Engine join a swarm that has already been created.
        Args:
            remote_addrs (:py:class:`list`): Addresses of one or more manager
                nodes already participating in the Swarm to join.
            join_token (string): Secret token for joining this Swarm.
            listen_addr (string): Listen address used for inter-manager
                communication if the node gets promoted to manager, as well as
                determining the networking interface used for the VXLAN Tunnel
                Endpoint (VTEP). Default: ``'0.0.0.0:2377'``
            advertise_addr (string): Externally reachable address advertised
                to other nodes. This can either be an address/port combination
                in the form ``192.168.1.1:4567``, or an interface followed by a
                port number, like ``eth0:4567``. If the port number is omitted,
                the port number from the listen address is used. If
                AdvertiseAddr is not specified, it will be automatically
                detected when possible. Default: ``None``
            data_path_addr (string): Address or interface to use for data path
                traffic. For example, 192.168.1.1, or an interface, like eth0.
        Returns:
            ``True`` if the request went through.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    @utils.minimum_version("1.24")
    def leave_swarm(self, force=...): # -> Literal[True]:
        """Leave a swarm.
        Args:
            force (bool): Leave the swarm even if this node is a manager.
                Default: ``False``
        Returns:
            ``True`` if the request went through.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    @utils.minimum_version("1.24")
    def nodes(self, filters=...):
        """List swarm nodes.
        Args:
            filters (dict): Filters to process on the nodes list. Valid
                filters: ``id``, ``name``, ``membership`` and ``role``.
                Default: ``None``
        Returns:
            A list of dictionaries containing data about each swarm node.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    @utils.check_resource("node_id")
    @utils.minimum_version("1.24")
    def remove_node(self, node_id, force=...): # -> Literal[True]:
        """Remove a node from the swarm.
        Args:
            node_id (string): ID of the node to be removed.
            force (bool): Force remove an active node. Default: `False`
        Raises:
            :py:class:`docker.errors.NotFound`
                If the node referenced doesn't exist in the swarm.
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        Returns:
            `True` if the request was successful.
        """
        ...
    @utils.minimum_version("1.24")
    def unlock_swarm(self, key): # -> Literal[True]:
        """Unlock a locked swarm.
        Args:
            key (string): The unlock key as provided by
                :py:meth:`get_unlock_key`
        Raises:
            :py:class:`docker.errors.InvalidArgument`
                If the key argument is in an incompatible format
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        Returns:
            `True` if the request was successful.
        Example:
            >>> key = client.api.get_unlock_key()
            >>> client.unlock_swarm(key)
        """
        ...
    @utils.minimum_version("1.24")
    def update_node(self, node_id, version, node_spec=...): # -> Literal[True]:
        """Update the node's configuration.
        Args:
            node_id (string): ID of the node to be updated.
            version (int): The version number of the node object being
                updated. This is required to avoid conflicting writes.
            node_spec (dict): Configuration settings to update. Any values
                not provided will be removed. Default: ``None``
        Returns:
            `True` if the request went through.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        Example:
            >>> node_spec = {'Availability': 'active',
                         'Name': 'node-name',
                         'Role': 'manager',
                         'Labels': {'foo': 'bar'}
                        }
            >>> client.api.update_node(node_id='24ifsmvkjbyhk', version=8,
                node_spec=node_spec)
        """
        ...
    @utils.minimum_version("1.24")
    def update_swarm(
        self, version, swarm_spec=..., rotate_worker_token=..., rotate_manager_token=..., rotate_manager_unlock_key=...
    ): # -> Literal[True]:
        """Update the Swarm's configuration.
        Args:
            version (int): The version number of the swarm object being
                updated. This is required to avoid conflicting writes.
            swarm_spec (dict): Configuration settings to update. Use
                :py:meth:`~docker.api.swarm.SwarmApiMixin.create_swarm_spec` to
                generate a valid configuration. Default: ``None``.
            rotate_worker_token (bool): Rotate the worker join token. Default:
                ``False``.
            rotate_manager_token (bool): Rotate the manager join token.
                Default: ``False``.
            rotate_manager_unlock_key (bool): Rotate the manager unlock key.
                Default: ``False``.
        Returns:
            ``True`` if the request went through.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...

View File

@@ -1,112 +0,0 @@
"""This type stub file was generated by pyright."""
from .. import utils
class VolumeApiMixin:
    """Low-level API mixin exposing the Docker Engine volume endpoints
    (:py:meth:`prune_volumes` requires Docker API 1.25+, per its
    ``utils.minimum_version`` decorator).
    """
    def volumes(self, filters=...):
        """List volumes currently registered by the docker daemon. Similar to the
        ``docker volume ls`` command.
        Args:
            filters (dict): Server-side list filtering options.
        Returns:
            (dict): Dictionary with list of volume objects as value of the
                ``Volumes`` key.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        Example:
            >>> client.api.volumes()
            {u'Volumes': [{u'Driver': u'local',
                           u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
                           u'Name': u'foobar'},
                          {u'Driver': u'local',
                           u'Mountpoint': u'/var/lib/docker/volumes/baz/_data',
                           u'Name': u'baz'}]}
        """
        ...
    def create_volume(self, name=..., driver=..., driver_opts=..., labels=...):
        """Create and register a named volume.
        Args:
            name (str): Name of the volume
            driver (str): Name of the driver used to create the volume
            driver_opts (dict): Driver options as a key-value dictionary
            labels (dict): Labels to set on the volume
        Returns:
            (dict): The created volume reference object
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        Example:
            >>> volume = client.api.create_volume(
            ...     name='foobar',
            ...     driver='local',
            ...     driver_opts={'foo': 'bar', 'baz': 'false'},
            ...     labels={"key": "value"},
            ... )
            ... print(volume)
            {u'Driver': u'local',
             u'Labels': {u'key': u'value'},
             u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
             u'Name': u'foobar',
             u'Scope': u'local'}
        """
        ...
    def inspect_volume(self, name):
        """Retrieve volume info by name.
        Args:
            name (str): volume name
        Returns:
            (dict): Volume information dictionary
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        Example:
            >>> client.api.inspect_volume('foobar')
            {u'Driver': u'local',
             u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
             u'Name': u'foobar'}
        """
        ...
    @utils.minimum_version("1.25")
    def prune_volumes(self, filters=...):
        """Delete unused volumes.
        Args:
            filters (dict): Filters to process on the prune list.
        Returns:
            (dict): A dict containing a list of deleted volume names and
                the amount of disk space reclaimed in bytes.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def remove_volume(self, name, force=...): # -> None:
        """Remove a volume. Similar to the ``docker volume rm`` command.
        Args:
            name (str): The volume's name
            force (bool): Force removal of volumes that were already removed
                out of band by the volume driver plugin.
        Raises:
            :py:class:`docker.errors.APIError`
                If volume failed to remove.
        """
        ...

View File

@@ -1,83 +0,0 @@
from typing import Any
from typing import Self
from .models.containers import ContainerCollection
class DockerClient:
    """A client for communicating with a Docker server.
    Example:
        >>> import docker
        >>> client = docker.DockerClient(base_url='unix://var/run/docker.sock')
    Args:
        base_url (str): URL to the Docker server. For example,
            ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
        version (str): The version of the API to use. Set to ``auto`` to
            automatically detect the server's version. Default: ``1.35``
        timeout (int): Default timeout for API calls, in seconds.
        tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
            ``True`` to enable it with default options, or pass a
            :py:class:`~docker.tls.TLSConfig` object to use custom
            configuration.
        user_agent (str): Set a custom user agent for requests to the server.
        credstore_env (dict): Override environment variables when calling the
            credential store process.
        use_ssh_client (bool): If set to `True`, an ssh connection is made
            via shelling out to the ssh client. Ensure the ssh client is
            installed and configured on the host.
        max_pool_size (int): The maximum number of connections
            to save in the pool.
    """
    @classmethod
    def from_env(cls, **kwargs: Any) -> Self:
        """Return a client configured from environment variables.
        The environment variables used are the same as those used by the
        Docker command-line client. They are:
        .. envvar:: DOCKER_HOST
            The URL to the Docker host.
        .. envvar:: DOCKER_TLS_VERIFY
            Verify the host against a CA certificate.
        .. envvar:: DOCKER_CERT_PATH
            A path to a directory containing TLS certificates to use when
            connecting to the Docker host.
        Args:
            version (str): The version of the API to use. Set to ``auto`` to
                automatically detect the server's version. Default: ``auto``
            timeout (int): Default timeout for API calls, in seconds.
            max_pool_size (int): The maximum number of connections
                to save in the pool.
            ssl_version (int): A valid `SSL version`_.
            assert_hostname (bool): Verify the hostname of the server.
            environment (dict): The environment to read environment variables
                from. Default: the value of ``os.environ``
            credstore_env (dict): Override environment variables when calling
                the credential store process.
            use_ssh_client (bool): If set to `True`, an ssh connection is
                made via shelling out to the ssh client. Ensure the ssh
                client is installed and configured on the host.
        Example:
            >>> import docker
            >>> client = docker.from_env()
        .. _`SSL version`:
            https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
        """
    # NOTE(review): only the ``containers`` collection is typed in this stub;
    # the other docker-py collections (images, networks, volumes, ...) are
    # omitted here.
    @property
    def containers(self) -> ContainerCollection:
        """An object for managing containers on the server. See the
        :doc:`containers documentation <containers>` for full details.
        """
        ...
# Module-level convenience wrapper mirroring ``DockerClient.from_env``.
def from_env(**attrs: Any) -> DockerClient: ...

View File

@@ -1,19 +0,0 @@
# Default configuration constants for the docker-py client.  All values are
# elided (``...``) in this stub; see ``docker.constants`` in docker-py for the
# concrete values.
DEFAULT_DOCKER_API_VERSION = ...
MINIMUM_DOCKER_API_VERSION = ...
DEFAULT_TIMEOUT_SECONDS = ...
STREAM_HEADER_SIZE_BYTES = ...
CONTAINER_LIMITS_KEYS = ...
DEFAULT_HTTP_HOST = ...
DEFAULT_UNIX_SOCKET = ...
DEFAULT_NPIPE = ...
BYTE_UNITS = ...
INSECURE_REGISTRY_DEPRECATION_WARNING = ...
IS_WINDOWS_PLATFORM = ...
WINDOWS_LONGPATH_PREFIX = ...
DEFAULT_USER_AGENT = ...
DEFAULT_NUM_POOLS = ...
DEFAULT_NUM_POOLS_SSH = ...
DEFAULT_MAX_POOL_SIZE = ...
DEFAULT_DATA_CHUNK_SIZE = ...
DEFAULT_SWARM_ADDR_POOL = ...
DEFAULT_SWARM_SUBNET_SIZE = ...

View File

@@ -1,2 +0,0 @@
from .api import ContextAPI as ContextAPI
from .context import Context as Context

View File

@@ -1,129 +0,0 @@
"""This type stub file was generated by pyright."""
class ContextAPI:
    """Context API.
    Contains methods for context management:
    create, list, remove, get, inspect.
    """
    DEFAULT_CONTEXT = ...
    @classmethod
    def create_context(
        cls, name, orchestrator=..., host=..., tls_cfg=..., default_namespace=..., skip_tls_verify=...
    ): # -> Context:
        """Creates a new context.
        Returns:
            (Context): a Context object.
        Raises:
            :py:class:`docker.errors.MissingContextParameter`
                If a context name is not provided.
            :py:class:`docker.errors.ContextAlreadyExists`
                If a context with the name already exists.
            :py:class:`docker.errors.ContextException`
                If name is default.
        Example:
            >>> from docker.context import ContextAPI
            >>> ctx = ContextAPI.create_context(name='test')
            >>> print(ctx.Metadata)
            {
                "Name": "test",
                "Metadata": {},
                "Endpoints": {
                    "docker": {
                        "Host": "unix:///var/run/docker.sock",
                        "SkipTLSVerify": false
                    }
                }
            }
        """
        ...
    @classmethod
    def get_context(cls, name=...): # -> Context | None:
        """Retrieves a context object.
        Args:
            name (str): The name of the context.
        Example:
            >>> from docker.context import ContextAPI
            >>> ctx = ContextAPI.get_context(name='test')
            >>> print(ctx.Metadata)
            {
                "Name": "test",
                "Metadata": {},
                "Endpoints": {
                    "docker": {
                        "Host": "unix:///var/run/docker.sock",
                        "SkipTLSVerify": false
                    }
                }
            }
        """
        ...
    @classmethod
    def contexts(cls): # -> list[Context]:
        """Context list.
        Returns:
            (Context): List of context objects.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    @classmethod
    def get_current_context(cls): # -> Context | None:
        """Get current context.
        Returns:
            (Context): current context object.
        """
        ...
    # Make ``name`` the current context -- presumably persisted to the Docker
    # config file; TODO confirm against docker-py.
    @classmethod
    def set_current_context(cls, name=...): ...
    @classmethod
    def remove_context(cls, name): # -> None:
        """Remove a context. Similar to the ``docker context rm`` command.
        Args:
            name (str): The name of the context
        Raises:
            :py:class:`docker.errors.MissingContextParameter`
                If a context name is not provided.
            :py:class:`docker.errors.ContextNotFound`
                If a context with the name does not exist.
            :py:class:`docker.errors.ContextException`
                If name is default.
        Example:
            >>> from docker.context import ContextAPI
            >>> ContextAPI.remove_context(name='test')
            >>>
        """
        ...
    @classmethod
    def inspect_context(
        cls, name=...
    ): # -> dict[str, str | dict[str, Unknown] | dict[Unknown | str, dict[str, bytes | Unknown | str | bool]]] | dict[str, Unknown | dict[str, Unknown] | dict[Unknown | str, dict[str, bytes | Unknown | str | bool]]]:
        """Inspect a context. Similar to the ``docker context inspect`` command.
        Args:
            name (str): The name of the context
        Raises:
            :py:class:`docker.errors.MissingContextParameter`
                If a context name is not provided.
            :py:class:`docker.errors.ContextNotFound`
                If a context with the name does not exist.
        Example:
            >>> from docker.context import ContextAPI
            >>> ContextAPI.inspect_context(name='test')
            >>>
        """
        ...

View File

@@ -1,12 +0,0 @@
"""This type stub file was generated by pyright."""
# Helpers for locating Docker context metadata / TLS material on disk and for
# reading or writing the current-context name in the Docker config file --
# semantics inferred from the names; confirm against docker.context.config.
METAFILE = ...  # per-context metadata file name -- presumably; TODO confirm
def get_current_context_name(): ...
def write_context_name_to_docker_config(name=...): ...
def get_context_id(name): ...
def get_context_dir(): ...
def get_meta_dir(name=...): ...
def get_meta_file(name): ...
def get_tls_dir(name=..., endpoint=...): ...
def get_context_host(path=..., tls=...): ...

View File

@@ -1,29 +0,0 @@
"""This type stub file was generated by pyright."""
class Context:
    """A Docker CLI context: a named endpoint configuration (host,
    orchestrator, endpoints, TLS material)."""

    def __init__(self, name, orchestrator=..., host=..., endpoints=..., tls=...) -> None: ...
    # Add or replace a named endpoint on this context.
    def set_endpoint(self, name=..., host=..., tls_cfg=..., skip_tls_verify=..., def_namespace=...): ...
    def inspect(self): ...
    # Alternate constructor: load a persisted context by name.
    @classmethod
    def load_context(cls, name): ...
    def save(self): ...
    def remove(self): ...
    def __repr__(self): ...
    def __call__(self): ...
    def is_docker_host(self): ...
    # Read-only views of the context metadata (CLI-style capitalized
    # names mirror the ``docker context inspect`` JSON keys).
    @property
    def Name(self): ...
    @property
    def Host(self): ...
    @property
    def Orchestrator(self): ...
    @property
    def Metadata(self): ...
    @property
    def TLSConfig(self): ...
    @property
    def TLSMaterial(self): ...
    @property
    def Storage(self): ...

View File

@@ -1,7 +0,0 @@
from .constants import DEFAULT_LINUX_STORE as DEFAULT_LINUX_STORE
from .constants import DEFAULT_OSX_STORE as DEFAULT_OSX_STORE
from .constants import DEFAULT_WIN32_STORE as DEFAULT_WIN32_STORE
from .constants import PROGRAM_PREFIX as PROGRAM_PREFIX
from .errors import CredentialsNotFound as CredentialsNotFound
from .errors import StoreError as StoreError
from .store import Store as Store

View File

@@ -1,4 +0,0 @@
# Credential-helper constants; values elided by the stub generator.
# Presumably the ``docker-credential-`` program prefix and the default
# helper per platform — TODO(review): confirm against
# docker.credentials.constants.
PROGRAM_PREFIX = ...
DEFAULT_LINUX_STORE = ...
DEFAULT_OSX_STORE = ...
DEFAULT_WIN32_STORE = ...

View File

@@ -1,7 +0,0 @@
"""This type stub file was generated by pyright."""
# Error hierarchy for the credential-helper store, rooted at StoreError.
class StoreError(RuntimeError): ...
class CredentialsNotFound(StoreError): ...
class InitializationError(StoreError): ...

# Translate a helper subprocess error (``cpe``) for ``program`` into one
# of the exceptions above.
def process_store_error(cpe, program): ...

View File

@@ -1,27 +0,0 @@
"""This type stub file was generated by pyright."""
class Store:
    """Interface to an external Docker credential-helper program."""

    def __init__(self, program, environment=...) -> None:
        """Create a store object that acts as an interface to
        perform the basic operations for storing, retrieving
        and erasing credentials using `program`.
        """
        ...
    def get(self, server):  # -> Any:
        """Retrieve credentials for `server`. If no credentials are found,
        a `StoreError` will be raised.
        """
        ...
    def store(self, server, username, secret):  # -> bytes:
        """Store credentials for `server`. Raises a `StoreError` if an error
        occurs.
        """
        ...
    def erase(self, server):  # -> None:
        """Erase credentials for `server`. Raises a `StoreError` if an error
        occurs.
        """
        ...
    def list(self):  # -> Any:
        """List stored credentials. Requires v0.4.0+ of the helper."""
        ...

View File

@@ -1,5 +0,0 @@
"""This type stub file was generated by pyright."""
def create_environment_dict(overrides):  # -> dict[str, str]:
    """Create and return a copy of os.environ with the specified overrides."""
    ...

View File

@@ -1,62 +0,0 @@
import requests
class DockerException(Exception):
    """A base class from which all other exceptions inherit.

    If you want to catch all errors that the Docker SDK might raise,
    catch this base exception.
    """

# NOTE(review): rendered flush with the class body in this stub dump
# (indentation was stripped); module-level in docker.errors, restored
# accordingly — confirm against the implementation.
def create_api_error_from_http_exception(e):
    """Create a suitable APIError from requests.exceptions.HTTPError."""
    ...

class APIError(requests.exceptions.HTTPError, DockerException):
    """An HTTP error from the API."""

    def __init__(self, message, response=..., explanation=...) -> None: ...
    @property
    def status_code(self): ...
    def is_error(self): ...
    def is_client_error(self): ...
    def is_server_error(self): ...

class NotFound(APIError): ...
class ImageNotFound(NotFound): ...
class InvalidVersion(DockerException): ...
class InvalidRepository(DockerException): ...
class InvalidConfigFile(DockerException): ...
class InvalidArgument(DockerException): ...
class DeprecatedMethod(DockerException): ...

class TLSParameterError(DockerException):
    def __init__(self, msg) -> None: ...

class NullResource(DockerException, ValueError): ...

class ContainerError(DockerException):
    """Represents a container that has exited with a non-zero exit code."""

    def __init__(self, container, exit_status, command, image, stderr) -> None: ...

class StreamParseError(RuntimeError):
    def __init__(self, reason) -> None: ...

class BuildError(DockerException):
    def __init__(self, reason, build_log) -> None: ...

class ImageLoadError(DockerException): ...

def create_unexpected_kwargs_error(name, kwargs): ...

class MissingContextParameter(DockerException):
    def __init__(self, param) -> None: ...

class ContextAlreadyExists(DockerException):
    def __init__(self, name) -> None: ...

class ContextException(DockerException):
    def __init__(self, msg) -> None: ...

class ContextNotFound(DockerException):
    def __init__(self, name) -> None: ...
View File

@@ -1 +0,0 @@
"""This type stub file was generated by pyright."""

View File

@@ -1,56 +0,0 @@
"""This type stub file was generated by pyright."""
from .resource import Collection
from .resource import Model
class Config(Model):
    """A config."""

    # Configs are keyed by a non-default attribute; value elided by the
    # stub generator.
    id_attribute = ...
    def __repr__(self): ...
    @property
    def name(self): ...
    def remove(self):
        """Remove this config.

        Raises:
            :py:class:`docker.errors.APIError`
                If config failed to remove.
        """
        ...
class ConfigCollection(Collection):
    """Configs on the Docker server."""

    model = Config

    def create(self, **kwargs): ...
    def get(self, config_id):  # -> Model:
        """Get a config.

        Args:
            config_id (str): Config ID.

        Returns:
            (:py:class:`Config`): The config.

        Raises:
            :py:class:`docker.errors.NotFound`
                If the config does not exist.
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def list(self, **kwargs):  # -> list[Model]:
        """List configs. Similar to the ``docker config ls`` command.

        Args:
            filters (dict): Server-side list filtering options.

        Returns:
            (list of :py:class:`Config`): The configs.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...

View File

@@ -1,832 +0,0 @@
from typing import Any
from typing import Iterator
from typing import Literal
from typing import overload
from .resource import Collection
from .resource import Model
class Container(Model):
    """Local representation of a container object. Detailed configuration may
    be accessed through the :py:attr:`attrs` attribute. Note that local
    attributes are cached; users may call :py:meth:`reload` to
    query the Docker daemon for the current properties, causing
    :py:attr:`attrs` to be refreshed.
    """

    @property
    def name(self) -> str:
        """The name of the container."""
    @property
    def image(self):  # -> None:
        """The image of the container."""
    @property
    def labels(self) -> dict[Any, Any]:
        """The labels of a container as dictionary."""
    @property
    def status(self) -> Literal["created", "restarting", "running", "removing", "paused", "exited"]:
        """The status of the container. For example, ``running``, or ``exited``."""
    @property
    def ports(self) -> dict[Any, Any]:
        """The ports that the container exposes as a dictionary."""
    def attach(self, **kwargs):
        """Attach to this container.

        :py:meth:`logs` is a wrapper around this method, which you can
        use instead if you want to fetch/stream container output without first
        retrieving the entire backlog.

        Args:
            stdout (bool): Include stdout.
            stderr (bool): Include stderr.
            stream (bool): Return container output progressively as an iterator
                of strings, rather than a single string.
            logs (bool): Include the container's previous output.

        Returns:
            By default, the container's output as a single string.
            If ``stream=True``, an iterator of output strings.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
    def attach_socket(self, **kwargs):
        """Like :py:meth:`attach`, but returns the underlying socket-like object
        for the HTTP request.

        Args:
            params (dict): Dictionary of request parameters (e.g. ``stdout``,
                ``stderr``, ``stream``).
            ws (bool): Use websockets instead of raw HTTP.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def commit(self, repository=..., tag=..., **kwargs):
        """Commit a container to an image. Similar to the ``docker commit``
        command.

        Args:
            repository (str): The repository to push the image to
            tag (str): The tag to push
            message (str): A commit message
            author (str): The name of the author
            changes (str): Dockerfile instructions to apply while committing
            conf (dict): The configuration for the container. See the
                `Engine API documentation
                <https://docs.docker.com/reference/api/docker_remote_api/>`_
                for full details.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def diff(self):
        """Inspect changes on a container's filesystem.

        Returns:
            (list) A list of dictionaries containing the attributes `Path`
            and `Kind`.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def exec_run(
        self,
        cmd,
        stdout=...,
        stderr=...,
        stdin=...,
        tty=...,
        privileged=...,
        user=...,
        detach=...,
        stream=...,
        socket=...,
        environment=...,
        workdir=...,
        demux=...,
    ):  # -> ExecResult:
        """Run a command inside this container. Similar to
        ``docker exec``.

        Args:
            cmd (str or list): Command to be executed
            stdout (bool): Attach to stdout. Default: ``True``
            stderr (bool): Attach to stderr. Default: ``True``
            stdin (bool): Attach to stdin. Default: ``False``
            tty (bool): Allocate a pseudo-TTY. Default: False
            privileged (bool): Run as privileged.
            user (str): User to execute command as. Default: root
            detach (bool): If true, detach from the exec command.
                Default: False
            stream (bool): Stream response data. Default: False
            socket (bool): Return the connection socket to allow custom
                read/write operations. Default: False
            environment (dict or list): A dictionary or a list of strings in
                the following format ``["PASSWORD=xxx"]`` or
                ``{"PASSWORD": "xxx"}``.
            workdir (str): Path to working directory for this exec session
            demux (bool): Return stdout and stderr separately

        Returns:
            (ExecResult): A tuple of (exit_code, output)
                exit_code: (int):
                    Exit code for the executed command or ``None`` if
                    either ``stream`` or ``socket`` is ``True``.
                output: (generator, bytes, or tuple):
                    If ``stream=True``, a generator yielding response chunks.
                    If ``socket=True``, a socket object for the connection.
                    If ``demux=True``, a tuple of two bytes: stdout and stderr.
                    A bytestring containing response data otherwise.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def export(self, chunk_size=...):
        """Export the contents of the container's filesystem as a tar archive.

        Args:
            chunk_size (int): The number of bytes returned by each iteration
                of the generator. If ``None``, data will be streamed as it is
                received. Default: 2 MB

        Returns:
            (str): The filesystem tar archive

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def get_archive(self, path, chunk_size=..., encode_stream=...):
        """Retrieve a file or folder from the container in the form of a tar
        archive.

        Args:
            path (str): Path to the file or folder to retrieve
            chunk_size (int): The number of bytes returned by each iteration
                of the generator. If ``None``, data will be streamed as it is
                received. Default: 2 MB
            encode_stream (bool): Determines if data should be encoded
                (gzip-compressed) during transmission. Default: False

        Returns:
            (tuple): First element is a raw tar data stream. Second element is
            a dict containing ``stat`` information on the specified ``path``.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Example:
            >>> f = open('./sh_bin.tar', 'wb')
            >>> bits, stat = container.get_archive('/bin/sh')
            >>> print(stat)
            {'name': 'sh', 'size': 1075464, 'mode': 493,
             'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''}
            >>> for chunk in bits:
            ...    f.write(chunk)
            >>> f.close()
        """
        ...
    def kill(self, signal: str | int = ...) -> None:
        """Kill or send a signal to the container.

        Args:
            signal (str or int): The signal to send. Defaults to ``SIGKILL``

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    # Overloads: with stream=False (the default) logs are returned as one
    # bytes object; with stream=True, as a blocking iterator of chunks.
    @overload
    def logs(self, stream: Literal[False] = False, **kwargs: Any) -> bytes: ...
    @overload
    def logs(self, stream: Literal[True] = ..., **kwargs: Any) -> Iterator[bytes]:
        """Get logs from this container. Similar to the ``docker logs`` command.

        The ``stream`` parameter makes the ``logs`` function return a blocking
        generator you can iterate over to retrieve log output as it happens.

        Args:
            stdout (bool): Get ``STDOUT``. Default ``True``
            stderr (bool): Get ``STDERR``. Default ``True``
            stream (bool): Stream the response. Default ``False``
            timestamps (bool): Show timestamps. Default ``False``
            tail (str or int): Output specified number of lines at the end of
                logs. Either an integer of number of lines or the string
                ``all``. Default ``all``
            since (datetime, int, or float): Show logs since a given datetime,
                integer epoch (in seconds) or float (in nanoseconds)
            follow (bool): Follow log output. Default ``False``
            until (datetime, int, or float): Show logs that occurred before
                the given datetime, integer epoch (in seconds), or
                float (in nanoseconds)

        Returns:
            (generator or str): Logs from the container.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def pause(self):
        """Pauses all processes within this container.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def put_archive(self, path, data):
        """Insert a file or folder in this container using a tar archive as
        source.

        Args:
            path (str): Path inside the container where the file(s) will be
                extracted. Must exist.
            data (bytes or stream): tar data to be extracted

        Returns:
            (bool): True if the call succeeds.

        Raises:
            :py:class:`~docker.errors.APIError` If an error occurs.
        """
        ...
    def remove(self, v: bool | None = ..., link: bool | None = ..., force: bool | None = ...) -> None:
        """Remove this container. Similar to the ``docker rm`` command.

        Args:
            v (bool): Remove the volumes associated with the container
            link (bool): Remove the specified link and not the underlying
                container
            force (bool): Force the removal of a running container (uses
                ``SIGKILL``)

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def rename(self, name):
        """Rename this container. Similar to the ``docker rename`` command.

        Args:
            name (str): New name for the container

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def resize(self, height, width):
        """Resize the tty session.

        Args:
            height (int): Height of tty session
            width (int): Width of tty session

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def restart(self, **kwargs):
        """Restart this container. Similar to the ``docker restart`` command.

        Args:
            timeout (int): Number of seconds to try to stop for before killing
                the container. Once killed it will then be restarted. Default
                is 10 seconds.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def start(self, **kwargs):
        """Start this container. Similar to the ``docker start`` command, but
        doesn't support attach options.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def stats(self, **kwargs):
        """Stream statistics for this container. Similar to the
        ``docker stats`` command.

        Args:
            decode (bool): If set to true, stream will be decoded into dicts
                on the fly. Only applicable if ``stream`` is True.
                False by default.
            stream (bool): If set to false, only the current stats will be
                returned instead of a stream. True by default.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def stop(self, **kwargs: Any) -> None:
        """Stops a container. Similar to the ``docker stop`` command.

        Args:
            timeout (int): Timeout in seconds to wait for the container to
                stop before sending a ``SIGKILL``. Default: 10

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def top(self, **kwargs: Any) -> str:
        """Display the running processes of the container.

        Args:
            ps_args (str): An optional arguments passed to ps (e.g. ``aux``)

        Returns:
            (str): The output of the top

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def unpause(self):
        """Unpause all processes within the container.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def update(self, **kwargs):
        """Update resource configuration of the containers.

        Args:
            blkio_weight (int): Block IO (relative weight), between 10 and 1000
            cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period
            cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota
            cpu_shares (int): CPU shares (relative weight)
            cpuset_cpus (str): CPUs in which to allow execution
            cpuset_mems (str): MEMs in which to allow execution
            mem_limit (int or str): Memory limit
            mem_reservation (int or str): Memory soft limit
            memswap_limit (int or str): Total memory (memory + swap), -1 to
                disable swap
            kernel_memory (int or str): Kernel memory limit
            restart_policy (dict): Restart policy dictionary

        Returns:
            (dict): Dictionary containing a ``Warnings`` key.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def wait(self, **kwargs: Any) -> dict[Any, Any]:
        """Block until the container stops, then return its exit code. Similar to
        the ``docker wait`` command.

        Args:
            timeout (int): Request timeout
            condition (str): Wait until a container state reaches the given
                condition, either ``not-running`` (default), ``next-exit``,
                or ``removed``

        Returns:
            (dict): The API's response as a Python dictionary, including
                the container's exit code under the ``StatusCode`` attribute.

        Raises:
            :py:class:`requests.exceptions.ReadTimeout`
                If the timeout is exceeded.
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
class ContainerCollection(Collection):
model = Container
@overload
def run(
self, image: str, command: list[str] | str = ..., detach: Literal[True] = ..., **kwargs: Any
) -> Container: ...
@overload
def run(self, image: str, command: list[str] | str = ..., detach: Literal[False] = ..., **kwargs: Any) -> bytes:
"""Run a container. By default, it will wait for the container to finish
and return its logs, similar to ``docker run``.
If the ``detach`` argument is ``True``, it will start the container
and immediately return a :py:class:`Container` object, similar to
``docker run -d``.
Example:
Run a container and get its output:
>>> import docker
>>> client = docker.from_env()
>>> client.containers.run('alpine', 'echo hello world')
b'hello world\\n'
Run a container and detach:
>>> container = client.containers.run('bfirsh/reticulate-splines',
detach=True)
>>> container.logs()
'Reticulating spline 1...\\nReticulating spline 2...\\n'
Args:
image (str): The image to run.
command (str or list): The command to run in the container.
auto_remove (bool): enable auto-removal of the container on daemon
side when the container's process exits.
blkio_weight_device: Block IO weight (relative device weight) in
the form of: ``[{"Path": "device_path", "Weight": weight}]``.
blkio_weight: Block IO weight (relative weight), accepts a weight
value between 10 and 1000.
cap_add (list of str): Add kernel capabilities. For example,
``["SYS_ADMIN", "MKNOD"]``.
cap_drop (list of str): Drop kernel capabilities.
cgroup_parent (str): Override the default parent cgroup.
cgroupns (str): Override the default cgroup namespace mode for the
container. One of:
- ``private`` the container runs in its own private cgroup
namespace.
- ``host`` use the host system's cgroup namespace.
cpu_count (int): Number of usable CPUs (Windows only).
cpu_percent (int): Usable percentage of the available CPUs
(Windows only).
cpu_period (int): The length of a CPU period in microseconds.
cpu_quota (int): Microseconds of CPU time that the container can
get in a CPU period.
cpu_rt_period (int): Limit CPU real-time period in microseconds.
cpu_rt_runtime (int): Limit CPU real-time runtime in microseconds.
cpu_shares (int): CPU shares (relative weight).
cpuset_cpus (str): CPUs in which to allow execution (``0-3``,
``0,1``).
cpuset_mems (str): Memory nodes (MEMs) in which to allow execution
(``0-3``, ``0,1``). Only effective on NUMA systems.
detach (bool): Run container in the background and return a
:py:class:`Container` object.
device_cgroup_rules (:py:class:`list`): A list of cgroup rules to
apply to the container.
device_read_bps: Limit read rate (bytes per second) from a device
in the form of: `[{"Path": "device_path", "Rate": rate}]`
device_read_iops: Limit read rate (IO per second) from a device.
device_write_bps: Limit write rate (bytes per second) from a
device.
device_write_iops: Limit write rate (IO per second) from a device.
devices (:py:class:`list`): Expose host devices to the container,
as a list of strings in the form
``<path_on_host>:<path_in_container>:<cgroup_permissions>``.
For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
to have read-write access to the host's ``/dev/sda`` via a
node named ``/dev/xvda`` inside the container.
device_requests (:py:class:`list`): Expose host resources such as
GPUs to the container, as a list of
:py:class:`docker.types.DeviceRequest` instances.
dns (:py:class:`list`): Set custom DNS servers.
dns_opt (:py:class:`list`): Additional options to be added to the
container's ``resolv.conf`` file.
dns_search (:py:class:`list`): DNS search domains.
domainname (str or list): Set custom DNS search domains.
entrypoint (str or list): The entrypoint for the container.
environment (dict or list): Environment variables to set inside
the container, as a dictionary or a list of strings in the
format ``["SOMEVARIABLE=xxx"]``.
extra_hosts (dict): Additional hostnames to resolve inside the
container, as a mapping of hostname to IP address.
group_add (:py:class:`list`): List of additional group names and/or
IDs that the container process will run as.
healthcheck (dict): Specify a test to perform to check that the
container is healthy. The dict takes the following keys:
- test (:py:class:`list` or str): Test to perform to determine
container health. Possible values:
- Empty list: Inherit healthcheck from parent image
- ``["NONE"]``: Disable healthcheck
- ``["CMD", args...]``: exec arguments directly.
- ``["CMD-SHELL", command]``: Run command in the system's
default shell.
If a string is provided, it will be used as a ``CMD-SHELL``
command.
- interval (int): The time to wait between checks in
nanoseconds. It should be 0 or at least 1000000 (1 ms).
- timeout (int): The time to wait before considering the check
to have hung. It should be 0 or at least 1000000 (1 ms).
- retries (int): The number of consecutive failures needed to
consider a container as unhealthy.
- start_period (int): Start period for the container to
initialize before starting health-retries countdown in
nanoseconds. It should be 0 or at least 1000000 (1 ms).
hostname (str): Optional hostname for the container.
init (bool): Run an init inside the container that forwards
signals and reaps processes
init_path (str): Path to the docker-init binary
ipc_mode (str): Set the IPC mode for the container.
isolation (str): Isolation technology to use. Default: `None`.
kernel_memory (int or str): Kernel memory limit
labels (dict or list): A dictionary of name-value labels (e.g.
``{"label1": "value1", "label2": "value2"}``) or a list of
names of labels to set with empty values (e.g.
``["label1", "label2"]``)
links (dict): Mapping of links using the
``{'container': 'alias'}`` format. The alias is optional.
Containers declared in this dict will be linked to the new
container using the provided alias. Default: ``None``.
log_config (LogConfig): Logging configuration.
lxc_conf (dict): LXC config.
mac_address (str): MAC address to assign to the container.
mem_limit (int or str): Memory limit. Accepts float values
(which represent the memory limit of the created container in
bytes) or a string with a units identification char
(``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
specified without a units character, bytes are assumed as an
intended unit.
mem_reservation (int or str): Memory soft limit.
mem_swappiness (int): Tune a container's memory swappiness
behavior. Accepts number between 0 and 100.
memswap_limit (str or int): Maximum amount of memory + swap a
container is allowed to consume.
mounts (:py:class:`list`): Specification for mounts to be added to
the container. More powerful alternative to ``volumes``. Each
item in the list is expected to be a
:py:class:`docker.types.Mount` object.
name (str): The name for this container.
nano_cpus (int): CPU quota in units of 1e-9 CPUs.
network (str): Name of the network this container will be connected
to at creation time. You can connect to additional networks
using :py:meth:`Network.connect`. Incompatible with
``network_mode``.
network_disabled (bool): Disable networking.
network_mode (str): One of:
- ``bridge`` Create a new network stack for the container on
the bridge network.
- ``none`` No networking for this container.
- ``container:<name|id>`` Reuse another container's network
stack.
- ``host`` Use the host network stack.
This mode is incompatible with ``ports``.
Incompatible with ``network``.
network_driver_opt (dict): A dictionary of options to provide
to the network driver. Defaults to ``None``. Used in
conjuction with ``network``. Incompatible
with ``network_mode``.
oom_kill_disable (bool): Whether to disable OOM killer.
oom_score_adj (int): An integer value containing the score given
to the container in order to tune OOM killer preferences.
pid_mode (str): If set to ``host``, use the host PID namespace
inside the container.
pids_limit (int): Tune a container's pids limit. Set ``-1`` for
unlimited.
platform (str): Platform in the format ``os[/arch[/variant]]``.
Only used if the method needs to pull the requested image.
ports (dict): Ports to bind inside the container.
The keys of the dictionary are the ports to bind inside the
container, either as an integer or a string in the form
``port/protocol``, where the protocol is either ``tcp``,
``udp``, or ``sctp``.
The values of the dictionary are the corresponding ports to
open on the host, which can be either:
- The port number, as an integer. For example,
``{'2222/tcp': 3333}`` will expose port 2222 inside the
container as port 3333 on the host.
- ``None``, to assign a random host port. For example,
``{'2222/tcp': None}``.
- A tuple of ``(address, port)`` if you want to specify the
host interface. For example,
``{'1111/tcp': ('127.0.0.1', 1111)}``.
- A list of integers, if you want to bind multiple host ports
to a single container port. For example,
``{'1111/tcp': [1234, 4567]}``.
Incompatible with ``host`` network mode.
privileged (bool): Give extended privileges to this container.
publish_all_ports (bool): Publish all ports to the host.
read_only (bool): Mount the container's root filesystem as read
only.
remove (bool): Remove the container when it has finished running.
Default: ``False``.
restart_policy (dict): Restart the container when it exits.
Configured as a dictionary with keys:
- ``Name`` One of ``on-failure``, or ``always``.
- ``MaximumRetryCount`` Number of times to restart the
container on failure.
For example:
``{"Name": "on-failure", "MaximumRetryCount": 5}``
runtime (str): Runtime to use with this container.
security_opt (:py:class:`list`): A list of string values to
customize labels for MLS systems, such as SELinux.
shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
stdin_open (bool): Keep ``STDIN`` open even if not attached.
stdout (bool): Return logs from ``STDOUT`` when ``detach=False``.
Default: ``True``.
stderr (bool): Return logs from ``STDERR`` when ``detach=False``.
Default: ``False``.
stop_signal (str): The stop signal to use to stop the container
(e.g. ``SIGINT``).
storage_opt (dict): Storage driver options per container as a
key-value mapping.
stream (bool): If true and ``detach`` is false, return a log
generator instead of a string. Ignored if ``detach`` is true.
Default: ``False``.
sysctls (dict): Kernel parameters to set in the container.
tmpfs (dict): Temporary filesystems to mount, as a dictionary
mapping a path inside the container to options for that path.
For example:
.. code-block:: python
{
'/mnt/vol2': '',
'/mnt/vol1': 'size=3G,uid=1000'
}
tty (bool): Allocate a pseudo-TTY.
ulimits (:py:class:`list`): Ulimits to set inside the container,
as a list of :py:class:`docker.types.Ulimit` instances.
use_config_proxy (bool): If ``True``, and if the docker client
configuration file (``~/.docker/config.json`` by default)
contains a proxy configuration, the corresponding environment
variables will be set in the container being built.
user (str or int): Username or UID to run commands as inside the
container.
userns_mode (str): Sets the user namespace mode for the container
when user namespace remapping option is enabled. Supported
values are: ``host``
uts_mode (str): Sets the UTS namespace mode for the container.
Supported values are: ``host``
version (str): The version of the API to use. Set to ``auto`` to
automatically detect the server's version. Default: ``1.35``
volume_driver (str): The name of a volume driver/plugin.
volumes (dict or list): A dictionary to configure volumes mounted
inside the container. The key is either the host path or a
volume name, and the value is a dictionary with the keys:
- ``bind`` The path to mount the volume inside the container
- ``mode`` Either ``rw`` to mount the volume read/write, or
``ro`` to mount it read-only.
For example:
.. code-block:: python
{'/home/user1/': {'bind': '/mnt/vol2', 'mode': 'rw'},
'/var/www': {'bind': '/mnt/vol1', 'mode': 'ro'}}
Or a list of strings which each one of its elements specifies a
mount volume.
For example:
.. code-block:: python
['/home/user1/:/mnt/vol2','/var/www:/mnt/vol1']
volumes_from (:py:class:`list`): List of container names or IDs to
get volumes from.
working_dir (str): Path to the working directory.
Returns:
The container logs, either ``STDOUT``, ``STDERR``, or both,
depending on the value of the ``stdout`` and ``stderr`` arguments.
``STDOUT`` and ``STDERR`` may be read only if either ``json-file``
or ``journald`` logging driver used. Thus, if you are using none of
these drivers, a ``None`` object is returned instead. See the
`Engine API documentation
<https://docs.docker.com/engine/api/v1.30/#operation/ContainerLogs/>`_
for full details.
If ``detach`` is ``True``, a :py:class:`Container` object is
returned instead.
Raises:
:py:class:`docker.errors.ContainerError`
If the container exits with a non-zero exit code and
``detach`` is ``False``.
:py:class:`docker.errors.ImageNotFound`
If the specified image does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
def create(self, image, command=..., **kwargs): # -> Model:
"""Create a container without starting it. Similar to ``docker create``.
Takes the same arguments as :py:meth:`run`, except for ``stdout``,
``stderr``, and ``remove``.
Returns:
A :py:class:`Container` object.
Raises:
:py:class:`docker.errors.ImageNotFound`
If the specified image does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
...
def get(self, container_id: str) -> Container:
"""Get a container by name or ID.
Args:
container_id (str): Container name or ID.
Returns:
A :py:class:`Container` object.
Raises:
:py:class:`docker.errors.NotFound`
If the container does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
...
def list(
    self, all=..., before=..., filters=..., limit=..., since=..., sparse=..., ignore_removed=...
):  # -> list[Model] | list[Unknown]:
    """List containers. Similar to the ``docker ps`` command.

    Args:
        all (bool): Show all containers. Only running containers are shown
            by default
        since (str): Show only containers created since Id or Name, include
            non-running ones
        before (str): Show only container created before Id or Name,
            include non-running ones
        limit (int): Show `limit` last created containers, include
            non-running ones
        filters (dict): Filters to be processed on the image list.
            Available filters:

            - `exited` (int): Only containers with specified exit code
            - `status` (str): One of ``restarting``, ``running``,
              ``paused``, ``exited``
            - `label` (str|list): format either ``"key"``, ``"key=value"``
              or a list of such.
            - `id` (str): The id of the container.
            - `name` (str): The name of the container.
            - `ancestor` (str): Filter by container ancestor. Format of
              ``<image-name>[:tag]``, ``<image-id>``, or
              ``<image@digest>``.
            - `before` (str): Only containers created before a particular
              container. Give the container name or id.
            - `since` (str): Only containers created after a particular
              container. Give container name or id.

            A comprehensive list can be found in the documentation for
            `docker ps
            <https://docs.docker.com/engine/reference/commandline/ps>`_.
        sparse (bool): Do not inspect containers. Returns partial
            information, but guaranteed not to block. Use
            :py:meth:`Container.reload` on resulting objects to retrieve
            all attributes. Default: ``False``
        ignore_removed (bool): Ignore failures due to missing containers
            when attempting to inspect containers from the original list.
            Set to ``True`` if race conditions are likely. Has no effect
            if ``sparse=True``. Default: ``False``

    Returns:
        (list of :py:class:`Container`)

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    ...
def prune(self, filters=...): ...
# Values elided by the stub generator.
# NOTE(review): in docker-py these appear to be the kwarg-name groupings used
# to split ``run``/``create`` arguments between the container-create and
# host-config API payloads, plus the exec result tuple type -- confirm
# against the runtime module.
RUN_CREATE_KWARGS = ...
RUN_HOST_CONFIG_KWARGS = ...
ExecResult = ...

View File

@@ -1,329 +0,0 @@
"""This type stub file was generated by pyright."""
from .resource import Collection
from .resource import Model
class Image(Model):
    """An image on the server."""

    # Generated type stub: all method bodies are elided (``...``).
    def __repr__(self): ...
    @property
    def labels(self):  # -> dict[Any, Any]:
        """The labels of an image as dictionary."""
        ...
    @property
    def short_id(self):
        """The ID of the image truncated to 12 characters, plus the ``sha256:``
        prefix.
        """
        ...
    @property
    def tags(self):  # -> list[Unknown]:
        """The image's tags."""
        ...
    def history(self):
        """Show the history of an image.

        Returns:
            (str): The history of the image.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def remove(self, force=..., noprune=...):
        """Remove this image.

        Args:
            force (bool): Force removal of the image
            noprune (bool): Do not delete untagged parents

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def save(self, chunk_size=..., named=...):
        """Get a tarball of an image. Similar to the ``docker save`` command.

        Args:
            chunk_size (int): The generator will return up to that much data
                per iteration, but may return less. If ``None``, data will be
                streamed as it is received. Default: 2 MB
            named (str or bool): If ``False`` (default), the tarball will not
                retain repository and tag information for this image. If set
                to ``True``, the first tag in the :py:attr:`~tags` list will
                be used to identify the image. Alternatively, any element of
                the :py:attr:`~tags` list can be used as an argument to use
                that specific tag as the saved identifier.

        Returns:
            (generator): A stream of raw archive data.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Example:
            >>> image = cli.images.get("busybox:latest")
            >>> f = open('/tmp/busybox-latest.tar', 'wb')
            >>> for chunk in image.save():
            >>>     f.write(chunk)
            >>> f.close()
        """
        ...
    def tag(self, repository, tag=..., **kwargs):
        """Tag this image into a repository. Similar to the ``docker tag``
        command.

        Args:
            repository (str): The repository to set for the tag
            tag (str): The tag name
            force (bool): Force

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Returns:
            (bool): ``True`` if successful
        """
        ...
class RegistryData(Model):
    """Image metadata stored on the registry, including available platforms."""

    def __init__(self, image_name, *args, **kwargs) -> None: ...
    @property
    def id(self):
        """The ID of the object."""
        ...
    @property
    def short_id(self):
        """The ID of the image truncated to 12 characters, plus the ``sha256:``
        prefix.
        """
        ...
    def pull(self, platform=...):
        """Pull the image digest.

        Args:
            platform (str): The platform to pull the image for.
                Default: ``None``

        Returns:
            (:py:class:`Image`): A reference to the pulled image.
        """
        ...
    def has_platform(self, platform):  # -> bool:
        """Check whether the given platform identifier is available for this
        digest.

        Args:
            platform (str or dict): A string using the ``os[/arch[/variant]]``
                format, or a platform dictionary.

        Returns:
            (bool): ``True`` if the platform is recognized as available,
            ``False`` otherwise.

        Raises:
            :py:class:`docker.errors.InvalidArgument`
                If the platform argument is not a valid descriptor.
        """
        ...
    def reload(self): ...
class ImageCollection(Collection):
    # ``model`` tells the base Collection which class
    # :py:meth:`Collection.prepare_model` instantiates.
    model = Image

    def build(self, **kwargs):  # -> Model | tuple[Model, Iterator[Any]]:
        """Build an image and return it. Similar to the ``docker build``
        command. Either ``path`` or ``fileobj`` must be set.

        If you already have a tar file for the Docker build context (including
        a Dockerfile), pass a readable file-like object to ``fileobj``
        and also pass ``custom_context=True``. If the stream is also
        compressed, set ``encoding`` to the correct value (e.g ``gzip``).

        If you want to get the raw output of the build, use the
        :py:meth:`~docker.api.build.BuildApiMixin.build` method in the
        low-level API.

        Args:
            path (str): Path to the directory containing the Dockerfile
            fileobj: A file object to use as the Dockerfile. (Or a file-like
                object)
            tag (str): A tag to add to the final image
            quiet (bool): Whether to return the status
            nocache (bool): Don't use the cache when set to ``True``
            rm (bool): Remove intermediate containers. The ``docker build``
                command now defaults to ``--rm=true``, but we have kept the old
                default of `False` to preserve backward compatibility
            timeout (int): HTTP timeout
            custom_context (bool): Optional if using ``fileobj``
            encoding (str): The encoding for a stream. Set to ``gzip`` for
                compressing
            pull (bool): Downloads any updates to the FROM image in Dockerfiles
            forcerm (bool): Always remove intermediate containers, even after
                unsuccessful builds
            dockerfile (str): path within the build context to the Dockerfile
            buildargs (dict): A dictionary of build arguments
            container_limits (dict): A dictionary of limits applied to each
                container created by the build process. Valid keys:

                - memory (int): set memory limit for build
                - memswap (int): Total memory (memory + swap), -1 to disable
                  swap
                - cpushares (int): CPU shares (relative weight)
                - cpusetcpus (str): CPUs in which to allow execution, e.g.,
                  ``"0-3"``, ``"0,1"``
            shmsize (int): Size of `/dev/shm` in bytes. The size must be
                greater than 0. If omitted the system uses 64MB
            labels (dict): A dictionary of labels to set on the image
            cache_from (list): A list of images used for build cache
                resolution
            target (str): Name of the build-stage to build in a multi-stage
                Dockerfile
            network_mode (str): networking mode for the run commands during
                build
            squash (bool): Squash the resulting images layers into a
                single layer.
            extra_hosts (dict): Extra hosts to add to /etc/hosts in building
                containers, as a mapping of hostname to IP address.
            platform (str): Platform in the format ``os[/arch[/variant]]``.
            isolation (str): Isolation technology used during build.
                Default: `None`.
            use_config_proxy (bool): If ``True``, and if the docker client
                configuration file (``~/.docker/config.json`` by default)
                contains a proxy configuration, the corresponding environment
                variables will be set in the container being built.

        Returns:
            (tuple): The first item is the :py:class:`Image` object for the
            image that was built. The second item is a generator of the
            build logs as JSON-decoded objects.

        Raises:
            :py:class:`docker.errors.BuildError`
                If there is an error during the build.
            :py:class:`docker.errors.APIError`
                If the server returns any other error.
            ``TypeError``
                If neither ``path`` nor ``fileobj`` is specified.
        """
        ...
    def get(self, name):  # -> Model:
        """Gets an image.

        Args:
            name (str): The name of the image.

        Returns:
            (:py:class:`Image`): The image.

        Raises:
            :py:class:`docker.errors.ImageNotFound`
                If the image does not exist.
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def get_registry_data(self, name, auth_config=...):  # -> RegistryData:
        """Gets the registry data for an image.

        Args:
            name (str): The name of the image.
            auth_config (dict): Override the credentials that are found in the
                config for this request. ``auth_config`` should contain the
                ``username`` and ``password`` keys to be valid.

        Returns:
            (:py:class:`RegistryData`): The data object.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def list(self, name=..., all=..., filters=...):  # -> list[Model]:
        """List images on the server.

        Args:
            name (str): Only show images belonging to the repository ``name``
            all (bool): Show intermediate image layers. By default, these are
                filtered out.
            filters (dict): Filters to be processed on the image list.
                Available filters:

                - ``dangling`` (bool)
                - `label` (str|list): format either ``"key"``, ``"key=value"``
                  or a list of such.

        Returns:
            (list of :py:class:`Image`): The images.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def load(self, data):  # -> list[Model]:
        """Load an image that was previously saved using
        :py:meth:`~docker.models.images.Image.save` (or ``docker save``).
        Similar to ``docker load``.

        Args:
            data (binary): Image data to be loaded.

        Returns:
            (list of :py:class:`Image`): The images.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def pull(self, repository, tag=..., all_tags=..., **kwargs):  # -> Model | list[Model]:
        """Pull an image of the given name and return it. Similar to the
        ``docker pull`` command.
        If ``tag`` is ``None`` or empty, it is set to ``latest``.
        If ``all_tags`` is set, the ``tag`` parameter is ignored and all image
        tags will be pulled.

        If you want to get the raw pull output, use the
        :py:meth:`~docker.api.image.ImageApiMixin.pull` method in the
        low-level API.

        Args:
            repository (str): The repository to pull
            tag (str): The tag to pull
            auth_config (dict): Override the credentials that are found in the
                config for this request. ``auth_config`` should contain the
                ``username`` and ``password`` keys to be valid.
            platform (str): Platform in the format ``os[/arch[/variant]]``
            all_tags (bool): Pull all image tags

        Returns:
            (:py:class:`Image` or list): The image that has been pulled.
                If ``all_tags`` is True, the method will return a list
                of :py:class:`Image` objects belonging to this repository.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Example:
            >>> # Pull the image tagged `latest` in the busybox repo
            >>> image = client.images.pull('busybox')

            >>> # Pull all tags in the busybox repo
            >>> images = client.images.pull('busybox', all_tags=True)
        """
        ...
    # The remaining bodies are elided by the stub generator.
    def push(self, repository, tag=..., **kwargs): ...
    def remove(self, *args, **kwargs): ...
    def search(self, *args, **kwargs): ...
    def prune(self, filters=...): ...
    def prune_builds(self, *args, **kwargs): ...
def normalize_platform(platform, engine_info): ...

View File

@@ -1,175 +0,0 @@
"""This type stub file was generated by pyright."""
from .resource import Collection
from .resource import Model
class Network(Model):
    """A Docker network."""

    @property
    def name(self):  # -> None:
        """The name of the network."""
        ...
    @property
    def containers(self):  # -> list[Unknown]:
        """The containers that are connected to the network, as a list of
        :py:class:`~docker.models.containers.Container` objects.
        """
        ...
    def connect(self, container, *args, **kwargs):
        """Connect a container to this network.

        Args:
            container (str): Container to connect to this network, as either
                an ID, name, or :py:class:`~docker.models.containers.Container`
                object.
            aliases (:py:class:`list`): A list of aliases for this endpoint.
                Names in that list can be used within the network to reach the
                container. Defaults to ``None``.
            links (:py:class:`list`): A list of links for this endpoint.
                Containers declared in this list will be linked to this
                container. Defaults to ``None``.
            ipv4_address (str): The IP address of this container on the
                network, using the IPv4 protocol. Defaults to ``None``.
            ipv6_address (str): The IP address of this container on the
                network, using the IPv6 protocol. Defaults to ``None``.
            link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
                addresses.
            driver_opt (dict): A dictionary of options to provide to the
                network driver. Defaults to ``None``.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def disconnect(self, container, *args, **kwargs):
        """Disconnect a container from this network.

        Args:
            container (str): Container to disconnect from this network, as
                either an ID, name, or
                :py:class:`~docker.models.containers.Container` object.
            force (bool): Force the container to disconnect from a network.
                Default: ``False``

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def remove(self):
        """Remove this network.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
class NetworkCollection(Collection):
    """Networks on the Docker server."""

    model = Network

    def create(self, name, *args, **kwargs):  # -> Model:
        """Create a network. Similar to the ``docker network create``.

        Args:
            name (str): Name of the network
            driver (str): Name of the driver used to create the network
            options (dict): Driver options as a key-value dictionary
            ipam (IPAMConfig): Optional custom IP scheme for the network.
            check_duplicate (bool): Request daemon to check for networks with
                same name. Default: ``None``.
            internal (bool): Restrict external access to the network. Default
                ``False``.
            labels (dict): Map of labels to set on the network. Default
                ``None``.
            enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
            attachable (bool): If enabled, and the network is in the global
                scope, non-service containers on worker nodes will be able to
                connect to the network.
            scope (str): Specify the network's scope (``local``, ``global`` or
                ``swarm``)
            ingress (bool): If set, create an ingress network which provides
                the routing-mesh in swarm mode.

        Returns:
            (:py:class:`Network`): The network that was created.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Example:
            A network using the bridge driver:

                >>> client.networks.create("network1", driver="bridge")

            You can also create more advanced networks with custom IPAM
            configurations. For example, setting the subnet to
            ``192.168.52.0/24`` and gateway address to ``192.168.52.254``.

            .. code-block:: python

                >>> ipam_pool = docker.types.IPAMPool(
                    subnet='192.168.52.0/24',
                    gateway='192.168.52.254'
                )
                >>> ipam_config = docker.types.IPAMConfig(
                    pool_configs=[ipam_pool]
                )
                >>> client.networks.create(
                    "network1",
                    driver="bridge",
                    ipam=ipam_config
                )
        """
        ...
    def get(self, network_id, *args, **kwargs):  # -> Model:
        """Get a network by its ID.

        Args:
            network_id (str): The ID of the network.
            verbose (bool): Retrieve the service details across the cluster in
                swarm mode.
            scope (str): Filter the network by scope (``swarm``, ``global``
                or ``local``).

        Returns:
            (:py:class:`Network`) The network.

        Raises:
            :py:class:`docker.errors.NotFound`
                If the network does not exist.
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def list(self, *args, **kwargs):  # -> list[Model]:
        """List networks. Similar to the ``docker network ls`` command.

        Args:
            names (:py:class:`list`): List of names to filter by.
            ids (:py:class:`list`): List of ids to filter by.
            filters (dict): Filters to be processed on the network list.
                Available filters:

                - ``driver=[<driver-name>]`` Matches a network's driver.
                - `label` (str|list): format either ``"key"``, ``"key=value"``
                  or a list of such.
                - ``type=["custom"|"builtin"]`` Filters networks by type.
            greedy (bool): Fetch more details for each network individually.
                You might want this to get the containers attached to them.

        Returns:
            (list of :py:class:`Network`) The networks on the server.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def prune(self, filters=...): ...

View File

@@ -1,95 +0,0 @@
"""This type stub file was generated by pyright."""
from .resource import Collection
from .resource import Model
class Node(Model):
    """A node in a swarm."""

    # Value elided by the stub generator; overrides Model.id_attribute.
    id_attribute = ...

    @property
    def version(self):
        """The version number of the node. If this is not the same as the
        server, the :py:meth:`update` function will not work and you will
        need to call :py:meth:`reload` before calling it again.
        """
        ...
    def update(self, node_spec):
        """Update the node's configuration.

        Args:
            node_spec (dict): Configuration settings to update. Any values
                not provided will be removed. Default: ``None``

        Returns:
            `True` if the request went through.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Example:
            >>> node_spec = {'Availability': 'active',
                             'Name': 'node-name',
                             'Role': 'manager',
                             'Labels': {'foo': 'bar'}
                            }
            >>> node.update(node_spec)
        """
        ...
    def remove(self, force=...):
        """Remove this node from the swarm.

        Args:
            force (bool): Force remove an active node. Default: `False`

        Returns:
            `True` if the request was successful.

        Raises:
            :py:class:`docker.errors.NotFound`
                If the node doesn't exist in the swarm.
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
class NodeCollection(Collection):
    """Nodes on the Docker server."""

    model = Node

    def get(self, node_id):  # -> Model:
        """Get a node.

        Args:
            node_id (string): ID of the node to be inspected.

        Returns:
            A :py:class:`Node` object.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def list(self, *args, **kwargs):  # -> list[Model]:
        """List swarm nodes.

        Args:
            filters (dict): Filters to process on the nodes list. Valid
                filters: ``id``, ``name``, ``membership`` and ``role``.
                Default: ``None``

        Returns:
            A list of :py:class:`Node` objects.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Example:
            >>> client.nodes.list(filters={'role': 'manager'})
        """
        ...

View File

@@ -1,152 +0,0 @@
"""This type stub file was generated by pyright."""
from .resource import Collection
from .resource import Model
class Plugin(Model):
    """A plugin on the server."""

    def __repr__(self): ...
    @property
    def name(self):  # -> None:
        """The plugin's name."""
        ...
    @property
    def enabled(self):  # -> None:
        """Whether the plugin is enabled."""
        ...
    @property
    def settings(self):  # -> None:
        """A dictionary representing the plugin's configuration."""
        ...
    def configure(self, options):  # -> None:
        """Update the plugin's settings.

        Args:
            options (dict): A key-value mapping of options.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def disable(self, force=...):  # -> None:
        """Disable the plugin.

        Args:
            force (bool): Force disable. Default: False

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def enable(self, timeout=...):  # -> None:
        """Enable the plugin.

        Args:
            timeout (int): Timeout in seconds. Default: 0

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def push(self):
        """Push the plugin to a remote registry.

        Returns:
            A dict iterator streaming the status of the upload.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def remove(self, force=...):
        """Remove the plugin from the server.

        Args:
            force (bool): Remove even if the plugin is enabled.
                Default: False

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def upgrade(self, remote=...):  # -> Generator[Unknown, Unknown, None]:
        """Upgrade the plugin.

        Args:
            remote (string): Remote reference to upgrade to. The
                ``:latest`` tag is optional and is the default if omitted.
                Default: this plugin's name.

        Returns:
            A generator streaming the decoded API logs
        """
        ...
class PluginCollection(Collection):
    model = Plugin

    def create(self, name, plugin_data_dir, gzip=...):  # -> Model:
        """Create a new plugin.

        Args:
            name (string): The name of the plugin. The ``:latest`` tag is
                optional, and is the default if omitted.
            plugin_data_dir (string): Path to the plugin data directory.
                Plugin data directory must contain the ``config.json``
                manifest file and the ``rootfs`` directory.
            gzip (bool): Compress the context using gzip. Default: False

        Returns:
            (:py:class:`Plugin`): The newly created plugin.
        """
        ...
    def get(self, name):  # -> Model:
        """Gets a plugin.

        Args:
            name (str): The name of the plugin.

        Returns:
            (:py:class:`Plugin`): The plugin.

        Raises:
            :py:class:`docker.errors.NotFound`
                If the plugin does not exist.
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def install(self, remote_name, local_name=...):  # -> Model:
        """Pull and install a plugin.

        Args:
            remote_name (string): Remote reference for the plugin to
                install. The ``:latest`` tag is optional, and is the
                default if omitted.
            local_name (string): Local name for the pulled plugin.
                The ``:latest`` tag is optional, and is the default if
                omitted. Optional.

        Returns:
            (:py:class:`Plugin`): The installed plugin

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def list(self):  # -> list[Model]:
        """List plugins installed on the server.

        Returns:
            (list of :py:class:`Plugin`): The plugins.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...

View File

@@ -1,38 +0,0 @@
"""This type stub file was generated by pyright."""
class Model:
    """A base class for representing a single object on the server."""

    # Value elided by the stub generator.
    # NOTE(review): presumably the key in ``attrs`` used as the object's ID
    # (subclasses such as Node/Secret/Service override it) -- confirm against
    # the runtime module.
    id_attribute = ...

    def __init__(self, attrs=..., client=..., collection=...) -> None: ...
    def __repr__(self): ...
    # __eq__ and __hash__ are declared together, keeping instances hashable.
    def __eq__(self, other) -> bool: ...
    def __hash__(self) -> int: ...
    @property
    def id(self):  # -> None:
        """The ID of the object."""
        ...
    @property
    def short_id(self):
        """The ID of the object, truncated to 12 characters."""
        ...
    def reload(self):  # -> None:
        """Load this object from the server again and update ``attrs`` with the
        new data.
        """
        ...
class Collection:
    """A base class for representing all objects of a particular type on the
    server.
    """

    # Subclasses set ``model`` to the Model subclass they contain
    # (e.g. ImageCollection.model = Image); value elided here.
    model = ...

    def __init__(self, client=...) -> None: ...
    def __call__(self, *args, **kwargs): ...
    def list(self): ...
    def get(self, key): ...
    def create(self, attrs=...): ...
    def prepare_model(self, attrs):  # -> Model:
        """Create a model from a set of attributes."""
        ...

View File

@@ -1,56 +0,0 @@
"""This type stub file was generated by pyright."""
from .resource import Collection
from .resource import Model
class Secret(Model):
    """A secret."""

    # Value elided by the stub generator; overrides Model.id_attribute.
    id_attribute = ...

    def __repr__(self): ...
    @property
    def name(self): ...
    def remove(self):
        """Remove this secret.

        Raises:
            :py:class:`docker.errors.APIError`
                If secret failed to remove.
        """
        ...
class SecretCollection(Collection):
    """Secrets on the Docker server."""

    model = Secret

    def create(self, **kwargs): ...
    def get(self, secret_id):  # -> Model:
        """Get a secret.

        Args:
            secret_id (str): Secret ID.

        Returns:
            (:py:class:`Secret`): The secret.

        Raises:
            :py:class:`docker.errors.NotFound`
                If the secret does not exist.
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def list(self, **kwargs):  # -> list[Model]:
        """List secrets. Similar to the ``docker secret ls`` command.

        Args:
            filters (dict): Server-side list filtering options.

        Returns:
            (list of :py:class:`Secret`): The secrets.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...

View File

@@ -1,227 +0,0 @@
"""This type stub file was generated by pyright."""
from .resource import Collection
from .resource import Model
class Service(Model):
    """A service."""

    # Value elided by the stub generator; overrides Model.id_attribute.
    id_attribute = ...

    @property
    def name(self):
        """The service's name."""
        ...
    @property
    def version(self):
        """The version number of the service. If this is not the same as the
        server, the :py:meth:`update` function will not work and you will
        need to call :py:meth:`reload` before calling it again.
        """
        ...
    def remove(self):
        """Stop and remove the service.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def tasks(self, filters=...):
        """List the tasks in this service.

        Args:
            filters (dict): A map of filters to process on the tasks list.
                Valid filters: ``id``, ``name``, ``node``,
                ``label``, and ``desired-state``.

        Returns:
            :py:class:`list`: List of task dictionaries.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def update(self, **kwargs):
        """Update a service's configuration. Similar to the ``docker service
        update`` command.

        Takes the same parameters as :py:meth:`~ServiceCollection.create`.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def logs(self, **kwargs):
        """Get log stream for the service.
        Note: This method works only for services with the ``json-file``
        or ``journald`` logging drivers.

        Args:
            details (bool): Show extra details provided to logs.
                Default: ``False``
            follow (bool): Keep connection open to read logs as they are
                sent by the Engine. Default: ``False``
            stdout (bool): Return logs from ``stdout``. Default: ``False``
            stderr (bool): Return logs from ``stderr``. Default: ``False``
            since (int): UNIX timestamp for the logs starting point.
                Default: 0
            timestamps (bool): Add timestamps to every log line.
            tail (string or int): Number of log lines to be returned,
                counting from the current end of the logs. Specify an
                integer or ``'all'`` to output all log lines.
                Default: ``all``

        Returns:
            generator: Logs for the service.
        """
        ...
    def scale(self, replicas):
        """Scale service container.

        Args:
            replicas (int): The number of containers that should be running.

        Returns:
            bool: ``True`` if successful.
        """
        ...
    def force_update(self):
        """Force update the service even if no changes require it.

        Returns:
            bool: ``True`` if successful.
        """
        ...
class ServiceCollection(Collection):
    """Services on the Docker server."""

    model = Service

    def create(self, image, command=..., **kwargs):  # -> Model:
        """Create a service. Similar to the ``docker service create`` command.

        Args:
            image (str): The image name to use for the containers.
            command (list of str or str): Command to run.
            args (list of str): Arguments to the command.
            constraints (list of str): :py:class:`~docker.types.Placement`
                constraints.
            preferences (list of tuple): :py:class:`~docker.types.Placement`
                preferences.
            maxreplicas (int): :py:class:`~docker.types.Placement` maxreplicas
                or (int) representing maximum number of replicas per node.
            platforms (list of tuple): A list of platform constraints
                expressed as ``(arch, os)`` tuples.
            container_labels (dict): Labels to apply to the container.
            endpoint_spec (EndpointSpec): Properties that can be configured to
                access and load balance a service. Default: ``None``.
            env (list of str): Environment variables, in the form
                ``KEY=val``.
            hostname (string): Hostname to set on the container.
            init (boolean): Run an init inside the container that forwards
                signals and reaps processes
            isolation (string): Isolation technology used by the service's
                containers. Only used for Windows containers.
            labels (dict): Labels to apply to the service.
            log_driver (str): Log driver to use for containers.
            log_driver_options (dict): Log driver options.
            mode (ServiceMode): Scheduling mode for the service.
                Default:``None``
            mounts (list of str): Mounts for the containers, in the form
                ``source:target:options``, where options is either
                ``ro`` or ``rw``.
            name (str): Name to give to the service.
            networks (:py:class:`list`): List of network names or IDs or
                :py:class:`~docker.types.NetworkAttachmentConfig` to attach the
                service to. Default: ``None``.
            resources (Resources): Resource limits and reservations.
            restart_policy (RestartPolicy): Restart policy for containers.
            secrets (list of :py:class:`~docker.types.SecretReference`): List
                of secrets accessible to containers for this service.
            stop_grace_period (int): Amount of time to wait for
                containers to terminate before forcefully killing them.
            update_config (UpdateConfig): Specification for the update strategy
                of the service. Default: ``None``
            rollback_config (RollbackConfig): Specification for the rollback
                strategy of the service. Default: ``None``
            user (str): User to run commands as.
            workdir (str): Working directory for commands to run.
            tty (boolean): Whether a pseudo-TTY should be allocated.
            groups (:py:class:`list`): A list of additional groups that the
                container process will run as.
            open_stdin (boolean): Open ``stdin``
            read_only (boolean): Mount the container's root filesystem as read
                only.
            stop_signal (string): Set signal to stop the service's containers
            healthcheck (Healthcheck): Healthcheck
                configuration for this service.
            hosts (:py:class:`dict`): A set of host to IP mappings to add to
                the container's `hosts` file.
            dns_config (DNSConfig): Specification for DNS
                related configurations in resolver configuration file.
            configs (:py:class:`list`): List of
                :py:class:`~docker.types.ConfigReference` that will be exposed
                to the service.
            privileges (Privileges): Security options for the service's
                containers.
            cap_add (:py:class:`list`): A list of kernel capabilities to add to
                the default set for the container.
            cap_drop (:py:class:`list`): A list of kernel capabilities to drop
                from the default set for the container.
            sysctls (:py:class:`dict`): A dict of sysctl values to add to the
                container

        Returns:
            :py:class:`Service`: The created service.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def get(self, service_id, insert_defaults=...):  # -> Model:
        """Get a service.

        Args:
            service_id (str): The ID of the service.
            insert_defaults (boolean): If true, default values will be merged
                into the output.

        Returns:
            :py:class:`Service`: The service.

        Raises:
            :py:class:`docker.errors.NotFound`
                If the service does not exist.
            :py:class:`docker.errors.APIError`
                If the server returns an error.
            :py:class:`docker.errors.InvalidVersion`
                If one of the arguments is not supported with the current
                API version.
        """
        ...
    def list(self, **kwargs):  # -> list[Model]:
        """List services.

        Args:
            filters (dict): Filters to process on the nodes list. Valid
                filters: ``id``, ``name`` , ``label`` and ``mode``.
                Default: ``None``.
            status (bool): Include the service task count of running and
                desired tasks. Default: ``None``.

        Returns:
            list of :py:class:`Service`: The services.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
# Values elided by the stub generator.
# NOTE(review): in docker-py these appear to be kwarg-name groupings used to
# route ``ServiceCollection.create`` arguments into the ContainerSpec,
# TaskTemplate, service-create and Placement API payloads -- confirm against
# the runtime module.
CONTAINER_SPEC_KWARGS = ...
TASK_TEMPLATE_KWARGS = ...
CREATE_SERVICE_KWARGS = ...
PLACEMENT_KWARGS = ...

View File

@@ -1,143 +0,0 @@
"""This type stub file was generated by pyright."""
from .resource import Model
class Swarm(Model):
    """The server's Swarm state. This is a singleton that must be reloaded to get
    the current state of the Swarm.
    """

    # Concrete value elided by the stub generator.
    id_attribute = ...
    def __init__(self, *args, **kwargs) -> None: ...
    @property
    def version(self):
        """The version number of the swarm. If this is not the same as the
        server, the :py:meth:`update` function will not work and you will
        need to call :py:meth:`reload` before calling it again.
        """
        ...
    def get_unlock_key(self): ...
    def init(
        self,
        advertise_addr=...,
        listen_addr=...,
        force_new_cluster=...,
        default_addr_pool=...,
        subnet_size=...,
        data_path_addr=...,
        data_path_port=...,
        **kwargs,
    ):
        """Initialize a new swarm on this Engine.
        Args:
            advertise_addr (str): Externally reachable address advertised to
                other nodes. This can either be an address/port combination in
                the form ``192.168.1.1:4567``, or an interface followed by a
                port number, like ``eth0:4567``. If the port number is omitted,
                the port number from the listen address is used.
                If not specified, it will be automatically detected when
                possible.
            listen_addr (str): Listen address used for inter-manager
                communication, as well as determining the networking interface
                used for the VXLAN Tunnel Endpoint (VTEP). This can either be
                an address/port combination in the form ``192.168.1.1:4567``,
                or an interface followed by a port number, like ``eth0:4567``.
                If the port number is omitted, the default swarm listening port
                is used. Default: ``0.0.0.0:2377``
            force_new_cluster (bool): Force creating a new Swarm, even if
                already part of one. Default: False
            default_addr_pool (list of str): Default Address Pool specifies
                default subnet pools for global scope networks. Each pool
                should be specified as a CIDR block, like '10.0.0.0/8'.
                Default: None
            subnet_size (int): SubnetSize specifies the subnet size of the
                networks created from the default subnet pool. Default: None
            data_path_addr (string): Address or interface to use for data path
                traffic. For example, 192.168.1.1, or an interface, like eth0.
            data_path_port (int): Port number to use for data path traffic.
                Acceptable port range is 1024 to 49151. If set to ``None`` or
                0, the default port 4789 will be used. Default: None
            task_history_retention_limit (int): Maximum number of tasks
                history stored.
            snapshot_interval (int): Number of logs entries between snapshot.
            keep_old_snapshots (int): Number of snapshots to keep beyond the
                current snapshot.
            log_entries_for_slow_followers (int): Number of log entries to
                keep around to sync up slow followers after a snapshot is
                created.
            heartbeat_tick (int): Amount of ticks (in seconds) between each
                heartbeat.
            election_tick (int): Amount of ticks (in seconds) needed without a
                leader to trigger a new election.
            dispatcher_heartbeat_period (int): The delay for an agent to send
                a heartbeat to the dispatcher.
            node_cert_expiry (int): Automatic expiry for nodes certificates.
            external_ca (dict): Configuration for forwarding signing requests
                to an external certificate authority. Use
                ``docker.types.SwarmExternalCA``.
            name (string): Swarm's name
            labels (dict): User-defined key/value metadata.
            signing_ca_cert (str): The desired signing CA certificate for all
                swarm node TLS leaf certificates, in PEM format.
            signing_ca_key (str): The desired signing CA key for all swarm
                node TLS leaf certificates, in PEM format.
            ca_force_rotate (int): An integer whose purpose is to force swarm
                to generate a new signing CA certificate and key, if none have
                been specified.
            autolock_managers (boolean): If set, generate a key and use it to
                lock data stored on the managers.
            log_driver (DriverConfig): The default log driver to use for tasks
                created in the orchestrator.
        Returns:
            (str): The ID of the created node.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        Example:
            >>> client.swarm.init(
                advertise_addr='eth0', listen_addr='0.0.0.0:5000',
                force_new_cluster=False, default_addr_pool=['10.20.0.0/16'],
                subnet_size=24, snapshot_interval=5000,
                log_entries_for_slow_followers=1200
            )
        """
        ...
    def join(self, *args, **kwargs): ...
    def leave(self, *args, **kwargs): ...
    def reload(self): # -> None:
        """Inspect the swarm on the server and store the response in
        :py:attr:`attrs`.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def unlock(self, key): ...
    def update(self, rotate_worker_token=..., rotate_manager_token=..., rotate_manager_unlock_key=..., **kwargs):
        """Update the swarm's configuration.
        It takes the same arguments as :py:meth:`init`, except
        ``advertise_addr``, ``listen_addr``, and ``force_new_cluster``. In
        addition, it takes these arguments:
        Args:
            rotate_worker_token (bool): Rotate the worker join token. Default:
                ``False``.
            rotate_manager_token (bool): Rotate the manager join token.
                Default: ``False``.
            rotate_manager_unlock_key (bool): Rotate the manager unlock key.
                Default: ``False``.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...

View File

@@ -1,85 +0,0 @@
"""This type stub file was generated by pyright."""
from .resource import Collection
from .resource import Model
class Volume(Model):
    """A volume."""

    # Concrete value elided by the stub generator.
    id_attribute = ...
    @property
    def name(self):
        """The name of the volume."""
        ...
    def remove(self, force=...):
        """Remove this volume.
        Args:
            force (bool): Force removal of volumes that were already removed
                out of band by the volume driver plugin.
        Raises:
            :py:class:`docker.errors.APIError`
                If volume failed to remove.
        """
        ...
class VolumeCollection(Collection):
    """Volumes on the Docker server."""

    # Model class instantiated for each volume returned by the API.
    model = Volume
    def create(self, name=..., **kwargs): # -> Model:
        """Create a volume.
        Args:
            name (str): Name of the volume. If not specified, the engine
                generates a name.
            driver (str): Name of the driver used to create the volume
            driver_opts (dict): Driver options as a key-value dictionary
            labels (dict): Labels to set on the volume
        Returns:
            (:py:class:`Volume`): The volume created.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        Example:
            >>> volume = client.volumes.create(name='foobar', driver='local',
                    driver_opts={'foo': 'bar', 'baz': 'false'},
                    labels={"key": "value"})
        """
        ...
    def get(self, volume_id): # -> Model:
        """Get a volume.
        Args:
            volume_id (str): Volume name.
        Returns:
            (:py:class:`Volume`): The volume.
        Raises:
            :py:class:`docker.errors.NotFound`
                If the volume does not exist.
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def list(self, **kwargs): # -> list[Unknown] | list[Model]:
        """List volumes. Similar to the ``docker volume ls`` command.
        Args:
            filters (dict): Server-side list filtering options.
        Returns:
            (list of :py:class:`Volume`): The volumes.
        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        ...
    def prune(self, filters=...): ...

View File

@@ -1,5 +0,0 @@
from .npipeconn import NpipeHTTPAdapter as NpipeHTTPAdapter
from .npipesocket import NpipeSocket as NpipeSocket
from .sshconn import SSHHTTPAdapter as SSHHTTPAdapter
from .ssladapter import SSLHTTPAdapter as SSLHTTPAdapter
from .unixconn import UnixHTTPAdapter as UnixHTTPAdapter

View File

@@ -1,6 +0,0 @@
"""This type stub file was generated by pyright."""
import requests.adapters
class BaseHTTPAdapter(requests.adapters.HTTPAdapter):
    """Common base for the transport adapters below; adds a ``close`` hook."""
    def close(self): ...

View File

@@ -1,20 +0,0 @@
"""This type stub file was generated by pyright."""
import urllib3
import urllib3.connection
from docker.transport.basehttpadapter import BaseHTTPAdapter
RecentlyUsedContainer = ...  # value elided by the stub generator (presumably urllib3's LRU container — confirm)
class NpipeHTTPConnection(urllib3.connection.HTTPConnection):
    """HTTP connection carried over a Windows named pipe at ``npipe_path``."""
    def __init__(self, npipe_path, timeout=...) -> None: ...
    def connect(self): ...
class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    """Connection pool producing :py:class:`NpipeHTTPConnection` objects."""
    def __init__(self, npipe_path, timeout=..., maxsize=...) -> None: ...
class NpipeHTTPAdapter(BaseHTTPAdapter):
    """requests transport adapter that routes HTTP over Windows named pipes."""

    # Extra attribute names pickled with the adapter; value elided by the stub
    # generator.
    __attrs__ = ...
    def __init__(self, base_url, timeout=..., pool_connections=..., max_pool_size=...) -> None: ...
    def get_connection(self, url, proxies=...): ...
    def request_url(self, request, proxies): ...

View File

@@ -1,66 +0,0 @@
"""This type stub file was generated by pyright."""
import io
# NOTE(review): names suggest Win32 named-pipe API constants and a retry
# bound; concrete values elided by the stub generator.
cERROR_PIPE_BUSY = ...
cSECURITY_SQOS_PRESENT = ...
cSECURITY_ANONYMOUS = ...
MAXIMUM_RETRY_COUNT = ...
def check_closed(f): ...  # decorator applied to NpipeSocket methods below; body elided — presumably guards against use after close
class NpipeSocket:
    """Partial implementation of the socket API over windows named pipes.
    This implementation is only designed to be used as a client socket,
    and server-specific methods (bind, listen, accept...) are not
    implemented.
    """
    def __init__(self, handle=...) -> None: ...
    def accept(self): ...
    def bind(self, address): ...
    def close(self): ...
    @check_closed
    def connect(self, address, retry_count=...): ...
    @check_closed
    def connect_ex(self, address): ...
    @check_closed
    def detach(self): ...
    @check_closed
    def dup(self): ...
    def getpeername(self): ...
    def getsockname(self): ...
    def getsockopt(self, level, optname, buflen=...): ...
    def ioctl(self, control, option): ...
    def listen(self, backlog): ...
    def makefile(self, mode=..., bufsize=...): ...
    @check_closed
    def recv(self, bufsize, flags=...): ...
    @check_closed
    def recvfrom(self, bufsize, flags=...): ...
    @check_closed
    def recvfrom_into(self, buf, nbytes=..., flags=...): ...
    @check_closed
    def recv_into(self, buf, nbytes=...): ...
    @check_closed
    def send(self, string, flags=...): ...
    @check_closed
    def sendall(self, string, flags=...): ...
    @check_closed
    def sendto(self, string, address): ...
    def setblocking(self, flag): ...
    def settimeout(self, value): ...
    def gettimeout(self): ...
    def setsockopt(self, level, optname, value): ...
    @check_closed
    def shutdown(self, how): ...
class NpipeFileIOBase(io.RawIOBase):
    """Raw file-like wrapper around an :py:class:`NpipeSocket`."""
    def __init__(self, npipe_socket) -> None: ...
    def close(self): ...
    def fileno(self): ...
    def isatty(self): ...
    def readable(self): ...
    def readinto(self, buf): ...
    def seekable(self): ...
    def writable(self): ...

View File

@@ -1,32 +0,0 @@
"""This type stub file was generated by pyright."""
import socket
import urllib3
import urllib3.connection
from docker.transport.basehttpadapter import BaseHTTPAdapter
RecentlyUsedContainer = ...  # value elided by the stub generator (presumably urllib3's LRU container — confirm)
class SSHSocket(socket.socket):
    """Socket subclass that tunnels traffic to ``host`` over SSH."""
    def __init__(self, host) -> None: ...
    def connect(self, **kwargs): ...
    def sendall(self, data): ...
    def send(self, data): ...
    def recv(self, n): ...
    def makefile(self, mode): ...
    def close(self): ...
class SSHConnection(urllib3.connection.HTTPConnection):
    """HTTP connection carried over an SSH transport."""
    def __init__(self, ssh_transport=..., timeout=..., host=...) -> None: ...
    def connect(self): ...
class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    """Connection pool producing :py:class:`SSHConnection` objects."""

    # URL scheme handled by this pool; value elided by the stub generator.
    scheme = ...
    def __init__(self, ssh_client=..., timeout=..., maxsize=..., host=...) -> None: ...
class SSHHTTPAdapter(BaseHTTPAdapter):
    """requests transport adapter that routes HTTP over SSH."""

    # Extra attribute names pickled with the adapter; value elided by the stub
    # generator.
    __attrs__ = ...
    def __init__(self, base_url, timeout=..., pool_connections=..., max_pool_size=..., shell_out=...) -> None: ...
    def get_connection(self, url, proxies=...): ...
    def close(self): ...

View File

@@ -1,26 +0,0 @@
"""This type stub file was generated by pyright."""
import urllib3
from docker.transport.basehttpadapter import BaseHTTPAdapter
""" Resolves OpenSSL issues in some servers:
https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
https://github.com/kennethreitz/requests/pull/799
"""
PoolManager = urllib3.poolmanager.PoolManager  # module-level alias for urllib3's pool manager
class SSLHTTPAdapter(BaseHTTPAdapter):
    """An HTTPS Transport Adapter that uses an arbitrary SSL version."""

    # Extra attribute names pickled with the adapter; value elided by the stub
    # generator.
    __attrs__ = ...
    def __init__(self, ssl_version=..., assert_hostname=..., assert_fingerprint=..., **kwargs) -> None: ...
    def init_poolmanager(self, connections, maxsize, block=...): ...
    def get_connection(self, *args, **kwargs):
        """Ensure assert_hostname is set correctly on our pool.
        We already take care of a normal poolmanager via init_poolmanager
        But we still need to take care of when there is a proxy poolmanager
        """
        ...
    def can_override_ssl_version(self): ...

View File

@@ -1,20 +0,0 @@
"""This type stub file was generated by pyright."""
import urllib3
import urllib3.connection
from docker.transport.basehttpadapter import BaseHTTPAdapter
RecentlyUsedContainer = ...  # value elided by the stub generator (presumably urllib3's LRU container — confirm)
class UnixHTTPConnection(urllib3.connection.HTTPConnection):
    """HTTP connection carried over a Unix domain socket."""
    def __init__(self, base_url, unix_socket, timeout=...) -> None: ...
    def connect(self): ...
class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    """Connection pool producing :py:class:`UnixHTTPConnection` objects."""
    def __init__(self, base_url, socket_path, timeout=..., maxsize=...) -> None: ...
class UnixHTTPAdapter(BaseHTTPAdapter):
    """requests transport adapter that routes HTTP over a Unix socket."""

    # Extra attribute names pickled with the adapter; value elided by the stub
    # generator.
    __attrs__ = ...
    def __init__(self, socket_url, timeout=..., pool_connections=..., max_pool_size=...) -> None: ...
    def get_connection(self, url, proxies=...): ...
    def request_url(self, request, proxies): ...

View File

@@ -1,30 +0,0 @@
from .containers import ContainerConfig as ContainerConfig
from .containers import DeviceRequest as DeviceRequest
from .containers import HostConfig as HostConfig
from .containers import LogConfig as LogConfig
from .containers import Ulimit as Ulimit
from .daemon import CancellableStream as CancellableStream
from .healthcheck import Healthcheck as Healthcheck
from .networks import EndpointConfig as EndpointConfig
from .networks import IPAMConfig as IPAMConfig
from .networks import IPAMPool as IPAMPool
from .networks import NetworkingConfig as NetworkingConfig
from .services import ConfigReference as ConfigReference
from .services import ContainerSpec as ContainerSpec
from .services import DNSConfig as DNSConfig
from .services import DriverConfig as DriverConfig
from .services import EndpointSpec as EndpointSpec
from .services import Mount as Mount
from .services import NetworkAttachmentConfig as NetworkAttachmentConfig
from .services import Placement as Placement
from .services import PlacementPreference as PlacementPreference
from .services import Privileges as Privileges
from .services import Resources as Resources
from .services import RestartPolicy as RestartPolicy
from .services import RollbackConfig as RollbackConfig
from .services import SecretReference as SecretReference
from .services import ServiceMode as ServiceMode
from .services import TaskTemplate as TaskTemplate
from .services import UpdateConfig as UpdateConfig
from .swarm import SwarmExternalCA as SwarmExternalCA
from .swarm import SwarmSpec as SwarmSpec

View File

@@ -1,4 +0,0 @@
"""This type stub file was generated by pyright."""
class DictType(dict):
    """dict subclass used as the common base for the docker.types wrappers."""
    def __init__(self, init) -> None: ...

View File

@@ -1,238 +0,0 @@
from typing import Any
from .base import DictType
class LogConfigTypesEnum:
    # Valid log-driver type names (referenced as ``LogConfig.types`` below);
    # concrete values elided by the stub generator.
    _values = ...
class LogConfig(DictType):
    """Configure logging for a container, when provided as an argument to
    :py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
    You may refer to the
    `official logging driver documentation <https://docs.docker.com/config/containers/logging/configure/>`_
    for more information.
    Args:
        type (str): Indicate which log driver to use. A set of valid drivers
            is provided as part of the :py:attr:`LogConfig.types`
            enum. Other values may be accepted depending on the engine version
            and available logging plugins.
        config (dict): A driver-dependent configuration dictionary. Please
            refer to the driver's documentation for a list of valid config
            keys.
    Example:
        >>> from docker.types import LogConfig
        >>> lc = LogConfig(type=LogConfig.types.JSON, config={
        ...   'max-size': '1g',
        ...   'labels': 'production_status,geo'
        ... })
        >>> hc = client.create_host_config(log_config=lc)
        >>> container = client.create_container('busybox', 'true',
        ...    host_config=hc)
        >>> client.inspect_container(container)['HostConfig']['LogConfig']
        {'Type': 'json-file', 'Config': {'labels': 'production_status,geo', 'max-size': '1g'}}
    """

    # Enum of valid log-driver type names.
    types = LogConfigTypesEnum
    def __init__(self, **kwargs: Any) -> None: ...
    @property
    def type(self): ...
    @type.setter
    def type(self, value: Any): ...
    @property
    def config(self): ...
    def set_config_value(self, key: str, value: Any) -> None:
        """Set the value for ``key`` to ``value`` inside the ``config``
        dict.
        """
        ...
    def unset_config(self, key: str) -> None:
        """Remove the ``key`` property from the ``config`` dict."""
        ...
class Ulimit(DictType):
    """Create a ulimit declaration to be used with
    :py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
    Args:
        name (str): Which ulimit will this apply to. The valid names can be
            found in '/etc/security/limits.conf' on a gnu/linux system.
        soft (int): The soft limit for this ulimit. Optional.
        hard (int): The hard limit for this ulimit. Optional.
    Example:
        >>> nproc_limit = docker.types.Ulimit(name='nproc', soft=1024)
        >>> hc = client.create_host_config(ulimits=[nproc_limit])
        >>> container = client.create_container(
                'busybox', 'true', host_config=hc
            )
        >>> client.inspect_container(container)['HostConfig']['Ulimits']
        [{'Name': 'nproc', 'Hard': 0, 'Soft': 1024}]
    """
    def __init__(self, **kwargs) -> None: ...
    @property
    def name(self): ...
    @name.setter
    def name(self, value): ...
    @property
    def soft(self): ...
    @soft.setter
    def soft(self, value): ...
    @property
    def hard(self): ...
    @hard.setter
    def hard(self, value): ...
class DeviceRequest(DictType):
    """Create a device request to be used with
    :py:meth:`~docker.api.container.ContainerApiMixin.create_host_config`.
    Args:
        driver (str): Which driver to use for this device. Optional.
        count (int): Number or devices to request. Optional.
            Set to -1 to request all available devices.
        device_ids (list): List of strings for device IDs. Optional.
            Set either ``count`` or ``device_ids``.
        capabilities (list): List of lists of strings to request
            capabilities. Optional. The global list acts like an OR,
            and the sub-lists are AND. The driver will try to satisfy
            one of the sub-lists.
            Available capabilities for the ``nvidia`` driver can be found
            `here <https://github.com/NVIDIA/nvidia-container-runtime>`_.
        options (dict): Driver-specific options. Optional.
    """
    def __init__(
        self,
        count: int | None = ...,
        driver: str | None = ...,
        device_ids: list[str] | None = ...,
        capabilities: list[list[str]] | None = ...,
        options: dict[str, str] | None = ...,
    ) -> None: ...
    @property
    def driver(self) -> str: ...
    @driver.setter
    def driver(self, value: str) -> None: ...
    @property
    def count(self) -> int: ...
    @count.setter
    def count(self, value: int) -> None: ...
    @property
    def device_ids(self): ...
    @device_ids.setter
    def device_ids(self, value): ...
    @property
    def capabilities(self): ...
    @capabilities.setter
    def capabilities(self, value): ...
    @property
    def options(self): ...
    @options.setter
    def options(self, value): ...
class HostConfig(dict):
    """Host-side configuration of a container (binds, resource limits,
    namespaces, devices, ...). All parameters after ``version`` are optional;
    their defaults are elided by the stub generator.
    """
    def __init__(
        self,
        version,
        binds=...,
        port_bindings=...,
        lxc_conf=...,
        publish_all_ports=...,
        links=...,
        privileged=...,
        dns=...,
        dns_search=...,
        volumes_from=...,
        network_mode=...,
        restart_policy=...,
        cap_add=...,
        cap_drop=...,
        devices=...,
        extra_hosts=...,
        read_only=...,
        pid_mode=...,
        ipc_mode=...,
        security_opt=...,
        ulimits=...,
        log_config=...,
        mem_limit=...,
        memswap_limit=...,
        mem_reservation=...,
        kernel_memory=...,
        mem_swappiness=...,
        cgroup_parent=...,
        group_add=...,
        cpu_quota=...,
        cpu_period=...,
        blkio_weight=...,
        blkio_weight_device=...,
        device_read_bps=...,
        device_write_bps=...,
        device_read_iops=...,
        device_write_iops=...,
        oom_kill_disable=...,
        shm_size=...,
        sysctls=...,
        tmpfs=...,
        oom_score_adj=...,
        dns_opt=...,
        cpu_shares=...,
        cpuset_cpus=...,
        userns_mode=...,
        uts_mode=...,
        pids_limit=...,
        isolation=...,
        auto_remove=...,
        storage_opt=...,
        init=...,
        init_path=...,
        volume_driver=...,
        cpu_count=...,
        cpu_percent=...,
        nano_cpus=...,
        cpuset_mems=...,
        runtime=...,
        mounts=...,
        cpu_rt_period=...,
        cpu_rt_runtime=...,
        device_cgroup_rules=...,
        device_requests=...,
        cgroupns=...,
    ) -> None: ...
def host_config_type_error(param, param_value, expected): ...  # body elided — presumably builds an error for a mistyped HostConfig param
def host_config_version_error(param, version, less_than=...): ...  # body elided — presumably builds an error for an API-version mismatch
def host_config_value_error(param, param_value): ...  # body elided — presumably builds an error for an invalid HostConfig value
def host_config_incompatible_error(param, param_value, incompatible_param): ...  # body elided — presumably builds an error for mutually exclusive params
class ContainerConfig(dict):
    """Container-side creation payload (image, command, environment, ...).
    All parameters after ``command`` are optional; their defaults are elided
    by the stub generator.
    """
    def __init__(
        self,
        version,
        image,
        command,
        hostname=...,
        user=...,
        detach=...,
        stdin_open=...,
        tty=...,
        ports=...,
        environment=...,
        volumes=...,
        network_disabled=...,
        entrypoint=...,
        working_dir=...,
        domainname=...,
        host_config=...,
        mac_address=...,
        labels=...,
        stop_signal=...,
        networking_config=...,
        healthcheck=...,
        stop_timeout=...,
        runtime=...,
    ) -> None: ...

View File

@@ -1,21 +0,0 @@
"""This type stub file was generated by pyright."""
class CancellableStream:
    """Stream wrapper for real-time events, logs, etc. from the server.
    Example:
        >>> events = client.events()
        >>> for event in events:
        ...   print(event)
        >>> # and cancel from another thread
        >>> events.close()
    """
    def __init__(self, stream, response) -> None: ...
    def __iter__(self): ...
    def __next__(self): ...
    # Python 2 style alias for __next__; value elided by the stub generator.
    next = ...
    def close(self): # -> None:
        """Closes the event streaming."""
        ...

View File

@@ -1,51 +0,0 @@
"""This type stub file was generated by pyright."""
from .base import DictType
class Healthcheck(DictType):
    """Defines a healthcheck configuration for a container or service.
    Args:
        test (:py:class:`list` or str): Test to perform to determine
            container health. Possible values:
            - Empty list: Inherit healthcheck from parent image
            - ``["NONE"]``: Disable healthcheck
            - ``["CMD", args...]``: exec arguments directly.
            - ``["CMD-SHELL", command]``: Run command in the system's
              default shell.
            If a string is provided, it will be used as a ``CMD-SHELL``
            command.
        interval (int): The time to wait between checks in nanoseconds. It
            should be 0 or at least 1000000 (1 ms).
        timeout (int): The time to wait before considering the check to
            have hung. It should be 0 or at least 1000000 (1 ms).
        retries (int): The number of consecutive failures needed to
            consider a container as unhealthy.
        start_period (int): Start period for the container to
            initialize before starting health-retries countdown in
            nanoseconds. It should be 0 or at least 1000000 (1 ms).
    """
    def __init__(self, **kwargs) -> None: ...
    @property
    def test(self): ...
    @test.setter
    def test(self, value): ...
    @property
    def interval(self): ...
    @interval.setter
    def interval(self, value): ...
    @property
    def timeout(self): ...
    @timeout.setter
    def timeout(self, value): ...
    @property
    def retries(self): ...
    @retries.setter
    def retries(self, value): ...
    @property
    def start_period(self): ...
    @start_period.setter
    def start_period(self, value): ...

View File

@@ -1,66 +0,0 @@
"""This type stub file was generated by pyright."""
class EndpointConfig(dict):
    """Per-network endpoint settings for a container (aliases, static IPs,
    links). All parameters after ``version`` are optional; defaults are elided
    by the stub generator.
    """
    def __init__(
        self,
        version,
        aliases=...,
        links=...,
        ipv4_address=...,
        ipv6_address=...,
        link_local_ips=...,
        driver_opt=...,
        mac_address=...,
    ) -> None: ...
class NetworkingConfig(dict):
    """Maps network names to their :py:class:`EndpointConfig` for a container."""
    def __init__(self, endpoints_config=...) -> None: ...
class IPAMConfig(dict):
    """Create an IPAM (IP Address Management) config dictionary to be used with
    :py:meth:`~docker.api.network.NetworkApiMixin.create_network`.
    Args:
        driver (str): The IPAM driver to use. Defaults to ``default``.
        pool_configs (:py:class:`list`): A list of pool configurations
            (:py:class:`~docker.types.IPAMPool`). Defaults to empty list.
        options (dict): Driver options as a key-value dictionary.
            Defaults to `None`.
    Example:
        >>> ipam_config = docker.types.IPAMConfig(driver='default')
        >>> network = client.create_network('network1', ipam=ipam_config)
    """
    def __init__(self, driver=..., pool_configs=..., options=...) -> None: ...
class IPAMPool(dict):
    """Create an IPAM pool config dictionary to be added to the
    ``pool_configs`` parameter of
    :py:class:`~docker.types.IPAMConfig`.
    Args:
        subnet (str): Custom subnet for this IPAM pool using the CIDR
            notation. Defaults to ``None``.
        iprange (str): Custom IP range for endpoints in this IPAM pool using
            the CIDR notation. Defaults to ``None``.
        gateway (str): Custom IP address for the pool's gateway.
        aux_addresses (dict): A dictionary of ``key -> ip_address``
            relationships specifying auxiliary addresses that need to be
            allocated by the IPAM driver.
    Example:
        >>> ipam_pool = docker.types.IPAMPool(
            subnet='124.42.0.0/16',
            iprange='124.42.0.0/24',
            gateway='124.42.0.254',
            aux_addresses={
                'reserved1': '124.42.1.1'
            }
        )
        >>> ipam_config = docker.types.IPAMConfig(
                pool_configs=[ipam_pool])
    """
    def __init__(self, subnet=..., iprange=..., gateway=..., aux_addresses=...) -> None: ...

View File

@@ -1,427 +0,0 @@
"""This type stub file was generated by pyright."""
from ..utils import check_resource
class TaskTemplate(dict):
    """Describe the task specification to be used when creating or updating a
    service.
    Args:
        container_spec (ContainerSpec): Container settings for containers
            started as part of this task.
        log_driver (DriverConfig): Log configuration for containers created as
            part of the service.
        resources (Resources): Resource requirements which apply to each
            individual container created as part of the service.
        restart_policy (RestartPolicy): Specification for the restart policy
            which applies to containers created as part of this service.
        placement (Placement): Placement instructions for the scheduler.
            If a list is passed instead, it is assumed to be a list of
            constraints as part of a :py:class:`Placement` object.
        networks (:py:class:`list`): List of network names or IDs or
            :py:class:`NetworkAttachmentConfig` to attach the service to.
        force_update (int): A counter that triggers an update even if no
            relevant parameters have been changed.
    """
    def __init__(
        self,
        container_spec,
        resources=...,
        restart_policy=...,
        placement=...,
        log_driver=...,
        networks=...,
        force_update=...,
    ) -> None: ...
    @property
    def container_spec(self): ...
    @property
    def resources(self): ...
    @property
    def restart_policy(self): ...
    @property
    def placement(self): ...
class ContainerSpec(dict):
    """Describes the behavior of containers that are part of a task, and is used
    when declaring a :py:class:`~docker.types.TaskTemplate`.
    Args:
        image (string): The image name to use for the container.
        command (string or list): The command to be run in the image.
        args (:py:class:`list`): Arguments to the command.
        hostname (string): The hostname to set on the container.
        env (dict): Environment variables.
        workdir (string): The working directory for commands to run in.
        user (string): The user inside the container.
        labels (dict): A map of labels to associate with the service.
        mounts (:py:class:`list`): A list of specifications for mounts to be
            added to containers created as part of the service. See the
            :py:class:`~docker.types.Mount` class for details.
        stop_grace_period (int): Amount of time to wait for the container to
            terminate before forcefully killing it.
        secrets (:py:class:`list`): List of :py:class:`SecretReference` to be
            made available inside the containers.
        tty (boolean): Whether a pseudo-TTY should be allocated.
        groups (:py:class:`list`): A list of additional groups that the
            container process will run as.
        open_stdin (boolean): Open ``stdin``
        read_only (boolean): Mount the container's root filesystem as read
            only.
        stop_signal (string): Set signal to stop the service's containers
        healthcheck (Healthcheck): Healthcheck
            configuration for this service.
        hosts (:py:class:`dict`): A set of host to IP mappings to add to
            the container's ``hosts`` file.
        dns_config (DNSConfig): Specification for DNS
            related configurations in resolver configuration file.
        configs (:py:class:`list`): List of :py:class:`ConfigReference` that
            will be exposed to the service.
        privileges (Privileges): Security options for the service's containers.
        isolation (string): Isolation technology used by the service's
            containers. Only used for Windows containers.
        init (boolean): Run an init inside the container that forwards signals
            and reaps processes.
        cap_add (:py:class:`list`): A list of kernel capabilities to add to the
            default set for the container.
        cap_drop (:py:class:`list`): A list of kernel capabilities to drop from
            the default set for the container.
        sysctls (:py:class:`dict`): A dict of sysctl values to add to
            the container
    """
    def __init__(
        self,
        image,
        command=...,
        args=...,
        hostname=...,
        env=...,
        workdir=...,
        user=...,
        labels=...,
        mounts=...,
        stop_grace_period=...,
        secrets=...,
        tty=...,
        groups=...,
        open_stdin=...,
        read_only=...,
        stop_signal=...,
        healthcheck=...,
        hosts=...,
        dns_config=...,
        configs=...,
        privileges=...,
        isolation=...,
        init=...,
        cap_add=...,
        cap_drop=...,
        sysctls=...,
    ) -> None: ...
class Mount(dict):
    """Describes a mounted folder's configuration inside a container. A list of
    :py:class:`Mount` would be used as part of a
    :py:class:`~docker.types.ContainerSpec`.
    Args:
        target (string): Container path.
        source (string): Mount source (e.g. a volume name or a host path).
        type (string): The mount type (``bind`` / ``volume`` / ``tmpfs`` /
            ``npipe``). Default: ``volume``.
        read_only (bool): Whether the mount should be read-only.
        consistency (string): The consistency requirement for the mount. One of
            ``default``, ``consistent``, ``cached``, ``delegated``.
        propagation (string): A propagation mode with the value ``[r]private``,
            ``[r]shared``, or ``[r]slave``. Only valid for the ``bind`` type.
        no_copy (bool): False if the volume should be populated with the data
            from the target. Default: ``False``. Only valid for the ``volume``
            type.
        labels (dict): User-defined name and labels for the volume. Only valid
            for the ``volume`` type.
        driver_config (DriverConfig): Volume driver configuration. Only valid
            for the ``volume`` type.
        tmpfs_size (int or string): The size for the tmpfs mount in bytes.
        tmpfs_mode (int): The permission mode for the tmpfs mount.
    """
    def __init__(
        self,
        target,
        source,
        type=...,
        read_only=...,
        consistency=...,
        propagation=...,
        no_copy=...,
        labels=...,
        driver_config=...,
        tmpfs_size=...,
        tmpfs_mode=...,
    ) -> None: ...
    @classmethod
    def parse_mount_string(cls, string): ...
class Resources(dict):
    """Configures resource allocation for containers when made part of a
    :py:class:`~docker.types.ContainerSpec`.
    Args:
        cpu_limit (int): CPU limit in units of 10^9 CPU shares.
        mem_limit (int): Memory limit in Bytes.
        cpu_reservation (int): CPU reservation in units of 10^9 CPU shares.
        mem_reservation (int): Memory reservation in Bytes.
        generic_resources (dict or :py:class:`list`): Node level generic
            resources, for example a GPU, using the following format:
            ``{ resource_name: resource_value }``. Alternatively, a list
            of resource specifications as defined by the Engine API.
    """
    def __init__(
        self, cpu_limit=..., mem_limit=..., cpu_reservation=..., mem_reservation=..., generic_resources=...
    ) -> None: ...
class UpdateConfig(dict):
    """Used to specify the way container updates should be performed by a service.
    Args:
        parallelism (int): Maximum number of tasks to be updated in one
            iteration (0 means unlimited parallelism). Default: 0.
        delay (int): Amount of time between updates, in nanoseconds.
        failure_action (string): Action to take if an updated task fails to
            run, or stops running during the update. Acceptable values are
            ``continue``, ``pause``, as well as ``rollback`` since API v1.28.
            Default: ``continue``
        monitor (int): Amount of time to monitor each updated task for
            failures, in nanoseconds.
        max_failure_ratio (float): The fraction of tasks that may fail during
            an update before the failure action is invoked, specified as a
            floating point number between 0 and 1. Default: 0
        order (string): Specifies the order of operations when rolling out an
            updated task. Either ``start-first`` or ``stop-first`` are accepted.
    """
    def __init__(
        self, parallelism=..., delay=..., failure_action=..., monitor=..., max_failure_ratio=..., order=...
    ) -> None: ...
class RollbackConfig(UpdateConfig):
    """Used to specify the way container rollbacks should be performed by a
    service.
    Args:
        parallelism (int): Maximum number of tasks to be rolled back in one
            iteration (0 means unlimited parallelism). Default: 0
        delay (int): Amount of time between rollbacks, in nanoseconds.
        failure_action (string): Action to take if a rolled back task fails to
            run, or stops running during the rollback. Acceptable values are
            ``continue``, ``pause`` or ``rollback``.
            Default: ``continue``
        monitor (int): Amount of time to monitor each rolled back task for
            failures, in nanoseconds.
        max_failure_ratio (float): The fraction of tasks that may fail during
            a rollback before the failure action is invoked, specified as a
            floating point number between 0 and 1. Default: 0
        order (string): Specifies the order of operations when rolling out a
            rolled back task. Either ``start-first`` or ``stop-first`` are
            accepted.
    """
class RestartConditionTypesEnum:
    # Valid restart-condition names (referenced as ``RestartPolicy.condition_types``
    # below); concrete values elided by the stub generator.
    _values = ...
class RestartPolicy(dict):
    """Used when creating a :py:class:`~docker.types.ContainerSpec`,
    dictates whether a container should restart after stopping or failing.
    Args:
        condition (string): Condition for restart (``none``, ``on-failure``,
            or ``any``). Default: `none`.
        delay (int): Delay between restart attempts. Default: 0
        max_attempts (int): Maximum attempts to restart a given container
            before giving up. Default value is 0, which is ignored.
        window (int): Time window used to evaluate the restart policy. Default
            value is 0, which is unbounded.
    """

    # Enum of valid restart conditions.
    condition_types = RestartConditionTypesEnum
    def __init__(self, condition=..., delay=..., max_attempts=..., window=...) -> None: ...
class DriverConfig(dict):
    """Indicates which driver to use, as well as its configuration. Can be used
    as ``log_driver`` in a :py:class:`~docker.types.ContainerSpec`,
    for the `driver_config` in a volume :py:class:`~docker.types.Mount`, or
    as the driver object in
    :py:meth:`create_secret`.

    Args:
        name (string): Name of the driver to use.
        options (dict): Driver-specific options. Default: ``None``.
    """

    # Parameter types follow the class docstring above.
    def __init__(self, name: str, options: dict = ...) -> None: ...
class EndpointSpec(dict):
    """Describes properties to access and load-balance a service.

    Args:
        mode (string): The mode of resolution to use for internal load
            balancing between tasks (``'vip'`` or ``'dnsrr'``). Defaults to
            ``'vip'`` if not provided.
        ports (dict): Exposed ports that this service is accessible on from the
            outside, in the form of ``{ published_port: target_port }`` or
            ``{ published_port: <port_config_tuple> }``. Port config tuple format
            is ``(target_port [, protocol [, publish_mode]])``.
            Ports can only be provided if the ``vip`` resolution mode is used.
    """

    # Parameter types follow the class docstring above.
    def __init__(self, mode: str = ..., ports: dict = ...) -> None: ...
# NOTE(review): presumably normalizes an EndpointSpec ``ports`` mapping into
# the list form expected by the Engine API -- confirm against docker-py.
def convert_service_ports(ports): ...
class ServiceMode(dict):
    """Indicate whether a service or a job should be deployed as a replicated
    or global service, and associated parameters.

    Args:
        mode (string): Can be either ``replicated``, ``global``,
            ``replicated-job`` or ``global-job``
        replicas (int): Number of replicas. For replicated services only.
        concurrency (int): Number of concurrent jobs. For replicated job
            services only.
    """

    # Parameter types follow the class docstring above.
    def __init__(self, mode: str, replicas: int = ..., concurrency: int = ...) -> None: ...
    # Number of replicas (meaningful for the replicated modes only, per the
    # class docstring).
    @property
    def replicas(self): ...
class SecretReference(dict):
    """Secret reference to be used as part of a :py:class:`ContainerSpec`.
    Describes how a secret is made accessible inside the service's
    containers.

    Args:
        secret_id (string): Secret's ID
        secret_name (string): Secret's name as defined at its creation.
        filename (string): Name of the file containing the secret. Defaults
            to the secret's name if not specified.
        uid (string): UID of the secret file's owner. Default: 0
        gid (string): GID of the secret file's group. Default: 0
        mode (int): File access mode inside the container. Default: 0o444
    """

    # NOTE(review): ``check_resource`` presumably validates the ``secret_id``
    # argument before the call proceeds -- confirm in docker.utils.decorators.
    # Parameter types follow the class docstring above.
    @check_resource("secret_id")
    def __init__(self, secret_id: str, secret_name: str, filename: str = ..., uid: str = ..., gid: str = ..., mode: int = ...) -> None: ...
class ConfigReference(dict):
    """Config reference to be used as part of a :py:class:`ContainerSpec`.
    Describes how a config is made accessible inside the service's
    containers.

    Args:
        config_id (string): Config's ID
        config_name (string): Config's name as defined at its creation.
        filename (string): Name of the file containing the config. Defaults
            to the config's name if not specified.
        uid (string): UID of the config file's owner. Default: 0
        gid (string): GID of the config file's group. Default: 0
        mode (int): File access mode inside the container. Default: 0o444
    """

    # NOTE(review): ``check_resource`` presumably validates the ``config_id``
    # argument before the call proceeds -- confirm in docker.utils.decorators.
    # Parameter types follow the class docstring above.
    @check_resource("config_id")
    def __init__(self, config_id: str, config_name: str, filename: str = ..., uid: str = ..., gid: str = ..., mode: int = ...) -> None: ...
class Placement(dict):
    """Placement constraints to be used as part of a :py:class:`TaskTemplate`.

    Args:
        constraints (:py:class:`list` of str): A list of constraints
        preferences (:py:class:`list` of tuple): Preferences provide a way
            to make the scheduler aware of factors such as topology. They
            are provided in order from highest to lowest precedence and
            are expressed as ``(strategy, descriptor)`` tuples. See
            :py:class:`PlacementPreference` for details.
        maxreplicas (int): Maximum number of replicas per node
        platforms (:py:class:`list` of tuple): A list of platforms
            expressed as ``(arch, os)`` tuples
    """

    # Parameter types follow the class docstring above; note the signature
    # order differs from the docstring order (``platforms`` before
    # ``maxreplicas``).
    def __init__(self, constraints: list = ..., preferences: list = ..., platforms: list = ..., maxreplicas: int = ...) -> None: ...
class PlacementPreference(dict):
    """Placement preference to be used as an element in the list of
    preferences for :py:class:`Placement` objects.

    Args:
        strategy (string): The placement strategy to implement. Currently,
            the only supported strategy is ``spread``.
        descriptor (string): A label descriptor. For the spread strategy,
            the scheduler will try to spread tasks evenly over groups of
            nodes identified by this label.
    """

    # Parameter types follow the class docstring above; both are required.
    def __init__(self, strategy: str, descriptor: str) -> None: ...
class DNSConfig(dict):
    """Specification for DNS related configurations in resolver configuration
    file (``resolv.conf``). Part of a :py:class:`ContainerSpec` definition.

    Args:
        nameservers (:py:class:`list`): The IP addresses of the name
            servers.
        search (:py:class:`list`): A search list for host-name lookup.
        options (:py:class:`list`): A list of internal resolver variables
            to be modified (e.g., ``debug``, ``ndots:3``, etc.).
    """

    # Parameter types follow the class docstring above.
    def __init__(self, nameservers: list = ..., search: list = ..., options: list = ...) -> None: ...
class Privileges(dict):
    r"""Security options for a service's containers.
    Part of a :py:class:`ContainerSpec` definition.

    Args:
        credentialspec_file (str): Load credential spec from this file.
            The file is read by the daemon, and must be present in the
            CredentialSpecs subdirectory in the docker data directory,
            which defaults to ``C:\ProgramData\Docker\`` on Windows.
            Can not be combined with credentialspec_registry.
        credentialspec_registry (str): Load credential spec from this value
            in the Windows registry. The specified registry value must be
            located in: ``HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion
            \Virtualization\Containers\CredentialSpecs``.
            Can not be combined with credentialspec_file.
        selinux_disable (boolean): Disable SELinux
        selinux_user (string): SELinux user label
        selinux_role (string): SELinux role label
        selinux_type (string): SELinux type label
        selinux_level (string): SELinux level label
    """

    # Parameter types follow the class docstring above; the two
    # ``credentialspec_*`` options are mutually exclusive.
    def __init__(
        self,
        credentialspec_file: str = ...,
        credentialspec_registry: str = ...,
        selinux_disable: bool = ...,
        selinux_user: str = ...,
        selinux_role: str = ...,
        selinux_type: str = ...,
        selinux_level: str = ...,
    ) -> None: ...
class NetworkAttachmentConfig(dict):
    """Network attachment options for a service.

    Args:
        target (str): The target network for attachment.
            Can be a network name or ID.
        aliases (:py:class:`list`): A list of discoverable alternate names
            for the service.
        options (:py:class:`dict`): Driver attachment options for the
            network target.
    """

    # Parameter types follow the class docstring above.
    def __init__(self, target: str, aliases: list = ..., options: dict = ...) -> None: ...

View File

@@ -1,48 +0,0 @@
"""This type stub file was generated by pyright."""
class SwarmSpec(dict):
    """Describe a Swarm's configuration and options. Use
    :py:meth:`~docker.api.swarm.SwarmApiMixin.create_swarm_spec`
    to instantiate.
    """

    # NOTE(review): parameter types are elided by the stub generator; see
    # ``SwarmApiMixin.create_swarm_spec`` for the expected value types.
    def __init__(
        self,
        version,
        task_history_retention_limit=...,
        snapshot_interval=...,
        keep_old_snapshots=...,
        log_entries_for_slow_followers=...,
        heartbeat_tick=...,
        election_tick=...,
        dispatcher_heartbeat_period=...,
        node_cert_expiry=...,
        external_cas=...,
        name=...,
        labels=...,
        signing_ca_cert=...,
        signing_ca_key=...,
        ca_force_rotate=...,
        autolock_managers=...,
        log_driver=...,
    ) -> None: ...
class SwarmExternalCA(dict):
    """Configuration for forwarding signing requests to an external
    certificate authority.

    Args:
        url (string): URL where certificate signing requests should be
            sent.
        protocol (string): Protocol for communication with the external CA.
        options (dict): An object with key/value pairs that are interpreted
            as protocol-specific options for the external CA driver.
        ca_cert (string): The root CA certificate (in PEM format) this
            external CA uses to issue TLS certificates (assumed to be to
            the current swarm root CA certificate if not provided).
    """

    # Parameter types follow the class docstring above.
    def __init__(self, url: str, protocol: str = ..., options: dict = ..., ca_cert: str = ...) -> None: ...

View File

View File

@@ -1,31 +0,0 @@
"""This type stub file was generated by pyright."""
# Path-separator constant used when normalizing build-context paths (value
# elided by the stub generator).
_SEP = ...
def tar(path, exclude=..., dockerfile=..., fileobj=..., gzip=...): ...
def exclude_paths(root, patterns, dockerfile=...):  # -> set[Unknown]:
    """Given a root directory path and a list of .dockerignore patterns, return
    an iterator of all paths (both regular files and directories) in the root
    directory that do *not* match any of the patterns.
    All paths returned are relative to the root.
    """
    ...
def build_file_list(root): ...
def create_archive(root, files=..., fileobj=..., gzip=..., extra_files=...): ...
def mkbuildcontext(dockerfile): ...
def split_path(p): ...
def normalize_slashes(p): ...
def walk(root, patterns, default=...): ...
class PatternMatcher:
    # NOTE(review): presumably matches paths against a list of
    # .dockerignore-style patterns (cf. ``exclude_paths`` above) -- confirm
    # against docker-py.
    def __init__(self, patterns) -> None: ...
    def matches(self, filepath): ...
    def walk(self, root): ...
class Pattern:
    # A single pattern; ``normalize`` is an alternate-constructor-style
    # classmethod operating on the raw pattern string.
    def __init__(self, pattern_str) -> None: ...
    @classmethod
    def normalize(cls, p): ...
    def match(self, filepath): ...

View File

@@ -1,15 +0,0 @@
"""This type stub file was generated by pyright."""
# Config-file name constants and module logger (values elided by the stub
# generator).
DOCKER_CONFIG_FILENAME = ...
LEGACY_DOCKER_CONFIG_FILENAME = ...
log = ...
def find_config_file(config_path=...): ...
def config_path_from_environment(): ...
def home_dir():  # -> str:
    """Get the user's home directory, using the same logic as the Docker Engine
    client - use %USERPROFILE% on Windows, $HOME/getuid on POSIX.
    """
    ...
def load_general_config(config_path=...): ...

View File

@@ -1,5 +0,0 @@
"""This type stub file was generated by pyright."""
# Decorator factories used across docker-py's API mixins (``check_resource``
# is applied as ``@check_resource("secret_id")`` on resource methods).
# NOTE(review): exact wrapped-signature behavior elided by the stub generator.
def check_resource(resource_name): ...
def minimum_version(version): ...
def update_headers(f): ...

View File

@@ -1,47 +0,0 @@
"""This type stub file was generated by pyright."""
"""Filename matching with shell patterns.
fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case in account.
The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.
The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
"""
__all__ = ["fnmatch", "fnmatchcase", "translate"]
_cache = ...
_MAXCACHE = ...
def fnmatch(name, pat): # -> bool:
"""Test whether FILENAME matches PATTERN.
Patterns are Unix shell style:
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
An initial period in FILENAME is not special.
Both FILENAME and PATTERN are first case-normalized
if the operating system requires it.
If you don't want this, use fnmatchcase(FILENAME, PATTERN).
"""
...
def fnmatchcase(name, pat): # -> bool:
"""Test whether FILENAME matches PATTERN, including case.
This is a version of fnmatch() which doesn't case-normalize
its arguments.
"""
...
def translate(pat): # -> LiteralString | str:
"""Translate a shell PATTERN to a regular expression.
There is no way to quote meta-characters.
"""
...

View File

@@ -1,34 +0,0 @@
"""This type stub file was generated by pyright."""
# NOTE(review): presumably a shared ``json.JSONDecoder`` instance used by
# ``json_splitter`` -- value elided by the stub generator; confirm against
# docker-py.
json_decoder = ...
def stream_as_text(stream):  # -> Generator[Unknown | str, Any, None]:
    """Given a stream of bytes or text, if any of the items in the stream
    are bytes convert them to text.
    This function can be removed once we return text streams
    instead of byte streams.
    """
    ...
def json_splitter(buffer):  # -> tuple[Any, Unknown] | None:
    """Attempt to parse a json object from a buffer. If there is at least one
    object, return it and the rest of the buffer, otherwise return None.
    """
    ...
def json_stream(stream):  # -> Generator[Any, Any, None]:
    """Given a stream of text, return a stream of json objects.
    This handles streams which are inconsistently buffered (some entries may
    be newline delimited, and others are not).
    """
    ...
def line_splitter(buffer, separator=...): ...
def split_buffer(stream, splitter=..., decoder=...):  # -> Generator[Unknown | str, Any, None]:
    """Given a generator which yields strings and a splitter function,
    joins all input, splits on the separator and yields each chunk.
    Unlike string.split(), each chunk includes the trailing
    separator, except for the last one if none was found on the end
    of the input.
    """
    ...

View File

@@ -1,32 +0,0 @@
"""This type stub file was generated by pyright."""
class ProxyConfig(dict):
    """Hold the client's proxy configuration."""

    # NOTE(review): the four properties presumably expose the corresponding
    # entries of the stored mapping (http/https/ftp/no_proxy) -- confirm
    # against docker-py.
    @property
    def http(self): ...
    @property
    def https(self): ...
    @property
    def ftp(self): ...
    @property
    def no_proxy(self): ...
    @staticmethod
    def from_dict(config):  # -> ProxyConfig:
        """Instantiate a new ProxyConfig from a dictionary that represents a
        client configuration, as described in `the documentation`_.
        .. _the documentation:
            https://docs.docker.com/network/proxy/#configure-the-docker-client
        """
        ...
    def get_environment(self):  # -> dict[Unknown, Unknown]:
        """Return a dictionary representing the environment variables used to
        set the proxy settings.
        """
        ...
    def inject_proxy_environment(self, environment):  # -> list[Unknown | str]:
        """Given a list of strings representing environment variables, prepend the
        environment variables corresponding to the proxy settings.
        """
        ...

View File

@@ -1,67 +0,0 @@
"""This type stub file was generated by pyright."""
# Stream identifiers for the Engine attach protocol (see
# ``next_frame_header`` below); values elided by the stub generator.
STDOUT = ...
STDERR = ...
class SocketError(Exception): ...
# NOTE(review): presumably the sentinel error code seen when a Windows named
# pipe is closed -- value elided; confirm against docker-py.
NPIPE_ENDED = ...
def read(socket, n=...):
    """Reads at most n bytes from socket."""
    ...
def read_exactly(socket, n):  # -> bytes:
    """Reads exactly n bytes from socket
    Raises SocketError if there isn't enough data.
    """
    ...
def next_frame_header(socket):  # -> tuple[Literal[-1], Literal[-1]] | tuple[Any, Any]:
    """Returns the stream and size of the next frame of data waiting to be read
    from socket, according to the protocol defined here:
    https://docs.docker.com/engine/api/v1.24/#attach-to-a-container
    """
    ...
def frames_iter(
    socket, tty
):  # -> Generator[tuple[Literal[1], Unknown], None, None] | Generator[tuple[Any | Literal[-1], Unknown], Any, None]:
    """Return a generator of frames read from socket. A frame is a tuple where
    the first item is the stream number and the second item is a chunk of data.
    If the tty setting is enabled, the streams are multiplexed into the stdout
    stream.
    """
    ...
def frames_iter_no_tty(socket):  # -> Generator[tuple[Any | Literal[-1], Unknown], Any, None]:
    """Returns a generator of data read from the socket when the tty setting is
    not enabled.
    """
    ...
def frames_iter_tty(socket):  # -> Generator[Unknown, Any, None]:
    """Return a generator of data read from the socket when the tty setting is
    enabled.
    """
    ...
def consume_socket_output(frames, demux=...):  # -> bytes | tuple[None, ...]:
    """Iterate through frames read from the socket and return the result.
    Args:
        demux (bool):
            If False, stdout and stderr are multiplexed, and the result is the
            concatenation of all the frames. If True, the streams are
            demultiplexed, and the result is a 2-tuple where each item is the
            concatenation of frames belonging to the same stream.
    """
    ...
def demux_adaptor(stream_id, data):  # -> tuple[Unknown, None] | tuple[None, Unknown]:
    """Utility to demultiplex stdout and stderr when reading frames from the
    socket.
    """
    ...

View File

@@ -1,48 +0,0 @@
"""This type stub file was generated by pyright."""
# NOTE(review): presumably a namedtuple type for the components of a parsed
# host URL (cf. ``parse_host`` below) -- value elided; confirm against
# docker-py.
URLComponents = ...
def create_ipam_pool(*args, **kwargs): ...
def create_ipam_config(*args, **kwargs): ...
def decode_json_header(header): ...
def compare_version(v1, v2):  # -> Literal[0, -1, 1]:
    """Compare docker versions.
    >>> v1 = '1.9'
    >>> v2 = '1.10'
    >>> compare_version(v1, v2)
    1
    >>> compare_version(v2, v1)
    -1
    >>> compare_version(v2, v2)
    0
    """
    ...
def version_lt(v1, v2): ...
def version_gte(v1, v2): ...
def convert_port_bindings(port_bindings): ...
def convert_volume_binds(binds): ...
def convert_tmpfs_mounts(tmpfs): ...
def convert_service_networks(networks): ...
def parse_repository_tag(repo_name): ...
def parse_host(addr, is_win32=..., tls=...): ...
def parse_devices(devices): ...
def kwargs_from_env(ssl_version=..., assert_hostname=..., environment=...): ...
def convert_filters(filters): ...
def datetime_to_timestamp(dt):
    """Convert a UTC datetime to a Unix timestamp."""
    ...
def parse_bytes(s): ...
def normalize_links(links): ...
def parse_env_file(env_file):  # -> dict[Unknown, Unknown]:
    """Reads a line-separated environment file.
    The format of each line should be "key=value".
    """
    ...
def split_command(command): ...
def format_environment(environment): ...
def format_extra_hosts(extra_hosts, task=...): ...
def create_host_config(self, *args, **kwargs): ...

View File

@@ -1,6 +1,12 @@
from .formats import NOTEBOOK_EXTENSIONS, get_format_implementation, guess_format
from typing import Any
from .jupytext import read, reads, write, writes
from .formats import NOTEBOOK_EXTENSIONS
from .formats import get_format_implementation
from .formats import guess_format
from .jupytext import read
from .jupytext import reads
from .jupytext import write
from .jupytext import writes
# NOTE(review): Jupyter server extension entry point; presumably registers
# Jupytext's machinery with the server ``app`` -- confirm against jupytext.
def load_jupyter_server_extension(app: Any) -> None: ...

26
typings/jupytext/config.pyi generated Normal file
View File

@@ -0,0 +1,26 @@
"""Find and read Jupytext configuration files"""
from traitlets import Bool
from traitlets import Enum
from traitlets import Float
from traitlets import Unicode
from traitlets import Union
from traitlets.config import Configurable
class JupytextConfiguration(Configurable):
    """Jupytext Configuration's options"""

    # Traitlets descriptors for Jupytext's configurable options; the concrete
    # trait defaults are elided by the stub generator.
    formats: Union = ...
    default_jupytext_formats: Unicode = ...
    cell_metadata_filter: Unicode = ...
    default_cell_metadata_filter: Unicode = ...
    comment_magics: Enum = ...
    split_at_heading: Bool = ...
    sphinx_convert_rst2md: Bool = ...
    doxygen_equation_markers: Bool = ...
    outdated_text_notebook_margin: Float = ...
    cm_config_log_level: Enum = ...
    cell_markers: Unicode = ...
    default_cell_markers: Unicode = ...
    # NOTE(review): annotated as ``Union | Unicode`` in the stub -- presumably
    # a Union trait; confirm against jupytext's config module.
    notebook_extensions: Union | Unicode = ...
    custom_cell_magics: Unicode = ...

View File

@@ -13,14 +13,14 @@ class NotebookFormatDescription:
def __init__(
self,
format_name,
extension,
header_prefix,
cell_reader_class,
cell_exporter_class,
current_version_number,
header_suffix=...,
min_readable_version_number=...,
format_name: str,
extension: str,
header_prefix: str,
cell_reader_class: Any,
cell_exporter_class: Any,
current_version_number: int,
header_suffix: str = ...,
min_readable_version_number: int = ...,
) -> None: ...
JUPYTEXT_FORMATS: list[NotebookFormatDescription] = ...
@@ -31,75 +31,6 @@ def get_format_implementation(ext: str, format_name: str = ...) -> NotebookForma
"""Return the implementation for the desired format"""
...
# Stub declarations for jupytext.formats; implementations are elided by the
# stub generator.
def read_metadata(text: str, ext: str) -> Any:
    """Return the header metadata"""
    ...
def read_format_from_metadata(text: str, ext: str) -> str | None:
    """Return the format of the file, when that information is available from the metadata"""
    ...
def guess_format(text: str, ext: str) -> tuple[str, dict[str, Any]]:
    """Guess the format and format options of the file, given its extension and content"""
    ...
def divine_format(text):  # -> Literal['ipynb', 'md']:
    """Guess the format of the notebook, based on its content #148"""
    ...
def check_file_version(notebook, source_path, outputs_path):  # -> None:
    """Raise if file version in source file would override outputs"""
    ...
def format_name_for_ext(metadata, ext, cm_default_formats=..., explicit_default=...):  # -> str | None:
    """Return the format name for that extension"""
    ...
def identical_format_path(fmt1, fmt2):  # -> bool:
    """Do the two (long representation) of formats target the same file?"""
    ...
def update_jupytext_formats_metadata(metadata, new_format):  # -> None:
    """Update the jupytext_format metadata in the Jupyter notebook"""
    ...
def rearrange_jupytext_metadata(metadata):  # -> None:
    """Convert the jupytext_formats metadata entry to jupytext/formats, etc. See #91"""
    ...
def long_form_one_format(
    jupytext_format, metadata=..., update=..., auto_ext_requires_language_info=...
):  # -> dict[Unknown, Unknown]:
    """Parse 'sfx.py:percent' into {'suffix':'sfx', 'extension':'py', 'format_name':'percent'}"""
    ...
def long_form_multiple_formats(
    jupytext_formats, metadata=..., auto_ext_requires_language_info=...
):  # -> list[Unknown] | list[dict[Unknown, Unknown]]:
    """Convert a concise encoding of jupytext.formats to a list of formats, encoded as dictionaries"""
    ...
def short_form_one_format(jupytext_format):
    """Represent one jupytext format as a string"""
    ...
def short_form_multiple_formats(jupytext_formats):  # -> LiteralString:
    """Convert jupytext formats, represented as a list of dictionaries, to a comma separated list"""
    ...
# Format-validation lookup tables (values elided by the stub generator).
_VALID_FORMAT_INFO = ...
_BINARY_FORMAT_OPTIONS = ...
_VALID_FORMAT_OPTIONS = ...
_VALID_FORMAT_NAMES = ...
def validate_one_format(jupytext_format):  # -> dict[Unknown, Unknown]:
    """Validate extension and options for the given format"""
    ...
def auto_ext_from_metadata(metadata):  # -> str:
    """Script extension from notebook metadata"""
    ...
def check_auto_ext(fmt, metadata, option):
    """Replace the auto extension with the actual file extension, and raise a ValueError if it cannot be determined"""
    ...

View File

@@ -1,6 +1,10 @@
from typing import Any, IO
from nbformat.v4.rwbase import NotebookReader, NotebookWriter
from typing import IO
from typing import Any
from nbformat import NotebookNode
from nbformat.v4.rwbase import NotebookReader
from nbformat.v4.rwbase import NotebookWriter
from .config import JupytextConfiguration
class NotSupportedNBFormatVersion(NotImplementedError):

View File

@@ -1,2 +1,3 @@
from . import v4 as v4
from .notebooknode import NotebookNode as NotebookNode, from_dict as from_dict
from .notebooknode import NotebookNode as NotebookNode
from .notebooknode import from_dict as from_dict

View File

@@ -2,9 +2,11 @@
Can probably be replaced by types.SimpleNamespace from Python 3.3
"""
from typing import Any, Self
from typing import Any
from typing import Dict
from typing import Self
class Struct(dict[str, Any]):
class Struct(Dict[str, Any]):
"""A dict subclass with attribute style access.
This dict subclass has a a few extra features:

View File

@@ -1,35 +1,18 @@
"""The main API for the v4 notebook format."""
from .convert import downgrade
from .convert import upgrade
from .nbbase import nbformat
from .nbbase import nbformat_minor
from .nbbase import nbformat_schema
from .nbbase import new_code_cell
from .nbbase import new_markdown_cell
from .nbbase import new_notebook
from .nbbase import new_output
from .nbbase import new_raw_cell
from .nbbase import output_from_msg
from .nbjson import reads
from .nbjson import to_notebook
from .nbjson import writes
from .convert import downgrade as downgrade
from .convert import upgrade as upgrade
from .nbbase import nbformat as nbformat
from .nbbase import nbformat_minor as nbformat_minor
from .nbbase import nbformat_schema as nbformat_schema
from .nbbase import new_code_cell as new_code_cell
from .nbbase import new_markdown_cell as new_markdown_cell
from .nbbase import new_notebook as new_notebook
from .nbbase import new_output as new_output
from .nbbase import new_raw_cell as new_raw_cell
from .nbbase import output_from_msg as output_from_msg
from .nbjson import reads as reads
from .nbjson import to_notebook as to_notebook
from .nbjson import writes as writes
__all__ = [
"nbformat",
"nbformat_minor",
"nbformat_schema",
"new_code_cell",
"new_markdown_cell",
"new_raw_cell",
"new_notebook",
"new_output",
"output_from_msg",
"reads",
"writes",
"to_notebook",
"downgrade",
"upgrade",
]
reads_json = ...
writes_json = ...
to_notebook_json = ...
reads_json = reads
writes_json = writes
to_notebook_json = to_notebook

View File

@@ -1,95 +1,19 @@
"""
This type stub file was generated by pyright.
"""
from _typeshed import Incomplete
from nbformat import v3 as v3
from nbformat import validator as validator
"""Code for converting notebooks to and from v3."""
from .nbbase import NotebookNode as NotebookNode
from .nbbase import nbformat as nbformat
from .nbbase import nbformat_minor as nbformat_minor
from .nbbase import random_cell_id as random_cell_id
def upgrade(nb, from_version=..., from_minor=...):
"""Convert a notebook to latest v4.
Parameters
----------
nb : NotebookNode
The Python representation of the notebook to convert.
from_version : int
The original version of the notebook to convert.
from_minor : int
The original minor version of the notebook to convert (only relevant for v >= 3).
"""
...
def upgrade_cell(cell):
"""upgrade a cell from v3 to v4
heading cell:
- -> markdown heading
code cell:
- remove language metadata
- cell.input -> cell.source
- cell.prompt_number -> cell.execution_count
- update outputs
"""
...
def downgrade_cell(cell):
"""downgrade a cell from v4 to v3
code cell:
- set cell.language
- cell.input <- cell.source
- cell.prompt_number <- cell.execution_count
- update outputs
markdown cell:
- single-line heading -> heading cell
"""
...
_mime_map = ...
def to_mime_key(d):
"""convert dict with v3 aliases to plain mime-type keys"""
...
def from_mime_key(d): # -> dict[Unknown, Unknown]:
"""convert dict with mime-type keys to v3 aliases"""
...
def upgrade_output(output):
"""upgrade a single code cell output from v3 to v4
- pyout -> execute_result
- pyerr -> error
- output.type -> output.data.mime/type
- mime-type keys
- stream.stream -> stream.name
"""
...
def downgrade_output(output):
"""downgrade a single code cell output to v3 from v4
- pyout <- execute_result
- pyerr <- error
- output.data.mime/type -> output.type
- un-mime-type keys
- stream.stream <- stream.name
"""
...
def upgrade_outputs(outputs): # -> list[Unknown]:
"""upgrade outputs of a code cell from v3 to v4"""
...
def downgrade_outputs(outputs): # -> list[Unknown]:
"""downgrade outputs of a code cell to v3 from v4"""
...
def downgrade(nb):
"""Convert a v4 notebook to v3.
Parameters
----------
nb : NotebookNode
The Python representation of the notebook to convert.
"""
...
def upgrade(nb, from_version: Incomplete | None = ..., from_minor: Incomplete | None = ...): ...
def upgrade_cell(cell): ...
def downgrade_cell(cell): ...
def to_mime_key(d): ...
def from_mime_key(d): ...
def upgrade_output(output): ...
def downgrade_output(output): ...
def upgrade_outputs(outputs): ...
def downgrade_outputs(outputs): ...
def downgrade(nb): ...

View File

@@ -1,52 +1,14 @@
"""
This type stub file was generated by pyright.
"""
from _typeshed import Incomplete
from nbformat.notebooknode import NotebookNode as NotebookNode
"""Python API for composing notebook elements
nbformat: int
nbformat_minor: int
nbformat_schema: Incomplete
The Python representation of a notebook is a nested structure of
dictionary subclasses that support attribute access.
The functions in this module are merely helpers to build the structs
in the right form.
"""
from typing import Any
import nbformat
def validate(node, ref=...): # -> None:
"""validate a v4 node"""
...
def new_output(output_type, data=..., **kwargs): # -> NotebookNode:
"""Create a new output, to go in the ``cell.outputs`` list of a code cell."""
...
def output_from_msg(msg): # -> NotebookNode:
"""Create a NotebookNode for an output from a kernel's IOPub message.
Returns
-------
NotebookNode: the output as a notebook node.
Raises
------
ValueError: if the message is not an output message.
"""
...
def new_code_cell(source=..., **kwargs): # -> NotebookNode:
"""Create a new code cell"""
...
def new_markdown_cell(source: str = ..., **kwargs: Any) -> nbformat.NotebookNode:
"""Create a new markdown cell"""
...
def new_raw_cell(source=..., **kwargs): # -> NotebookNode:
"""Create a new raw cell"""
...
def new_notebook(**kwargs): # -> NotebookNode:
"""Create a new notebook"""
...
def validate(node, ref: Incomplete | None = ...): ...
def new_output(output_type, data: Incomplete | None = ..., **kwargs): ...
def output_from_msg(msg): ...
def new_code_cell(source: str = ..., **kwargs): ...
def new_markdown_cell(source: str = ..., **kwargs): ...
def new_raw_cell(source: str = ..., **kwargs): ...
def new_notebook(**kwargs): ...

View File

@@ -1,45 +1,26 @@
"""
This type stub file was generated by pyright.
"""
import json
from .rwbase import NotebookReader
from .rwbase import NotebookWriter
from _typeshed import Incomplete
from nbformat.notebooknode import from_dict as from_dict
"""Read and write notebooks in JSON format."""
from .rwbase import NotebookReader as NotebookReader
from .rwbase import NotebookWriter as NotebookWriter
from .rwbase import rejoin_lines as rejoin_lines
from .rwbase import split_lines as split_lines
from .rwbase import strip_transient as strip_transient
class BytesEncoder(json.JSONEncoder):
"""A JSON encoder that accepts b64 (and other *ascii*) bytestrings."""
def default(self, obj): # -> str | Any:
"""Get the default value of an object."""
...
def default(self, obj): ...
class JSONReader(NotebookReader):
"""A JSON notebook reader."""
def reads(self, s, **kwargs):
"""Read a JSON string into a Notebook object"""
...
def to_notebook(self, d, **kwargs):
"""Convert a disk-format notebook dict to in-memory NotebookNode
handles multi-line values as strings, scrubbing of transient values, etc.
"""
...
def reads(self, s, **kwargs): ...
def to_notebook(self, d, **kwargs): ...
class JSONWriter(NotebookWriter):
"""A JSON notebook writer."""
def writes(self, nb, **kwargs): ...
def writes(self, nb, **kwargs): # -> str:
"""Serialize a NotebookNode object as a JSON string"""
...
_reader = ...
_writer = ...
reads = ...
read = ...
to_notebook = ...
write = ...
writes = ...
reads: Incomplete
read: Incomplete
to_notebook: Incomplete
write: Incomplete
writes: Incomplete

View File

@@ -1,56 +1,11 @@
"""
This type stub file was generated by pyright.
"""
"""Base classes and utilities for readers and writers."""
def rejoin_lines(nb):
"""rejoin multiline text into strings
For reversing effects of ``split_lines(nb)``.
This only rejoins lines that have been split, so if text objects were not split
they will pass through unchanged.
Used when reading JSON files that may have been passed through split_lines.
"""
...
_non_text_split_mimes = ...
def split_lines(nb):
"""split likely multiline text into lists of strings
For file output more friendly to line-based VCS. ``rejoin_lines(nb)`` will
reverse the effects of ``split_lines(nb)``.
Used when writing JSON files.
"""
...
def strip_transient(nb):
"""Strip transient values that shouldn't be stored in files.
This should be called in *both* read and write.
"""
...
def rejoin_lines(nb): ...
def split_lines(nb): ...
def strip_transient(nb): ...
class NotebookReader:
"""A class for reading notebooks."""
def reads(self, s, **kwargs):
"""Read a notebook from a string."""
...
def read(self, fp, **kwargs):
"""Read a notebook from a file like object"""
...
def reads(self, s, **kwargs) -> None: ...
def read(self, fp, **kwargs): ...
class NotebookWriter:
"""A class for writing notebooks."""
def writes(self, nb, **kwargs):
"""Write a notebook to a string."""
...
def write(self, nb, fp, **kwargs):
"""Write a notebook to a file like object"""
...
def writes(self, nb, **kwargs) -> None: ...
def write(self, nb, fp, **kwargs): ...