diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ffd0cb68..604b953f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ ci: autoupdate_schedule: weekly - skip: [changelog-dry-run, mypy, yapf, eslint, prettier] + skip: [eslint, prettier, mypy] autofix_commit_msg: "ci: auto fixes from pre-commit.ci\n\nFor more information, see https://pre-commit.ci" autoupdate_commit_msg: "ci: pre-commit autoupdate [pre-commit.ci]" default_language_version: @@ -25,6 +25,10 @@ repos: - id: ruff verbose: true args: [--exit-non-zero-on-fix, --show-fixes] + types_or: [pyi, python3, jupyter] + - id: ruff-format + verbose: true + types: [pyi] - repo: https://github.com/econchick/interrogate rev: 1.5.0 hooks: @@ -47,35 +51,6 @@ repos: verbose: true files: \.[jt]sx?$ # *.js, *.jsx, *.ts and *.tsx types_or: [javascript, yaml, ts] - - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.6.1 - hooks: - - id: mypy - verbose: true - exclude: | - (?x)^( - examples/.*| - tools/.*| - tests/.*| - openllm-python/src/openllm/playground/.*| - openllm-python/tests/.*| - openllm-client/src/openllm_client/pb.*| - .github/.*| - cz.py | - bench.py | - hatch_build.py - )$ - additional_dependencies: - - click==8.1.3 - - peft - - bentoml==1.1.1 - - build==0.10.0 - - transformers>=4.31.0 - - pandas-stubs - - types-psutil - - types-tabulate - - types-PyYAML - - types-protobuf - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.5.0 hooks: @@ -100,8 +75,9 @@ repos: - id: check-useless-excludes - repo: local hooks: - - id: changelog-dry-run - name: Running changelog dry-run - entry: hatch run changelog + - id: mypy + name: mypy language: system - files: CHANGELOG.md + always_run: true + pass_filenames: false + entry: mypy -m openllm_client diff --git a/compile.sh b/compile.sh index 596daadc..73ee5c64 100644 --- a/compile.sh +++ b/compile.sh @@ -1,7 +1,6 @@ #!/usr/bin/env bash SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &> /dev/null && 
pwd) -pip install -U mypy==1.5.1 build==0.10.0 hatchling==1.18.0 hatch-vcs==0.3.0 hatch-fancy-pypi-readme==23.1.0 hatch-mypyc==0.16.0 -HATCH_VERBOSE=3 MYPY_CONFIG_FILE_DIR="${SCRIPT_DIR}" HATCH_BUILD_HOOKS_ENABLE=1 MYPYPATH="${SCRIPT_DIR}/typings" python -m build openllm-python -w -C--global-option=--verbose "$@" -HATCH_VERBOSE=3 MYPY_CONFIG_FILE_DIR="${SCRIPT_DIR}" HATCH_BUILD_HOOKS_ENABLE=1 MYPYPATH="${SCRIPT_DIR}/typings" python -m build openllm-core -w -C--global-option=--verbose "$@" -HATCH_VERBOSE=3 MYPY_CONFIG_FILE_DIR="${SCRIPT_DIR}" HATCH_BUILD_HOOKS_ENABLE=1 MYPYPATH="${SCRIPT_DIR}/typings" python -m build openllm-client -w -C--global-option=--verbose "$@" +pip install -U mypy==1.6.1 build==0.10.0 hatchling==1.18.0 hatch-vcs==0.3.0 hatch-fancy-pypi-readme==23.1.0 hatch-mypyc==0.16.0 +HATCH_VERBOSE=3 MYPY_CONFIG_FILE_DIR="${SCRIPT_DIR}" HATCH_BUILD_HOOKS_ENABLE=1 python -m build openllm-core -w -C--global-option=--verbose "$@" +HATCH_VERBOSE=3 MYPY_CONFIG_FILE_DIR="${SCRIPT_DIR}" HATCH_BUILD_HOOKS_ENABLE=1 python -m build openllm-python -w -C--global-option=--verbose "$@" hatch clean diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 00000000..3c5801dd --- /dev/null +++ b/mypy.ini @@ -0,0 +1,11 @@ +[mypy] +pretty = true +python_version = 3.8 +show_error_codes = true +strict = true +warn_unused_configs = True +ignore_missing_imports = true +check_untyped_defs = true +warn_unreachable = true +modules = openllm_client +files = openllm-client/src/openllm_client/__init__.pyi diff --git a/openllm-client/pyproject.toml b/openllm-client/pyproject.toml index 988573ec..eff8c7bc 100644 --- a/openllm-client/pyproject.toml +++ b/openllm-client/pyproject.toml @@ -104,36 +104,6 @@ exclude = [ "/generate-grpc-stubs", "/dev.Dockerfile", ] -[tool.hatch.build.targets.wheel.hooks.mypyc] -dependencies = [ - "hatch-mypyc==0.16.0", - "mypy==1.5.1", - # avoid https://github.com/pallets/click/issues/2558 - "click==8.1.3", - "bentoml==1.1.2", - "transformers>=4.32.1", - 
"pandas-stubs", - "types-psutil", - "types-tabulate", - "types-PyYAML", - "types-protobuf", -] -enable-by-default = false -include = ["src/openllm_client/__init__.py", "src/openllm_client/client.py"] -# NOTE: This is consistent with pyproject.toml -mypy-args = [ - "--strict", - # this is because all transient library doesn't have types - "--allow-subclassing-any", - "--check-untyped-defs", - "--ignore-missing-imports", - "--no-warn-return-any", - "--warn-unreachable", - "--no-warn-no-return", - "--no-warn-unused-ignores", -] -options = { verbose = true, strip_asserts = true, debug_level = "2", opt_level = "3", include_runtime_files = true } -require-runtime-dependencies = true [tool.hatch.metadata.hooks.fancy-pypi-readme] content-type = "text/markdown" diff --git a/openllm-client/src/openllm_client/__init__.py b/openllm-client/src/openllm_client/__init__.py index 1147e6e7..c6df8207 100644 --- a/openllm-client/src/openllm_client/__init__.py +++ b/openllm-client/src/openllm_client/__init__.py @@ -1,7 +1,2 @@ -from __future__ import annotations - from ._http import AsyncHTTPClient as AsyncHTTPClient from ._http import HTTPClient as HTTPClient - -# from ._grpc import GrpcClient as GrpcClient -# from ._grpc import AsyncGrpcClient as AsyncGrpcClient diff --git a/openllm-client/src/openllm_client/__init__.pyi b/openllm-client/src/openllm_client/__init__.pyi new file mode 100644 index 00000000..7f87f48b --- /dev/null +++ b/openllm-client/src/openllm_client/__init__.pyi @@ -0,0 +1,76 @@ +from typing import Any +from typing import AsyncGenerator +from typing import Dict +from typing import Iterator +from typing import List +from typing import Optional +from typing import Union + +import attr as _attr + +from ._schemas import Response as _Response +from ._schemas import StreamingResponse as _StreamingResponse + +@_attr.define +class HTTPClient: + address: str + client_args: Dict[str, Any] + @staticmethod + def wait_until_server_ready(addr: str, timeout: float = ..., verify: 
bool = ..., check_interval: int = ..., **client_args: Any) -> None: ... + def __init__(self, address: Optional[str] = ..., timeout: int = ..., verify: bool = ..., api_version: str = ..., **client_args: Any) -> None: ... + @property + def is_ready(self) -> bool: ... + def health(self) -> None: ... + def query(self, prompt: str, **attrs: Any) -> _Response: ... + def generate( + self, + prompt: str, + llm_config: Optional[Dict[str, Any]] = ..., + stop: Optional[Union[str, List[str]]] = ..., + adapter_name: Optional[str] = ..., + timeout: Optional[int] = ..., + verify: Optional[bool] = ..., + **attrs: Any, + ) -> _Response: ... + def generate_stream( + self, + prompt: str, + llm_config: Optional[Dict[str, Any]] = ..., + stop: Optional[Union[str, List[str]]] = ..., + adapter_name: Optional[str] = ..., + timeout: Optional[int] = ..., + verify: Optional[bool] = ..., + **attrs: Any, + ) -> Iterator[_StreamingResponse]: ... + +@_attr.define +class AsyncHTTPClient: + address: str + client_args: Dict[str, Any] + @staticmethod + async def wait_until_server_ready(addr: str, timeout: float = ..., verify: bool = ..., check_interval: int = ..., **client_args: Any) -> None: ... + def __init__(self, address: Optional[str] = ..., timeout: int = ..., verify: bool = ..., api_version: str = ..., **client_args: Any) -> None: ... + @property + def is_ready(self) -> bool: ... + async def health(self) -> None: ... + async def query(self, prompt: str, **attrs: Any) -> _Response: ... + async def generate( + self, + prompt: str, + llm_config: Optional[Dict[str, Any]] = ..., + stop: Optional[Union[str, List[str]]] = ..., + adapter_name: Optional[str] = ..., + timeout: Optional[int] = ..., + verify: Optional[bool] = ..., + **attrs: Any, + ) -> _Response: ... 
+ async def generate_stream( + self, + prompt: str, + llm_config: Optional[Dict[str, Any]] = ..., + stop: Optional[Union[str, List[str]]] = ..., + adapter_name: Optional[str] = ..., + timeout: Optional[int] = ..., + verify: Optional[bool] = ..., + **attrs: Any, + ) -> AsyncGenerator[_StreamingResponse, Any]: ... diff --git a/openllm-client/src/openllm_client/_http.py b/openllm-client/src/openllm_client/_http.py index 00f20a49..205f92f2 100644 --- a/openllm-client/src/openllm_client/_http.py +++ b/openllm-client/src/openllm_client/_http.py @@ -19,37 +19,38 @@ from ._schemas import StreamingResponse logger = logging.getLogger(__name__) -def _address_validator(_: t.Any, attr: attr.Attribute[t.Any], value: str) -> None: +def _address_validator(_, attr, value): if not isinstance(value, str): raise TypeError(f'{attr.name} must be a string') if not urlparse(value).netloc: raise ValueError(f'{attr.name} must be a valid URL') -def _address_converter(addr: str) -> str: +def _address_converter(addr: str): return addr if '://' in addr else 'http://' + addr class ServerState(enum.Enum): - # CLOSED: The server is not yet ready or `wait_until_server_ready` has not been called/failed. - CLOSED = 1 - # READY: The server is ready and `wait_until_server_ready` has been called. - READY = 2 + CLOSED = 1 # CLOSED: The server is not yet ready or `wait_until_server_ready` has not been called/failed. + READY = 2 # READY: The server is ready and `wait_until_server_ready` has been called. 
_object_setattr = object.__setattr__ @attr.define(init=False) class HTTPClient: address: str = attr.field(validator=_address_validator, converter=_address_converter) - client_args: t.Dict[str, t.Any] = attr.field() - _inner: httpx.Client = attr.field(repr=False) + client_args: t.Dict[str, t.Any] - _timeout: int = attr.field(default=30, repr=False) - _api_version: str = attr.field(default='v1', repr=False) - _verify: bool = attr.field(default=True, repr=False) - _state: ServerState = attr.field(default=ServerState.CLOSED, repr=False) + _inner: httpx.Client + _timeout: int = 30 + _api_version: str = 'v1' + _verify: bool = True + _state: ServerState = ServerState.CLOSED - __metadata: dict[str, t.Any] | None = attr.field(default=None, repr=False) - __config: dict[str, t.Any] | None = attr.field(default=None, repr=False) + __metadata: dict[str, t.Any] | None = None + __config: dict[str, t.Any] | None = None + + def __repr__(self): + return f'' @staticmethod - def wait_until_server_ready(addr: str, timeout: float = 30, verify: bool = False, check_interval: int = 1, **client_args: t.Any) -> None: + def wait_until_server_ready(addr, timeout=30, verify=False, check_interval=1, **client_args): addr = _address_converter(addr) logger.debug('Wait for server @ %s to be ready', addr) start = time.monotonic() @@ -71,12 +72,12 @@ class HTTPClient: logger.error(err) raise - def __init__(self, address: str | None = None, timeout: int = 30, verify: bool = False, api_version: str = 'v1', **client_args: t.Any) -> None: + def __init__(self, address=None, timeout=30, verify=False, api_version='v1', **client_args): if address is None: - env = os.environ.get('OPENLLM_ENDPOINT') + env = os.getenv('OPENLLM_ENDPOINT') if env is None: raise ValueError('address must be provided') address = env - self.__attrs_init__(address, client_args, httpx.Client(base_url=address, timeout=timeout, verify=verify, **client_args), timeout, api_version, verify) # type: ignore[attr-defined] + 
self.__attrs_init__(address, client_args, httpx.Client(base_url=address, timeout=timeout, verify=verify, **client_args), timeout, api_version, verify) def _metadata(self) -> dict[str, t.Any]: if self.__metadata is None: self.__metadata = self._inner.post(self._build_endpoint('metadata')).json() @@ -89,15 +90,20 @@ class HTTPClient: self.__config = {**config, **generation_config} return self.__config - # yapf: disable - def __del__(self)->None:self._inner.close() - def _build_endpoint(self,endpoint: str)->str:return ('/' if not self._api_version.startswith('/') else '')+f'{self._api_version}/{endpoint}' - @property - def is_ready(self)->bool:return self._state==ServerState.READY - def query(self, prompt: str, **attrs: t.Any)->Response: return self.generate(prompt,**attrs) - # yapf: enable + def __del__(self): + self._inner.close() - def health(self) -> None: + def _build_endpoint(self, endpoint): + return ('/' if not self._api_version.startswith('/') else '') + f'{self._api_version}/{endpoint}' + + @property + def is_ready(self): + return self._state == ServerState.READY + + def query(self, prompt, **attrs): + return self.generate(prompt, **attrs) + + def health(self): try: self.wait_until_server_ready(self.address, timeout=self._timeout, verify=self._verify, **self.client_args) _object_setattr(self, '_state', ServerState.READY) @@ -105,12 +111,12 @@ class HTTPClient: logger.error('Server is not healthy (Scroll up for traceback)\n%s', e) _object_setattr(self, '_state', ServerState.CLOSED) - def generate(self, prompt: str, llm_config: dict[str, t.Any] | None = None, stop: str | list[str] | None = None, adapter_name: str | None = None, **attrs: t.Any) -> Response: + def generate(self, prompt, llm_config=None, stop=None, adapter_name=None, timeout=None, verify=None, **attrs) -> Response: if not self.is_ready: self.health() if not self.is_ready: raise RuntimeError('Server is not ready. 
Check server logs for more information.') - timeout = attrs.pop('timeout', self._timeout) - verify = attrs.pop('verify', self._verify) + if timeout is None: timeout = self._timeout + if verify is None: verify = self._verify _meta, _config = self._metadata(), self._config() if llm_config is not None: llm_config = {**_config, **llm_config, **attrs} else: llm_config = {**_config, **attrs} @@ -122,17 +128,12 @@ class HTTPClient: if r.status_code != 200: raise ValueError("Failed to get generation from '/v1/generate'. Check server logs for more details.") return Response.model_construct(r.json()) - def generate_stream(self, - prompt: str, - llm_config: dict[str, t.Any] | None = None, - stop: str | list[str] | None = None, - adapter_name: str | None = None, - **attrs: t.Any) -> t.Iterator[StreamingResponse]: + def generate_stream(self, prompt, llm_config=None, stop=None, adapter_name=None, timeout=None, verify=None, **attrs) -> t.Iterator[StreamingResponse]: if not self.is_ready: self.health() if not self.is_ready: raise RuntimeError('Server is not ready. 
Check server logs for more information.') - timeout = attrs.pop('timeout', self._timeout) - verify = attrs.pop('verify', self._verify) + if timeout is None: timeout = self._timeout + if verify is None: verify = self._verify _meta, _config = self._metadata(), self._config() if llm_config is not None: llm_config = {**_config, **llm_config, **attrs} else: llm_config = {**_config, **attrs} @@ -156,19 +157,22 @@ class HTTPClient: @attr.define(init=False) class AsyncHTTPClient: address: str = attr.field(validator=_address_validator, converter=_address_converter) - client_args: t.Dict[str, t.Any] = attr.field() - _inner: httpx.AsyncClient = attr.field(repr=False) + client_args: t.Dict[str, t.Any] - _timeout: int = attr.field(default=30, repr=False) - _api_version: str = attr.field(default='v1', repr=False) - _verify: bool = attr.field(default=True, repr=False) - _state: ServerState = attr.field(default=ServerState.CLOSED, repr=False) + _inner: httpx.AsyncClient + _timeout: int = 30 + _api_version: str = 'v1' + _verify: bool = True + _state: ServerState = ServerState.CLOSED - __metadata: dict[str, t.Any] | None = attr.field(default=None, repr=False) - __config: dict[str, t.Any] | None = attr.field(default=None, repr=False) + __metadata: dict[str, t.Any] | None = None + __config: dict[str, t.Any] | None = None + + def __repr__(self): + return f'' @staticmethod - async def wait_until_server_ready(addr: str, timeout: float = 30, verify: bool = False, check_interval: int = 1, **client_args: t.Any) -> None: + async def wait_until_server_ready(addr, timeout=30, verify=False, check_interval=1, **client_args): addr = _address_converter(addr) logger.debug('Wait for server @ %s to be ready', addr) start = time.monotonic() @@ -190,9 +194,9 @@ class AsyncHTTPClient: logger.error(err) raise - def __init__(self, address: str | None = None, timeout: int = 30, verify: bool = False, api_version: str = 'v1', **client_args: t.Any) -> None: + def __init__(self, address=None, timeout=30, 
verify=False, api_version='v1', **client_args): if address is None: - env = os.environ.get('OPENLLM_ENDPOINT') + env = os.getenv('OPENLLM_ENDPOINT') if env is None: raise ValueError('address must be provided') address = env self.__attrs_init__(address, client_args, httpx.AsyncClient(base_url=address, timeout=timeout, verify=verify, **client_args), timeout, api_version, verify) @@ -208,13 +212,17 @@ class AsyncHTTPClient: self.__config = {**config, **generation_config} return self.__config - # yapf: disable - def _build_endpoint(self,endpoint:str) -> str: return '/'+f'{self._api_version}/{endpoint}' + def _build_endpoint(self, endpoint): + return '/' + f'{self._api_version}/{endpoint}' + @property - def is_ready(self)->bool:return self._state==ServerState.READY - async def query(self,prompt:str,**attrs: t.Any)->Response:return await self.generate(prompt,**attrs) - # yapf: enable - async def health(self) -> None: + def is_ready(self): + return self._state == ServerState.READY + + async def query(self, prompt, **attrs): + return await self.generate(prompt, **attrs) + + async def health(self): try: await self.wait_until_server_ready(self.address, timeout=self._timeout, verify=self._verify, **self.client_args) _object_setattr(self, '_state', ServerState.READY) @@ -222,12 +230,12 @@ class AsyncHTTPClient: logger.error('Server is not healthy (Scroll up for traceback)\n%s', e) _object_setattr(self, '_state', ServerState.CLOSED) - async def generate(self, prompt: str, llm_config: dict[str, t.Any] | None = None, stop: str | list[str] | None = None, adapter_name: str | None = None, **attrs: t.Any) -> Response: + async def generate(self, prompt, llm_config=None, stop=None, adapter_name=None, timeout=None, verify=None, **attrs) -> Response: if not self.is_ready: await self.health() if not self.is_ready: raise RuntimeError('Server is not ready. 
Check server logs for more information.') - timeout = attrs.pop('timeout', self._timeout) - verify = attrs.pop('verify', self._verify) + if timeout is None: timeout = self._timeout + if verify is None: verify = self._verify _meta, _config = await self._metadata(), await self._config() if llm_config is not None: llm_config = {**_config, **llm_config, **attrs} else: llm_config = {**_config, **attrs} @@ -239,17 +247,12 @@ class AsyncHTTPClient: if r.status_code != 200: raise ValueError("Failed to get generation from '/v1/generate'. Check server logs for more details.") return Response.model_construct(r.json()) - async def generate_stream(self, - prompt: str, - llm_config: dict[str, t.Any] | None = None, - stop: str | list[str] | None = None, - adapter_name: str | None = None, - **attrs: t.Any) -> t.AsyncGenerator[StreamingResponse, t.Any]: + async def generate_stream(self, prompt, llm_config=None, stop=None, adapter_name=None, timeout=None, verify=None, **attrs) -> t.AsyncGenerator[StreamingResponse, t.Any]: if not self.is_ready: await self.health() if not self.is_ready: raise RuntimeError('Server is not ready. 
Check server logs for more information.') - timeout = attrs.pop('timeout', self._timeout) - verify = attrs.pop('verify', self._verify) + if timeout is None: timeout = self._timeout + if verify is None: verify = self._verify _meta, _config = await self._metadata(), await self._config() if llm_config is not None: llm_config = {**_config, **llm_config, **attrs} else: llm_config = {**_config, **attrs} diff --git a/openllm-client/src/openllm_client/_schemas.py b/openllm-client/src/openllm_client/_schemas.py index 0951289e..de29c1be 100644 --- a/openllm-client/src/openllm_client/_schemas.py +++ b/openllm-client/src/openllm_client/_schemas.py @@ -12,11 +12,12 @@ class Request: stop: t.Optional[t.Union[str, t.List[str]]] = attr.field(default=None) adapter_name: t.Optional[str] = attr.field(default=None) - # yapf: disable - def model_dump_json(self)->dict[str, t.Any]:return cattr.unstructure(self) + def model_dump_json(self) -> t.Dict[str, t.Any]: + return cattr.unstructure(self) + @classmethod - def model_construct(cls,data:dict[str,t.Any])->Request: return cattr.structure(data,cls) - # yapf: enable + def model_construct(cls, data: t.Dict[str, t.Any]) -> Request: + return cattr.structure(data, cls) SampleLogprobs = t.List[t.Dict[int, float]] PromptLogprobs = t.List[t.Optional[t.Dict[int, float]]] @@ -40,11 +41,12 @@ class Response: prompt_token_ids: t.Optional[t.List[int]] = attr.field(default=None) prompt_logprobs: t.Optional[PromptLogprobs] = attr.field(default=None) - # yapf: disable - def model_dump_json(self)->dict[str, t.Any]:return cattr.unstructure(self) + def model_dump_json(self) -> t.Dict[str, t.Any]: + return cattr.unstructure(self) + @classmethod - def model_construct(cls,data:dict[str,t.Any])->Response: return cattr.structure(data,cls) - # yapf: enable + def model_construct(cls, data: t.Dict[str, t.Any]) -> Response: + return cattr.structure(data, cls) @attr.define class StreamingResponse: @@ -53,10 +55,13 @@ class StreamingResponse: text: str token_ids: int 
- # yapf: disable @classmethod - def from_response_chunk(cls,response:Response)->StreamingResponse:return cls(request_id=response.request_id,index=response.outputs[0].index,text=response.outputs[0].text,token_ids=response.outputs[0].token_ids[0]) - def model_dump_json(self)->dict[str, t.Any]:return cattr.unstructure(self) + def from_response_chunk(cls, response: Response) -> StreamingResponse: + return cls(request_id=response.request_id, index=response.outputs[0].index, text=response.outputs[0].text, token_ids=response.outputs[0].token_ids[0]) + + def model_dump_json(self) -> t.Dict[str, t.Any]: + return cattr.unstructure(self) + @classmethod - def model_construct(cls,data:dict[str,t.Any])->StreamingResponse: return cattr.structure(data,cls) - # yapf: enable + def model_construct(cls, data: t.Dict[str, t.Any]) -> StreamingResponse: + return cattr.structure(data, cls) diff --git a/openllm-core/src/openllm_core/_configuration.py b/openllm-core/src/openllm_core/_configuration.py index 4569712a..005418b2 100644 --- a/openllm-core/src/openllm_core/_configuration.py +++ b/openllm-core/src/openllm_core/_configuration.py @@ -230,12 +230,12 @@ class FineTuneConfig: @attr.frozen(slots=True, repr=False, init=False) class GenerationConfig(ReprMixin): - '''GenerationConfig is the attrs-compatible version of ``transformers.GenerationConfig``, with some additional validation and environment constructor. + """GenerationConfig is the attrs-compatible version of ``transformers.GenerationConfig``, with some additional validation and environment constructor. Note that we always set `do_sample=True`. This class is not designed to be used directly, rather to be used conjunction with LLMConfig. The instance of the generation config can then be accessed via ``LLMConfig.generation_config``. 
- ''' + """ max_new_tokens: int = dantic.Field(20, ge=0, description='The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt.') min_length: int = dantic.Field( 0, @@ -368,14 +368,14 @@ converter.register_unstructure_hook_factory( @attr.frozen(slots=True, repr=False, init=False) class SamplingParams(ReprMixin): - '''SamplingParams is the attr-compatible version of ``vllm.SamplingParams``. It provides some utilities to also respect shared variables from ``openllm.LLMConfig``. + """SamplingParams is the attr-compatible version of ``vllm.SamplingParams``. It provides some utilities to also respect shared variables from ``openllm.LLMConfig``. The following value will be parsed directly from ``openllm.LLMConfig``: - temperature - top_k - top_p - max_tokens -> max_new_tokens - ''' + """ n: int = dantic.Field(1, description='Number of output sequences to return for the given prompt.') best_of: int = dantic.Field( None, @@ -438,7 +438,7 @@ class SamplingParams(ReprMixin): @classmethod def from_generation_config(cls, generation_config: GenerationConfig, **attrs: t.Any) -> Self: - '''The main entrypoint for creating a SamplingParams from ``openllm.LLMConfig``.''' + """The main entrypoint for creating a SamplingParams from ``openllm.LLMConfig``.""" if 'max_tokens' in attrs and 'max_new_tokens' in attrs: raise ValueError("Both 'max_tokens' and 'max_new_tokens' are passed. Make sure to only use one of them.") temperature = first_not_none(attrs.pop('temperature', None), default=generation_config['temperature']) top_k = first_not_none(attrs.pop('top_k', None), default=generation_config['top_k']) @@ -478,13 +478,13 @@ converter.register_structure_hook_factory(lambda cls: attr.has(cls) and lenient_ _object_getattribute = object.__getattribute__ class ModelSettings(t.TypedDict, total=False): - '''ModelSettings serve only for typing purposes as this is transcribed into LLMConfig.__config__. 
+ """ModelSettings serve only for typing purposes as this is transcribed into LLMConfig.__config__. Note that all fields from this dictionary will then be converted to __openllm_*__ fields in LLMConfig. If the field below changes, make sure to run ./tools/update-config-stubs.py to generate correct __getitem__ stubs for type-checking purposes. - ''' + """ # NOTE: These required fields should be at the top, as it will be kw_only default_id: Required[str] @@ -534,7 +534,7 @@ _transformed_type: DictStrAny = {'fine_tune_strategies': t.Dict[AdapterType, Fin description=f'ModelSettings field for {k}.')) for k, ann in t.get_type_hints(ModelSettings).items() ]) class _ModelSettingsAttr: - '''Internal attrs representation of ModelSettings.''' + """Internal attrs representation of ModelSettings.""" def __getitem__(self, key: str) -> t.Any: if key in codegen.get_annotations(ModelSettings): return _object_getattribute(self, key) @@ -631,7 +631,7 @@ def _setattr_class(attr_name: str, value_var: t.Any) -> str: return f"setattr(cls, '{attr_name}', {value_var})" def _make_assignment_script(cls: type[LLMConfig], attributes: attr.AttrsInstance, _prefix: LiteralString = 'openllm') -> t.Callable[..., None]: - '''Generate the assignment script with prefix attributes __openllm___.''' + """Generate the assignment script with prefix attributes __openllm___.""" args: ListStr = [] globs: DictStrAny = {'cls': cls, '_cached_attribute': attributes, '_cached_getattribute_get': _object_getattribute.__get__} annotations: DictStrAny = {'return': None} @@ -651,14 +651,14 @@ _reserved_namespace = {'__config__', 'GenerationConfig', 'SamplingParams'} class _ConfigAttr: @staticmethod def Field(default: t.Any = None, **attrs: t.Any) -> t.Any: - '''Field is a alias to the internal dantic utilities to easily create + """Field is a alias to the internal dantic utilities to easily create attrs.fields with pydantic-compatible interface. 
 For example: ```python class MyModelConfig(openllm.LLMConfig): field1 = openllm.LLMConfig.Field(...) ``` - ''' + """ return dantic.Field(default, **attrs) # NOTE: The following is handled via __init_subclass__, and is only used for TYPE_CHECKING @@ -715,7 +715,7 @@ class _ConfigAttr: to create arguments for vLLM LLMEngine that can be used throughout the lifecycle. This class will also be managed internally by OpenLLM.''' def __attrs_init__(self, *args: t.Any, **attrs: t.Any) -> None: - '''Generated __attrs_init__ for LLMConfig subclass that follows the attrs contract.''' + """Generated __attrs_init__ for LLMConfig subclass that follows the attrs contract.""" # NOTE: The following will be populated from __config__ and also # considered to be public API. Users can also access these via self[key] @@ -1393,7 +1393,7 @@ class LLMConfig(_ConfigAttr): @classmethod def model_construct_env(cls, **attrs: t.Any) -> Self: - '''A helpers that respect configuration values environment variables.''' + """A helper that respects configuration values from environment variables.""" attrs = {k: v for k, v in attrs.items() if v is not None} model_config = cls.__openllm_env__.config env_json_string = os.environ.get(model_config, None) @@ -1425,7 +1425,7 @@ class LLMConfig(_ConfigAttr): return converter.structure(config_from_env, cls) def model_validate_click(self, **attrs: t.Any) -> tuple[LLMConfig, DictStrAny]: - '''Parse given click attributes into a LLMConfig and return the remaining click attributes.''' + """Parse given click attributes into a LLMConfig and return the remaining click attributes.""" llm_config_attrs: DictStrAny = {'generation_config': {}, 'sampling_config': {}} key_to_remove: ListStr = [] for k, v in attrs.items(): diff --git a/openllm-core/src/openllm_core/_schemas.py b/openllm-core/src/openllm_core/_schemas.py index cc289c8c..75784ffe 100644 --- a/openllm-core/src/openllm_core/_schemas.py +++ b/openllm-core/src/openllm_core/_schemas.py @@ -1,4 +1,4 @@ -'''Schema 
definition for OpenLLM. This schema is used throughout openllm core components library.''' +"""Schema definition for OpenLLM. This schema is used throughout openllm core components library.""" from __future__ import annotations import typing as t diff --git a/openllm-core/src/openllm_core/_strategies.py b/openllm-core/src/openllm_core/_strategies.py index 45d6685a..a35fd916 100644 --- a/openllm-core/src/openllm_core/_strategies.py +++ b/openllm-core/src/openllm_core/_strategies.py @@ -35,7 +35,7 @@ class DynResource(t.Protocol): logger = logging.getLogger(__name__) def _strtoul(s: str) -> int: - '''Return -1 or positive integer sequence string starts with,.''' + """Return -1 or positive integer sequence string starts with,.""" if not s: return -1 idx = 0 for idx, c in enumerate(s): @@ -69,7 +69,7 @@ def _parse_visible_devices(default_var: str = ..., *, respect_env: t.Literal[Fal ... def _parse_visible_devices(default_var: str | None = None, respect_env: bool = True) -> list[str] | None: - '''CUDA_VISIBLE_DEVICES aware with default var for parsing spec.''' + """CUDA_VISIBLE_DEVICES aware with default var for parsing spec.""" if respect_env: spec = os.environ.get('CUDA_VISIBLE_DEVICES', default_var) if not spec: return None @@ -258,10 +258,10 @@ class CascadingResourceStrategy(bentoml.Strategy, ReprMixin): """ @classmethod def get_worker_count(cls, runnable_class: type[bentoml.Runnable], resource_request: dict[str, t.Any] | None, workers_per_resource: float) -> int: - '''Return the number of workers to be used for the given runnable class. + """Return the number of workers to be used for the given runnable class. Note that for all available GPU, the number of workers will always be 1. 
- ''' + """ if resource_request is None: resource_request = system_resources() # use NVIDIA kind = 'nvidia.com/gpu' @@ -288,14 +288,14 @@ class CascadingResourceStrategy(bentoml.Strategy, ReprMixin): @classmethod def get_worker_env(cls, runnable_class: type[bentoml.Runnable], resource_request: dict[str, t.Any] | None, workers_per_resource: int | float, worker_index: int) -> dict[str, t.Any]: - '''Get worker env for this given worker_index. + """Get worker env for this given worker_index. Args: runnable_class: The runnable class to be run. resource_request: The resource request of the runnable. workers_per_resource: # of workers per resource. worker_index: The index of the worker, start from 0. - ''' + """ cuda_env = os.environ.get('CUDA_VISIBLE_DEVICES', None) disabled = cuda_env in ('', '-1') environ: dict[str, t.Any] = {} diff --git a/openllm-core/src/openllm_core/exceptions.py b/openllm-core/src/openllm_core/exceptions.py index a288a1de..06bf4c9f 100644 --- a/openllm-core/src/openllm_core/exceptions.py +++ b/openllm-core/src/openllm_core/exceptions.py @@ -1,9 +1,9 @@ -'''Base exceptions for OpenLLM. This extends BentoML exceptions.''' +"""Base exceptions for OpenLLM. This extends BentoML exceptions.""" from __future__ import annotations from http import HTTPStatus class OpenLLMException(Exception): - '''Base class for all OpenLLM exceptions. This shares similar interface with BentoMLException.''' + """Base class for all OpenLLM exceptions. 
This shares similar interface with BentoMLException.""" error_code = HTTPStatus.INTERNAL_SERVER_ERROR @@ -12,22 +12,22 @@ class OpenLLMException(Exception): super().__init__(message) class GpuNotAvailableError(OpenLLMException): - '''Raised when there is no GPU available in given system.''' + """Raised when there is no GPU available in given system.""" class ValidationError(OpenLLMException): - '''Raised when a validation fails.''' + """Raised when a validation fails.""" class ForbiddenAttributeError(OpenLLMException): - '''Raised when using an _internal field.''' + """Raised when using an _internal field.""" class MissingAnnotationAttributeError(OpenLLMException): - '''Raised when a field under openllm.LLMConfig is missing annotations.''' + """Raised when a field under openllm.LLMConfig is missing annotations.""" class MissingDependencyError(BaseException): - '''Raised when a dependency is missing.''' + """Raised when a dependency is missing.""" class Error(BaseException): - '''To be used instead of naked raise.''' + """To be used instead of naked raise.""" class FineTuneStrategyNotSupportedError(OpenLLMException): - '''Raised when a fine-tune strategy is not supported for given LLM.''' + """Raised when a fine-tune strategy is not supported for given LLM.""" diff --git a/openllm-core/src/openllm_core/utils/__init__.py b/openllm-core/src/openllm_core/utils/__init__.py index c05b4b9f..2f59d63f 100644 --- a/openllm-core/src/openllm_core/utils/__init__.py +++ b/openllm-core/src/openllm_core/utils/__init__.py @@ -79,9 +79,9 @@ def resolve_user_filepath(filepath: str, ctx: str | None) -> str: @contextlib.contextmanager def reserve_free_port(host: str = 'localhost', port: int | None = None, prefix: str | None = None, max_retry: int = 50, enable_so_reuseport: bool = False,) -> t.Iterator[int]: - ''' + """ detect free port and reserve until exit the context - ''' + """ import psutil sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) @@ -139,7 +139,7 @@ def 
ensure_exec_coro(coro: t.Coroutine[t.Any, t.Any, t.Any]) -> t.Any: else: return loop.run_until_complete(coro) def available_devices() -> tuple[str, ...]: - '''Return available GPU under system. Currently only supports NVIDIA GPUs.''' + """Return available GPU under system. Currently only supports NVIDIA GPUs.""" from openllm_core._strategies import NvidiaGpuResource return tuple(NvidiaGpuResource.from_system()) @@ -263,10 +263,10 @@ _LOGGING_CONFIG: dict[str, t.Any] = { } def configure_logging() -> None: - '''Configure logging for OpenLLM. + """Configure logging for OpenLLM. Behaves similar to how BentoML loggers are being configured. - ''' + """ if get_quiet_mode(): _LOGGING_CONFIG['loggers']['openllm']['level'] = logging.ERROR _LOGGING_CONFIG['loggers']['bentoml']['level'] = logging.ERROR @@ -305,7 +305,7 @@ class suppress(contextlib.suppress, contextlib.ContextDecorator): """ def compose(*funcs: AnyCallable) -> AnyCallable: - '''Compose any number of unary functions into a single unary function. + """Compose any number of unary functions into a single unary function. >>> import textwrap >>> expected = str.strip(textwrap.dedent(compose.__doc__)) @@ -319,7 +319,7 @@ def compose(*funcs: AnyCallable) -> AnyCallable: >>> f = compose(round_three, int.__truediv__) >>> [f(3*x, x+1) for x in range(1,10)] [1.5, 2.0, 2.25, 2.4, 2.5, 2.571, 2.625, 2.667, 2.7] - ''' + """ def compose_two(f1: AnyCallable, f2: AnyCallable) -> AnyCallable: return lambda *args, **kwargs: f1(f2(*args, **kwargs)) @@ -349,12 +349,12 @@ def _text_in_file(text: str, filename: Path) -> bool: return any(text in line for line in filename.open()) def in_docker() -> bool: - '''Is this current environment running in docker? + """Is this current environment running in docker? 
```python type(in_docker()) ``` - ''' + """ return _dockerenv.exists() or _text_in_file('docker', _cgroup) T = t.TypeVar('T') @@ -369,7 +369,7 @@ def first_not_none(*args: T | None, default: None | T = None) -> T | None: retur # yapf: enable def resolve_filepath(path: str, ctx: str | None = None) -> str: - '''Resolve a file path to an absolute path, expand user and environment variables.''' + """Resolve a file path to an absolute path, expand user and environment variables.""" try: return resolve_user_filepath(path, ctx) except FileNotFoundError: @@ -389,7 +389,7 @@ def generate_context(framework_name: str) -> ModelContext: _TOKENIZER_PREFIX = '_tokenizer_' def flatten_attrs(**attrs: t.Any) -> tuple[dict[str, t.Any], dict[str, t.Any]]: - '''Normalize the given attrs to a model and tokenizer kwargs accordingly.''' + """Normalize the given attrs to a model and tokenizer kwargs accordingly.""" tokenizer_attrs = {k[len(_TOKENIZER_PREFIX):]: v for k, v in attrs.items() if k.startswith(_TOKENIZER_PREFIX)} for k in tuple(attrs.keys()): if k.startswith(_TOKENIZER_PREFIX): del attrs[k] diff --git a/openllm-core/src/openllm_core/utils/analytics.py b/openllm-core/src/openllm_core/utils/analytics.py index a4e355ce..ba0f8bd5 100644 --- a/openllm-core/src/openllm_core/utils/analytics.py +++ b/openllm-core/src/openllm_core/utils/analytics.py @@ -1,7 +1,7 @@ -'''Telemetry related for OpenLLM tracking. +"""Telemetry related for OpenLLM tracking. Users can disable this with OPENLLM_DO_NOT_TRACK envvar. 
-''' +""" from __future__ import annotations import contextlib import functools diff --git a/openllm-core/src/openllm_core/utils/codegen.py b/openllm-core/src/openllm_core/utils/codegen.py index 2e7700db..0b124a69 100644 --- a/openllm-core/src/openllm_core/utils/codegen.py +++ b/openllm-core/src/openllm_core/utils/codegen.py @@ -78,14 +78,14 @@ def _make_method(name: str, script: str, filename: str, globs: DictStrAny) -> An return locs[name] def make_attr_tuple_class(cls_name: str, attr_names: t.Sequence[str]) -> type[t.Any]: - '''Create a tuple subclass to hold class attributes. + """Create a tuple subclass to hold class attributes. The subclass is a bare tuple with properties for names. class MyClassAttributes(tuple): __slots__ = () x = property(itemgetter(0)) - ''' + """ from . import SHOW_CODEGEN attr_class_name = f'{cls_name}Attributes' @@ -139,7 +139,7 @@ def make_env_transformer(cls: type[openllm_core.LLMConfig], return generate_function(cls, '__auto_env', lines, args=('_', 'fields'), globs=globs, annotations={'_': 'type[LLMConfig]', 'fields': fields_ann, 'return': fields_ann}) def gen_sdk(func: _T, name: str | None = None, **attrs: t.Any) -> _T: - '''Enhance sdk with nice repr that plays well with your brain.''' + """Enhance sdk with nice repr that plays well with your brain.""" from openllm_core.utils import ReprMixin if name is None: name = func.__name__.strip('_') _signatures = inspect.signature(func).parameters diff --git a/openllm-core/src/openllm_core/utils/dantic.py b/openllm-core/src/openllm_core/utils/dantic.py index 13c5d3a7..fb6be61b 100644 --- a/openllm-core/src/openllm_core/utils/dantic.py +++ b/openllm-core/src/openllm_core/utils/dantic.py @@ -1,4 +1,4 @@ -'''An interface provides the best of pydantic and attrs.''' +"""An interface provides the best of pydantic and attrs.""" from __future__ import annotations import functools import importlib @@ -163,21 +163,21 @@ def parse_type(field_type: t.Any) -> ParamType | tuple[ParamType, ...]: return 
field_type def is_typing(field_type: type) -> bool: - '''Checks whether the current type is a module-like type. + """Checks whether the current type is a module-like type. Args: field_type: pydantic field type Returns: bool: true if the type is itself a type - ''' + """ raw = t.get_origin(field_type) if raw is None: return False if raw is type or raw is t.Type: return True return False def is_literal(field_type: type) -> bool: - '''Checks whether the given field type is a Literal type or not. + """Checks whether the given field type is a Literal type or not. Literals are weird: isinstance and subclass do not work, so you compare the origin with the Literal declaration itself. @@ -187,7 +187,7 @@ def is_literal(field_type: type) -> bool: Returns: bool: true if Literal type, false otherwise - ''' + """ origin = t.get_origin(field_type) return origin is not None and origin is t.Literal @@ -218,12 +218,12 @@ class EnumChoice(click.Choice): name = 'enum' def __init__(self, enum: Enum, case_sensitive: bool = False): - '''Enum type support for click that extends ``click.Choice``. + """Enum type support for click that extends ``click.Choice``. Args: enum: Given enum case_sensitive: Whether this choice should be case case_sensitive. - ''' + """ self.mapping = enum self.internal_type = type(enum) choices: list[t.Any] = [e.name for e in enum.__class__] @@ -241,7 +241,7 @@ class LiteralChoice(EnumChoice): name = 'literal' def __init__(self, value: t.Any, case_sensitive: bool = False): - '''Literal support for click.''' + """Literal support for click.""" # expect every literal value to belong to the same primitive type values = list(value.__args__) item_type = type(values[0]) @@ -277,14 +277,14 @@ def allows_multiple(field_type: type[t.Any]) -> bool: return False def is_mapping(field_type: type) -> bool: - '''Checks whether this field represents a dictionary or JSON object. + """Checks whether this field represents a dictionary or JSON object. 
Args: field_type (type): pydantic type Returns: bool: true when the field is a dict-like object, false otherwise. - ''' + """ # Early out for standard containers. from . import lenient_issubclass if lenient_issubclass(field_type, t.Mapping): return True @@ -313,14 +313,14 @@ def is_container(field_type: type) -> bool: return lenient_issubclass(origin, t.Container) def parse_container_args(field_type: type[t.Any]) -> ParamType | tuple[ParamType, ...]: - '''Parses the arguments inside a container type (lists, tuples and so on). + """Parses the arguments inside a container type (lists, tuples and so on). Args: field_type: pydantic field type Returns: ParamType | tuple[ParamType]: single click-compatible type or a tuple - ''' + """ if not is_container(field_type): raise ValueError('Field type is not a container type.') args = t.get_args(field_type) @@ -390,7 +390,7 @@ class CudaValueType(ParamType): return var def shell_complete(self, ctx: click.Context, param: click.Parameter, incomplete: str) -> list[sc.CompletionItem]: - '''Return a list of :class:`~click.shell_completion.CompletionItem` objects for the incomplete value. + """Return a list of :class:`~click.shell_completion.CompletionItem` objects for the incomplete value. Most types do not provide completions, but some do, and this allows custom types to provide custom completions as well. @@ -398,7 +398,7 @@ class CudaValueType(ParamType): ctx: Invocation context for this command. param: The parameter that is requesting completion. incomplete: Value being completed. May be empty. 
- ''' + """ from openllm_core.utils import available_devices mapping = incomplete.split(self.envvar_list_splitter) if incomplete else available_devices() return [sc.CompletionItem(str(i), help=f'CUDA device index {i}') for i in mapping] diff --git a/openllm-core/src/openllm_core/utils/import_utils.py b/openllm-core/src/openllm_core/utils/import_utils.py index b2503aed..15fc2f2e 100644 --- a/openllm-core/src/openllm_core/utils/import_utils.py +++ b/openllm-core/src/openllm_core/utils/import_utils.py @@ -1,4 +1,4 @@ -'''Some imports utils are vendorred from transformers/utils/import_utils.py for performance reasons.''' +"""Some imports utils are vendorred from transformers/utils/import_utils.py for performance reasons.""" from __future__ import annotations import importlib import importlib.metadata @@ -176,7 +176,7 @@ class EnvVarMixin(ReprMixin): raise KeyError(f'Key {item} not found in {self}') def __init__(self, model_name: str, backend: LiteralBackend = 'pt', model_id: str | None = None, quantize: LiteralString | None = None) -> None: - '''EnvVarMixin is a mixin class that returns the value extracted from environment variables.''' + """EnvVarMixin is a mixin class that returns the value extracted from environment variables.""" from openllm_core.utils import field_env_key self.model_name = inflection.underscore(model_name) self._backend = backend diff --git a/openllm-core/src/openllm_core/utils/lazy.py b/openllm-core/src/openllm_core/utils/lazy.py index f1893998..83aa88d8 100644 --- a/openllm-core/src/openllm_core/utils/lazy.py +++ b/openllm-core/src/openllm_core/utils/lazy.py @@ -168,12 +168,12 @@ class LazyModule(types.ModuleType): return result + [i for i in self.__all__ if i not in result] def __getattr__(self, name: str) -> t.Any: - '''Equivocal __getattr__ implementation. + """Equivocal __getattr__ implementation. It checks from _objects > _modules and does it recursively. 
It also contains a special case for all of the metadata information, such as __version__ and __version_info__. - ''' + """ if name in _reserved_namespace: raise openllm_core.exceptions.ForbiddenAttributeError(f"'{name}' is a reserved namespace for {self._name} and should not be access nor modified.") dunder_to_metadata = { diff --git a/openllm-python/src/openllm/__main__.py b/openllm-python/src/openllm/__main__.py index 6754a4d4..a5374806 100644 --- a/openllm-python/src/openllm/__main__.py +++ b/openllm-python/src/openllm/__main__.py @@ -1,11 +1,11 @@ -'''CLI entrypoint for OpenLLM. +"""CLI entrypoint for OpenLLM. Usage: openllm --help To start any OpenLLM model: openllm start --options ... -''' +""" from __future__ import annotations if __name__ == '__main__': diff --git a/openllm-python/src/openllm/_generation.py b/openllm-python/src/openllm/_generation.py index 8a78cab8..ddd46937 100644 --- a/openllm-python/src/openllm/_generation.py +++ b/openllm-python/src/openllm/_generation.py @@ -51,7 +51,7 @@ def is_sentence_complete(output: str) -> bool: return output.endswith(('.', '?', '!', '...', '。', '?', '!', '…', '"', "'", '”')) def is_partial_stop(output: str, stop_str: str) -> bool: - '''Check whether the output contains a partial stop str.''' + """Check whether the output contains a partial stop str.""" for i in range(0, min(len(output), len(stop_str))): if stop_str.startswith(output[-i:]): return True return False diff --git a/openllm-python/src/openllm/_llm.py b/openllm-python/src/openllm/_llm.py index 6dc2b611..080c3187 100644 --- a/openllm-python/src/openllm/_llm.py +++ b/openllm-python/src/openllm/_llm.py @@ -80,13 +80,13 @@ def normalise_model_name(name: str) -> str: return inflection.dasherize(name) def resolve_peft_config_type(adapter_map: dict[str, str]) -> AdapterMap: - '''Resolve the type of the PeftConfig given the adapter_map. + """Resolve the type of the PeftConfig given the adapter_map. This is similar to how PeftConfig resolve its config type. 
Args: adapter_map: The given mapping from either SDK or CLI. See CLI docs for more information. - ''' + """ resolved: AdapterMap = {} _has_set_default = False for path_or_adapter_id, name in adapter_map.items(): @@ -191,7 +191,7 @@ class LLM(t.Generic[M, T]): @apply(lambda val: tuple(str.lower(i) if i else i for i in val)) def _make_tag_components(self, model_id: str, model_version: str | None, backend: LiteralBackend) -> tuple[str, str | None]: - '''Return a valid tag name (---) and its tag version.''' + """Return a valid tag name (---) and its tag version.""" model_id, *maybe_revision = model_id.rsplit(':') if len(maybe_revision) > 0: if model_version is not None: logger.warning("revision is specified within 'model_id' (%s), and 'model_version=%s' will be ignored.", maybe_revision[0], model_version) diff --git a/openllm-python/src/openllm/bundle/_package.py b/openllm-python/src/openllm/bundle/_package.py index 5d8ae6ac..0bfe1e75 100644 --- a/openllm-python/src/openllm/bundle/_package.py +++ b/openllm-python/src/openllm/bundle/_package.py @@ -44,7 +44,7 @@ logger = logging.getLogger(__name__) OPENLLM_DEV_BUILD = 'OPENLLM_DEV_BUILD' def build_editable(path: str, package: t.Literal['openllm', 'openllm_core', 'openllm_client'] = 'openllm') -> str | None: - '''Build OpenLLM if the OPENLLM_DEV_BUILD environment variable is set.''' + """Build OpenLLM if the OPENLLM_DEV_BUILD environment variable is set.""" if str(os.environ.get(OPENLLM_DEV_BUILD, False)).lower() != 'true': return None # We need to build the package in editable mode, so that we can import it from build import ProjectBuilder diff --git a/openllm-python/src/openllm/bundle/oci/__init__.py b/openllm-python/src/openllm/bundle/oci/__init__.py index 28c2b490..b144652e 100644 --- a/openllm-python/src/openllm/bundle/oci/__init__.py +++ b/openllm-python/src/openllm/bundle/oci/__init__.py @@ -1,5 +1,5 @@ # mypy: disable-error-code="misc" -'''OCI-related utilities for OpenLLM. 
This module is considered to be internal and API are subjected to change.''' +"""OCI-related utilities for OpenLLM. This module is considered to be internal and API are subjected to change.""" from __future__ import annotations import functools import importlib diff --git a/openllm-python/src/openllm/cli/__init__.py b/openllm-python/src/openllm/cli/__init__.py index 8d27d1b9..cd72ef8d 100644 --- a/openllm-python/src/openllm/cli/__init__.py +++ b/openllm-python/src/openllm/cli/__init__.py @@ -1,4 +1,4 @@ -'''OpenLLM CLI. +"""OpenLLM CLI. For more information see ``openllm -h``. -''' +""" diff --git a/openllm-python/src/openllm/cli/_factory.py b/openllm-python/src/openllm/cli/_factory.py index b1b6ce19..db3aa4a3 100644 --- a/openllm-python/src/openllm/cli/_factory.py +++ b/openllm-python/src/openllm/cli/_factory.py @@ -290,7 +290,7 @@ def parse_device_callback(ctx: click.Context, param: click.Parameter, value: tup _IGNORED_OPTIONS = {'working_dir', 'production', 'protocol_version'} def parse_serve_args(serve_grpc: bool) -> t.Callable[[t.Callable[..., LLMConfig]], t.Callable[[FC], FC]]: - '''Parsing `bentoml serve|serve-grpc` click.Option to be parsed via `openllm start`.''' + """Parsing `bentoml serve|serve-grpc` click.Option to be parsed via `openllm start`.""" from bentoml_cli.cli import cli command = 'serve' if not serve_grpc else 'serve-grpc' @@ -320,11 +320,11 @@ def parse_serve_args(serve_grpc: bool) -> t.Callable[[t.Callable[..., LLMConfig] _http_server_args, _grpc_server_args = parse_serve_args(False), parse_serve_args(True) def _click_factory_type(*param_decls: t.Any, **attrs: t.Any) -> t.Callable[[FC | None], FC]: - '''General ``@click`` decorator with some sauce. + """General ``@click`` decorator with some sauce. This decorator extends the default ``@click.option`` plus a factory option and factory attr to provide type-safe click.option or click.argument wrapper for all compatible factory. 
- ''' + """ factory = attrs.pop('factory', click) factory_attr = attrs.pop('attr', 'option') if factory_attr != 'argument': attrs.setdefault('help', 'General option for OpenLLM CLI.') diff --git a/openllm-python/src/openllm/cli/_sdk.py b/openllm-python/src/openllm/cli/_sdk.py index 00734ff2..f5b241c5 100644 --- a/openllm-python/src/openllm/cli/_sdk.py +++ b/openllm-python/src/openllm/cli/_sdk.py @@ -262,7 +262,7 @@ def _import_model(model_name: str, return import_command.main(args=args, standalone_mode=False) def _list_models() -> dict[str, t.Any]: - '''List all available models within the local store.''' + """List all available models within the local store.""" from .entrypoint import models_command return models_command.main(args=['-o', 'json', '--show-available', '--machine'], standalone_mode=False) diff --git a/openllm-python/src/openllm/cli/entrypoint.py b/openllm-python/src/openllm/cli/entrypoint.py index fddf8721..8274e094 100644 --- a/openllm-python/src/openllm/cli/entrypoint.py +++ b/openllm-python/src/openllm/cli/entrypoint.py @@ -151,7 +151,7 @@ class OpenLLMCommandGroup(BentoMLCommandGroup): @staticmethod def common_params(f: t.Callable[P, t.Any]) -> t.Callable[[FC], FC]: # The following logics is similar to one of BentoMLCommandGroup - @cog.optgroup.group(name='Global options', help='Shared globals options for all OpenLLM CLI.') + @cog.optgroup.group(name='Global options', help='Shared globals options for all OpenLLM CLI.') # type: ignore[misc] @cog.optgroup.option('-q', '--quiet', envvar=QUIET_ENV_VAR, is_flag=True, default=False, help='Suppress all output.', show_envvar=True) @cog.optgroup.option('--debug', '--verbose', 'debug', envvar=DEBUG_ENV_VAR, is_flag=True, default=False, help='Print out debug logs.', show_envvar=True) @cog.optgroup.option('--do-not-track', is_flag=True, default=False, envvar=analytics.OPENLLM_DO_NOT_TRACK, help='Do not send usage info', show_envvar=True) @@ -249,7 +249,7 @@ class OpenLLMCommandGroup(BentoMLCommandGroup): 
return decorator def format_commands(self, ctx: click.Context, formatter: click.HelpFormatter) -> None: - '''Additional format methods that include extensions as well as the default cli command.''' + """Additional format methods that include extensions as well as the default cli command.""" from gettext import gettext as _ commands: list[tuple[str, click.Command]] = [] extensions: list[tuple[str, click.Command]] = [] @@ -285,7 +285,7 @@ class OpenLLMCommandGroup(BentoMLCommandGroup): '-v', message=f"%(prog)s, %(version)s (compiled: {'yes' if openllm.COMPILED else 'no'})\nPython ({platform.python_implementation()}) {platform.python_version()}") def cli() -> None: - '''\b + """\b ██████╗ ██████╗ ███████╗███╗ ██╗██╗ ██╗ ███╗ ███╗ ██╔═══██╗██╔══██╗██╔════╝████╗ ██║██║ ██║ ████╗ ████║ ██║ ██║██████╔╝█████╗ ██╔██╗ ██║██║ ██║ ██╔████╔██║ @@ -296,27 +296,27 @@ def cli() -> None: \b An open platform for operating large language models in production. Fine-tune, serve, deploy, and monitor any LLMs with ease. - ''' + """ @cli.group(cls=OpenLLMCommandGroup, context_settings=termui.CONTEXT_SETTINGS, name='start', aliases=['start-http']) def start_command() -> None: - '''Start any LLM as a REST server. + """Start any LLM as a REST server. \b ```bash $ openllm -- ... ``` - ''' + """ @cli.group(cls=OpenLLMCommandGroup, context_settings=termui.CONTEXT_SETTINGS, name='start-grpc') def start_grpc_command() -> None: - '''Start any LLM as a gRPC server. + """Start any LLM as a gRPC server. \b ```bash $ openllm start-grpc -- ... ``` - ''' + """ _start_mapping = { 'start': { @@ -424,7 +424,7 @@ def import_command(model_name: str, model_id: str | None, converter: str | None, @click.option('--bento-version', type=str, default=None, help='Optional bento version for this BentoLLM. 
Default is the the model revision.') @click.option('--overwrite', is_flag=True, help='Overwrite existing Bento for given LLM if it already exists.') @workers_per_resource_option(factory=click, build=True) -@cog.optgroup.group(cls=cog.MutuallyExclusiveOptionGroup, name='Optimisation options') +@cog.optgroup.group(cls=cog.MutuallyExclusiveOptionGroup, name='Optimisation options') # type: ignore[misc] @quantize_option(factory=cog.optgroup, build=True) @click.option('--enable-features', multiple=True, @@ -445,7 +445,7 @@ def import_command(model_name: str, model_id: str | None, converter: str | None, type=click.Choice(['release', 'latest', 'nightly']), default='release', help="Default container version strategy for the image from '--container-registry'") -@cog.optgroup.group(cls=cog.MutuallyExclusiveOptionGroup, name='Utilities options') +@cog.optgroup.group(cls=cog.MutuallyExclusiveOptionGroup, name='Utilities options') # type: ignore[misc] @cog.optgroup.option('--containerize', default=False, is_flag=True, @@ -459,7 +459,7 @@ def build_command(ctx: click.Context, /, model_name: str, model_id: str | None, system_message: str | None, prompt_template_file: t.IO[t.Any] | None, machine: bool, model_version: str | None, dockerfile_template: t.TextIO | None, containerize: bool, push: bool, serialisation: LiteralSerialisation | None, container_registry: LiteralContainerRegistry, container_version_strategy: LiteralContainerVersionStrategy, force_push: bool, **attrs: t.Any) -> bentoml.Bento: - '''Package a given models into a Bento. + """Package a given models into a Bento. \b ```bash @@ -475,7 +475,7 @@ def build_command(ctx: click.Context, /, model_name: str, model_id: str | None, > [!IMPORTANT] > To build the bento with compiled OpenLLM, make sure to prepend HATCH_BUILD_HOOKS_ENABLE=1. Make sure that the deployment > target also use the same Python version and architecture as build machine. 
- ''' + """ if machine: output = 'porcelain' if enable_features: enable_features = tuple(itertools.chain.from_iterable((s.split(',') for s in enable_features))) @@ -679,11 +679,11 @@ def prune_command(model_name: str | None, include_bentos: bool, model_store: ModelStore = Provide[BentoMLContainer.model_store], bento_store: BentoStore = Provide[BentoMLContainer.bento_store]) -> None: - '''Remove all saved models, (and optionally bentos) built with OpenLLM locally. + """Remove all saved models, (and optionally bentos) built with OpenLLM locally. \b If a model type is passed, then only prune models for that given model type. - ''' + """ available: list[tuple[bentoml.Model | bentoml.Bento, ModelStore | BentoStore]] = [(m, model_store) for m in bentoml.models.list() if 'framework' in m.info.labels and m.info.labels['framework'] == 'openllm'] if model_name is not None: available = [(m, store) for m, store in available if 'model_name' in m.info.labels and m.info.labels['model_name'] == inflection.underscore(model_name)] @@ -823,6 +823,6 @@ def query_command(ctx: click.Context, /, prompt: str, endpoint: str, timeout: in @cli.group(cls=Extensions, hidden=True, name='extension') def extension_command() -> None: - '''Extension for OpenLLM CLI.''' + """Extension for OpenLLM CLI.""" if __name__ == '__main__': cli() diff --git a/openllm-python/src/openllm/cli/extension/dive_bentos.py b/openllm-python/src/openllm/cli/extension/dive_bentos.py index 8ddf43ac..10f9d327 100644 --- a/openllm-python/src/openllm/cli/extension/dive_bentos.py +++ b/openllm-python/src/openllm/cli/extension/dive_bentos.py @@ -25,7 +25,7 @@ if t.TYPE_CHECKING: @click.pass_context @inject def cli(ctx: click.Context, bento: str, machine: bool, _bento_store: BentoStore = Provide[BentoMLContainer.bento_store]) -> str | None: - '''Dive into a BentoLLM. This is synonymous to cd $(b get : -o path).''' + """Dive into a BentoLLM. 
This is synonymous to cd $(b get : -o path).""" try: bentomodel = _bento_store.get(bento) except bentoml.exceptions.NotFound: diff --git a/openllm-python/src/openllm/cli/extension/get_prompt.py b/openllm-python/src/openllm/cli/extension/get_prompt.py index f0b393b9..c97ed39c 100644 --- a/openllm-python/src/openllm/cli/extension/get_prompt.py +++ b/openllm-python/src/openllm/cli/extension/get_prompt.py @@ -31,7 +31,7 @@ LiteralOutput = t.Literal['json', 'pretty', 'porcelain'] metavar='ARG=VALUE[,ARG=VALUE]') @click.pass_context def cli(ctx: click.Context, /, model_name: str, prompt: str, format: str | None, output: LiteralOutput, machine: bool, _memoized: dict[str, t.Any], **_: t.Any) -> str | None: - '''Get the default prompt used by OpenLLM.''' + """Get the default prompt used by OpenLLM.""" module = openllm.utils.EnvVarMixin(model_name).module _memoized = {k: v[0] for k, v in _memoized.items() if v} try: diff --git a/openllm-python/src/openllm/cli/extension/list_bentos.py b/openllm-python/src/openllm/cli/extension/list_bentos.py index 90004c6f..ffbc8ab6 100644 --- a/openllm-python/src/openllm/cli/extension/list_bentos.py +++ b/openllm-python/src/openllm/cli/extension/list_bentos.py @@ -16,7 +16,7 @@ from openllm.cli._factory import output_option @output_option(default_value='json') @click.pass_context def cli(ctx: click.Context, output: LiteralOutput) -> None: - '''List available bentos built by OpenLLM.''' + """List available bentos built by OpenLLM.""" mapping = { k: [{ 'tag': str(b.tag), diff --git a/openllm-python/src/openllm/cli/extension/list_models.py b/openllm-python/src/openllm/cli/extension/list_models.py index fdcab1cc..c6cb9975 100644 --- a/openllm-python/src/openllm/cli/extension/list_models.py +++ b/openllm-python/src/openllm/cli/extension/list_models.py @@ -22,7 +22,7 @@ if t.TYPE_CHECKING: @model_name_argument(required=False, shell_complete=model_complete_envvar) @output_option(default_value='json') def cli(model_name: str | None, output: 
LiteralOutput) -> DictStrAny: - '''This is equivalent to openllm models --show-available less the nice table.''' + """This is equivalent to openllm models --show-available less the nice table.""" models = tuple(inflection.dasherize(key) for key in openllm.CONFIG_MAPPING.keys()) ids_in_local_store = { k: [i for i in bentoml.models.list() if 'framework' in i.info.labels and i.info.labels['framework'] == 'openllm' and 'model_name' in i.info.labels and i.info.labels['model_name'] == k] diff --git a/openllm-python/src/openllm/entrypoints/__init__.py b/openllm-python/src/openllm/entrypoints/__init__.py index 87db3ecd..a7bf4825 100644 --- a/openllm-python/src/openllm/entrypoints/__init__.py +++ b/openllm-python/src/openllm/entrypoints/__init__.py @@ -1,11 +1,11 @@ -'''Entrypoint for all third-party apps. +"""Entrypoint for all third-party apps. Currently support OpenAI compatible API. Each module should implement the following API: - `mount_to_svc(svc: bentoml.Service, llm: openllm.LLM[M, T]) -> bentoml.Service: ...` -''' +""" from __future__ import annotations import typing as t diff --git a/openllm-python/src/openllm/entrypoints/_openapi.py b/openllm-python/src/openllm/entrypoints/_openapi.py index 8b8b4363..4e52f12b 100644 --- a/openllm-python/src/openllm/entrypoints/_openapi.py +++ b/openllm-python/src/openllm/entrypoints/_openapi.py @@ -480,7 +480,7 @@ def get_generator(title: str, components: list[type[AttrsInstance]] | None = Non def component_schema_generator(attr_cls: type[AttrsInstance], description: str | None = None) -> dict[str, t.Any]: schema: dict[str, t.Any] = {'type': 'object', 'required': [], 'properties': {}, 'title': attr_cls.__name__} schema['description'] = first_not_none(getattr(attr_cls, '__doc__', None), description, default=f'Generated components for {attr_cls.__name__}') - for field in attr.fields(attr.resolve_types(attr_cls)): # type: ignore[misc] + for field in attr.fields(attr.resolve_types(attr_cls)): # type: ignore[misc,type-var] attr_type 
= field.type origin_type = t.get_origin(attr_type) args_type = t.get_args(attr_type) @@ -495,21 +495,12 @@ def component_schema_generator(attr_cls: type[AttrsInstance], description: str | elif origin_type is dict: schema_type = 'object' # Assuming string keys for simplicity, and handling Any type for values - prop_schema = { - 'type': 'object', - 'additionalProperties': - True if args_type[1] is t.Any else { - 'type': 'string' - } # Simplified - } + prop_schema = {'type': 'object', 'additionalProperties': True if args_type[1] is t.Any else {'type': 'string'}} elif attr_type == t.Optional[str]: schema_type = 'string' elif origin_type is t.Union and t.Any in args_type: schema_type = 'object' - prop_schema = { - 'type': 'object', - 'additionalProperties': True # Allows any type of values - } + prop_schema = {'type': 'object', 'additionalProperties': True} else: schema_type = 'string' diff --git a/openllm-python/src/openllm/exceptions.py b/openllm-python/src/openllm/exceptions.py index 3f56a271..2380a2ef 100644 --- a/openllm-python/src/openllm/exceptions.py +++ b/openllm-python/src/openllm/exceptions.py @@ -1,4 +1,4 @@ -'''Base exceptions for OpenLLM. This extends BentoML exceptions.''' +"""Base exceptions for OpenLLM. This extends BentoML exceptions.""" from __future__ import annotations from openllm_core.exceptions import Error as Error diff --git a/openllm-python/src/openllm/protocol/__init__.py b/openllm-python/src/openllm/protocol/__init__.py index bee13ece..231c65ea 100644 --- a/openllm-python/src/openllm/protocol/__init__.py +++ b/openllm-python/src/openllm/protocol/__init__.py @@ -1,7 +1,7 @@ -'''Protocol-related packages for all library integrations. +"""Protocol-related packages for all library integrations. Currently support OpenAI compatible API. 
-''' +""" from __future__ import annotations import os import typing as t diff --git a/openllm-python/src/openllm/serialisation/__init__.py b/openllm-python/src/openllm/serialisation/__init__.py index e9553300..b1831c19 100644 --- a/openllm-python/src/openllm/serialisation/__init__.py +++ b/openllm-python/src/openllm/serialisation/__init__.py @@ -1,9 +1,9 @@ -'''Serialisation utilities for OpenLLM. +"""Serialisation utilities for OpenLLM. Currently supports transformers for PyTorch, and vLLM. Currently, GGML format is working in progress. -''' +""" from __future__ import annotations import importlib import typing as t @@ -32,11 +32,11 @@ else: P = ParamSpec('P') def load_tokenizer(llm: openllm.LLM[t.Any, T], **tokenizer_attrs: t.Any) -> T: - '''Load the tokenizer from BentoML store. + """Load the tokenizer from BentoML store. By default, it will try to find the bentomodel whether it is in store.. If model is not found, it will raises a ``bentoml.exceptions.NotFound``. - ''' + """ from .transformers._helpers import process_config config, *_ = process_config(llm.bentomodel.path, llm.trust_remote_code) diff --git a/openllm-python/src/openllm/serialisation/ggml.py b/openllm-python/src/openllm/serialisation/ggml.py index babb0c23..dbe4df44 100644 --- a/openllm-python/src/openllm/serialisation/ggml.py +++ b/openllm-python/src/openllm/serialisation/ggml.py @@ -1,7 +1,7 @@ -'''Serialisation related implementation for GGML-based implementation. +"""Serialisation related implementation for GGML-based implementation. This requires ctransformers to be installed. 
-''' +""" from __future__ import annotations import typing as t diff --git a/openllm-python/src/openllm/serialisation/transformers/__init__.py b/openllm-python/src/openllm/serialisation/transformers/__init__.py index 75439aac..e2a8c9c6 100644 --- a/openllm-python/src/openllm/serialisation/transformers/__init__.py +++ b/openllm-python/src/openllm/serialisation/transformers/__init__.py @@ -1,4 +1,4 @@ -'''Serialisation related implementation for Transformers-based implementation.''' +"""Serialisation related implementation for Transformers-based implementation.""" from __future__ import annotations import importlib import logging @@ -150,13 +150,13 @@ def import_model(llm: openllm.LLM[M, T], *decls: t.Any, trust_remote_code: bool, return bentomodel def get(llm: openllm.LLM[M, T], auto_import: bool = False) -> bentoml.Model: - '''Return an instance of ``bentoml.Model`` from given LLM instance. + """Return an instance of ``bentoml.Model`` from given LLM instance. By default, it will try to check the model in the local store. If model is not found, and ``auto_import`` is set to True, it will try to import the model from HuggingFace Hub. Otherwise, it will raises a ``bentoml.exceptions.NotFound``. - ''' + """ try: model = bentoml.models.get(llm.tag) backend = model.info.labels['backend'] diff --git a/openllm-python/src/openllm/serialisation/transformers/_helpers.py b/openllm-python/src/openllm/serialisation/transformers/_helpers.py index 36fb1c02..ef2b1f3e 100644 --- a/openllm-python/src/openllm/serialisation/transformers/_helpers.py +++ b/openllm-python/src/openllm/serialisation/transformers/_helpers.py @@ -26,7 +26,7 @@ def get_hash(config: transformers.PretrainedConfig) -> str: return _commit_hash def process_config(model_id: str, trust_remote_code: bool, **attrs: t.Any) -> tuple[transformers.PretrainedConfig, DictStrAny, DictStrAny]: - '''A helper function that correctly parse config and attributes for transformers.PretrainedConfig. 
+ """A helper function that correctly parse config and attributes for transformers.PretrainedConfig. Args: model_id: Model id to pass into ``transformers.AutoConfig``. @@ -35,7 +35,7 @@ def process_config(model_id: str, trust_remote_code: bool, **attrs: t.Any) -> tu Returns: A tuple of ``transformers.PretrainedConfig``, all hub attributes, and remanining attributes that can be used by the Model class. - ''' + """ config = attrs.pop('config', None) # this logic below is synonymous to handling `from_pretrained` attrs. hub_attrs = {k: attrs.pop(k) for k in HUB_ATTRS if k in attrs} diff --git a/openllm-python/src/openllm/testing.py b/openllm-python/src/openllm/testing.py index d6078d1f..c359f99c 100644 --- a/openllm-python/src/openllm/testing.py +++ b/openllm-python/src/openllm/testing.py @@ -1,4 +1,4 @@ -'''Tests utilities for OpenLLM.''' +"""Tests utilities for OpenLLM.""" from __future__ import annotations import contextlib import logging diff --git a/openllm-python/tests/_strategies/_configuration.py b/openllm-python/tests/_strategies/_configuration.py index 08c61836..fab36f8e 100644 --- a/openllm-python/tests/_strategies/_configuration.py +++ b/openllm-python/tests/_strategies/_configuration.py @@ -14,7 +14,7 @@ env_strats = st.sampled_from([openllm.utils.EnvVarMixin(model_name) for model_na @st.composite def model_settings(draw: st.DrawFn): - '''Strategy for generating ModelSettings objects.''' + """Strategy for generating ModelSettings objects.""" kwargs: dict[str, t.Any] = { 'default_id': st.text(min_size=1), 'model_ids': st.lists(st.text(), min_size=1), diff --git a/pyproject.toml b/pyproject.toml index b8b9222f..f830f91d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -155,103 +155,6 @@ whitelist-regex = ["test_.*"] [tool.check-wheel-contents] toplevel = ["openllm"] -[tool.ruff] -indent-width = 2 -extend-exclude = [ - "tools", - "examples", - "openllm-python/src/openllm/playground", - "openllm-python/src/openllm/__init__.py", - 
"openllm-python/src/openllm/_version.py", - "openllm-python/src/openllm/models/__init__.py", - "openllm-client/src/openllm_client/pb/**", -] -extend-include = ["*.ipynb"] -extend-select = [ - "I", # isort - "G", # flake8-logging-format - "W", # pycodestyle - "Q", # flake8-quotes - "FA", # flake8-future-annotations - "TCH", # flake8-type-checking - "PLW", # pylint-warning - "PLR", # pylint-refactor - "PT", # flake8-pytest-style - "PERF", # perflint - "FLY", # flynt - "RUF", # Ruff-specific rules - "YTT", # flake8-2020 -] -fix = true -ignore = [ - "PLR0911", - "PLR0912", - "PLR0913", - "PLR0915", - "PLR2004", # magic value to use constant - "E501", # ignore line length violation - "E401", # ignore multiple line import - "E702", - "TCH004", # don't move runtime import out, just warn about it - "RUF012", # mutable attributes to be used with ClassVar - "E701", # multiple statement on single line -] -line-length = 192 -target-version = "py312" -typing-modules = ["openllm_core._typing_compat"] -unfixable = ["TCH004"] -[tool.ruff.flake8-type-checking] -exempt-modules = ["typing", "typing_extensions", "openllm_core._typing_compat"] -runtime-evaluated-base-classes = [ - "pydantic.BaseModel", - "openllm_core._configuration.LLMConfig", - "openllm_core._configuration.GenerationConfig", - "openllm_core._configuration.ModelSettings", - "openllm.LLMConfig", -] -runtime-evaluated-decorators = ["attrs.define", "attrs.frozen", "trait"] -[tool.ruff.format] -quote-style = "single" -indent-style = "space" -skip-magic-trailing-comma = true -[tool.ruff.pydocstyle] -convention = "google" -[tool.ruff.pycodestyle] -ignore-overlong-task-comments = true -[tool.ruff.isort] -combine-as-imports = true -force-single-line = true -force-wrap-aliases = false -known-first-party = ["openllm", "bentoml", "openllm_core", "openllm_client"] -known-third-party = [ - "transformers", - "click", - "huggingface_hub", - "torch", - "vllm", - "auto_gptq", - "peft", - "click_option_group", -] -lines-after-imports = 
1 -lines-between-types = 1 -no-lines-before = ["future", "standard-library"] -relative-imports-order = "closest-to-furthest" -[tool.ruff.flake8-quotes] -avoid-escape = false -multiline-quotes = "single" -inline-quotes = "single" -docstring-quotes = "single" -[tool.ruff.extend-per-file-ignores] -"openllm-python/src/openllm/_service.py" = ["E401"] -"openllm-python/src/openllm/models/**" = ["E", "F", "I001"] -"openllm-python/tests/**/*" = ["S101", "TID252", "PT011", "S307"] -"openllm-python/src/openllm/_llm.py" = ["F811"] -"openllm-core/src/openllm_core/utils/import_utils.py" = ["PLW0603", "F811"] -"openllm-core/src/openllm_core/_configuration.py" = ["F811"] -"openllm-client/src/openllm_client/bentoclient/_http.py" = ["PERF203"] -"typings/**" = ["F", "E"] - [tool.yapfignore] ignore_patterns = [ "openllm-python/src/openllm/playground", @@ -261,7 +164,7 @@ ignore_patterns = [ ] [tool.yapf] -based_on_style = "google" +BASED_ON_STYLE = "google" INDENT_WIDTH = 2 JOIN_MULTIPLE_LINES = true COLUMN_LIMIT = 192 @@ -281,146 +184,3 @@ COALESCE_BRACKETS = true addopts = ["-rfEX", "-pno:warnings", "--snapshot-warn-unused"] python_files = ["test_*.py", "*_test.py"] testpaths = ["openllm-python/tests"] - -[tool.coverage.paths] -openllm = [ - "openllm-python/src/openllm", - "*/openllm-python/src/openllm", - "openllm-client/src/openllm_client", - "*/openllm-client/src/openllm_client", - "openllm-core/src/openllm_core", - "*/openllm-core/src/openllm_core", -] -[tool.coverage.run] -branch = true -omit = [ - "__pypackages__/*", - "openllm-python/src/openllm/_version.py", - "openllm-python/src/openllm/playground/", - "openllm-python/src/openllm/__init__.py", - "openllm-python/src/openllm/__main__.py", - "openllm-core/src/openllm_core/_typing_compat.py", - "openllm-client/src/openllm_client/pb/**", -] -source_pkgs = ["openllm", "openllm_core", "openllm_client"] -[tool.coverage.report] -exclude_lines = [ - "no cov", - "pragma: no cover", - "if __name__ == .__main__.:", - "if 
t.TYPE_CHECKING:", - "if _t.TYPE_CHECKING:", - 'if TYPE_CHECKING:', - 'if typing.TYPE_CHECKING:', - 'if t.TYPE_CHECKING and not MYPY:', - 'if DEBUG:', - 'if utils.DEBUG', - 'if openllm.utils.DEBUG', - 'if openllm_core.utils.DEBUG', - '@_overload', - '@overload', - '@t.overload', - '@typing.overload', - 'raise NotImplementedError', - 'raise NotImplemented', - 'except MissingDependencyError:', -] -omit = [ - "__pypackages__/*", - "openllm-python/src/openllm/_version.py", - "openllm-python/src/openllm/playground/", - "openllm-python/src/openllm/__init__.py", - "openllm-python/src/openllm/__main__.py", - "openllm-core/src/openllm_core/_typing_compat.py", - "openllm-client/src/openllm_client/pb/**", -] -precision = 2 -show_missing = true - -[tool.pyright] -useLibraryCodeForTypes = true -verboseOutput = true -include = ["openllm-core/src", "openllm-client/src", "openllm-python/src"] -defineConstant = { MYPY = true } -exclude = [ - "__pypackages__/*", - "cz.py", - "tools", - "examples", - "typings", - "openllm-python/src/openllm/playground/", - "openllm-python/src/openllm/models/", - "openllm-python/src/openllm/__init__.py", - "openllm-python/src/openllm/__main__.py", - "openllm-python/tests", - "openllm-core/src/openllm_core/_typing_compat.py", - "openllm-client/src/openllm_client/pb/**", -] -pythonVersion = "3.12" -enableExperimentalFeatures = true -reportMissingImports = "warning" -reportMissingTypeStubs = false -reportPrivateUsage = "warning" -reportUnknownArgumentType = "warning" -reportUnknownMemberType = "warning" -reportUnknownVariableType = "warning" -reportUnsupportedDunderAll = "warning" -reportWildcardImportFromLibrary = "warning" -typeCheckingMode = "strict" - -[tool.mypy] -exclude = [ - "__pypackages__/*", - "examples", - "tools", - "cz.py", - "setup.py", - "openllm-python/setup.py", - "openllm-client/setup.py", - "openllm-core/setup.py", - "openllm-python/tests", - "openllm-python/src/openllm/playground", - "openllm-python/src/openllm/models", - 
"openllm-python/src/openllm/_service.py", - "openllm-core/src/openllm_core/_typing_compat.py", - "openllm-client/src/openllm_client/pb/**", -] -modules = ["openllm", "openllm_core", "openllm_client"] -mypy_path = "typings:openllm-core/src:openllm-client/src" -pretty = true -python_version = "3.8" -show_error_codes = true -strict = true -warn_return_any = false -warn_unreachable = true -warn_unused_ignores = false -explicit_package_bases = true -[[tool.mypy.overrides]] -ignore_missing_imports = true -module = [ - "IPython.*", - "numpy.*", - "tensorflow.*", - "torch.*", - "optimum.*", - "inflection.*", - "huggingface_hub.*", - "click_option_group.*", - "peft.*", - "auto_gptq.*", - "vllm.*", - "orjson.*", - "httpx.*", - "cloudpickle.*", - "circus.*", - "grpc_health.v1.*", - "transformers.*", - "ghapi.*", -] -[[tool.mypy.overrides]] -ignore_errors = true -module = [ - "openllm.models.*", - "openllm.playground.*", - "openllm_core._typing_compat", -] diff --git a/pyrightconfig.json b/pyrightconfig.json new file mode 100644 index 00000000..6aec6fdc --- /dev/null +++ b/pyrightconfig.json @@ -0,0 +1,36 @@ +{ + "useLibraryCodeForTypes": true, + "typeCheckingMode": "strict", + "verboseOutput": true, + "include": [ + "openllm-core/src", + "openllm-client/src", + "openllm-python/src" + ], + "define": { + "MYPY": true + }, + "exclude": [ + "cz.py", + "tools", + "examples", + "typings", + "openllm-python/src/openllm/playground/", + "openllm-python/src/openllm/models/", + "openllm-python/src/openllm/__init__.py", + "openllm-python/src/openllm/__main__.py", + "openllm-python/tests", + "openllm-core/src/openllm_core/_typing_compat.py", + "openllm-client/src/openllm_client/pb/**" + ], + "pythonVersion": "3.12", + "enableExperimentalFeatures": true, + "reportMissingImports": "warning", + "reportMissingTypeStubs": false, + "reportPrivateUsage": "warning", + "reportUnknownArgumentType": "warning", + "reportUnknownMemberType": "warning", + "reportUnknownVariableType": "warning", + 
"reportUnsupportedDunderAll": "warning", + "reportWildcardImportFromLibrary": "warning" +} diff --git a/ruff.toml b/ruff.toml new file mode 100644 index 00000000..f878776c --- /dev/null +++ b/ruff.toml @@ -0,0 +1,99 @@ +indent-width = 2 +extend-exclude = [ + "tools", + "examples", + "openllm-python/src/openllm/playground", + "openllm-python/src/openllm/__init__.py", + "openllm-python/src/openllm/_version.py", + "openllm-python/src/openllm/models/__init__.py", + "openllm-client/src/openllm_client/pb/**", +] +extend-include = ["*.ipynb"] +extend-select = [ + "I", # isort + "G", # flake8-logging-format + "W", # pycodestyle + "Q", # flake8-quotes + "FA", # flake8-future-annotations + "TCH", # flake8-type-checking + "PLW", # pylint-warning + "PLR", # pylint-refactor + "PT", # flake8-pytest-style + "PERF", # perflint + "FLY", # flynt + "RUF", # Ruff-specific rules + "YTT", # flake8-2020 +] +fix = true +ignore = [ + "PLR0911", + "PLR0912", + "PLR0913", + "PLR0915", + "PLR2004", # magic value to use constant + "E501", # ignore line length violation + "E401", # ignore multiple line import + "E702", + "TCH004", # don't move runtime import out, just warn about it + "RUF012", # mutable attributes to be used with ClassVar + "E701", # multiple statement on single line +] +line-length = 192 +target-version = "py312" +typing-modules = ["openllm_core._typing_compat"] +unfixable = ["TCH004"] + +[flake8-type-checking] +exempt-modules = ["typing", "typing_extensions", "openllm_core._typing_compat"] +runtime-evaluated-base-classes = [ + "openllm_core._configuration.LLMConfig", + "openllm_core._configuration.GenerationConfig", + "openllm_core._configuration.SamplingParams", + "openllm_core._configuration.ModelSettings", + "openllm.LLMConfig", +] +runtime-evaluated-decorators = ["attrs.define", "attrs.frozen", "trait"] + +[format] +quote-style = "single" +indent-style = "space" +skip-magic-trailing-comma = true + +[pydocstyle] +convention = "google" + +[pycodestyle] 
+ignore-overlong-task-comments = true + +[isort] +combine-as-imports = true +force-single-line = true +force-wrap-aliases = false +known-first-party = ["openllm", "bentoml", "openllm_core", "openllm_client"] +known-third-party = [ + "transformers", + "click", + "huggingface_hub", + "torch", + "vllm", + "auto_gptq", + "peft", + "click_option_group", +] +lines-after-imports = 1 +lines-between-types = 1 +no-lines-before = ["future", "standard-library"] +relative-imports-order = "closest-to-furthest" + +[flake8-quotes] +avoid-escape = false +multiline-quotes = "single" +inline-quotes = "single" +docstring-quotes = "double" + +[extend-per-file-ignores] +"openllm-python/src/openllm/models/**" = ["E", "F", "I001"] +"openllm-python/tests/**/*" = ["S101", "TID252", "PT011", "S307"] +"openllm-python/src/openllm/_llm.py" = ["F811"] +"openllm-core/src/openllm_core/utils/import_utils.py" = ["PLW0603", "F811"] +"openllm-core/src/openllm_core/_configuration.py" = ["F811"] diff --git a/typings/attr/__init__.pyi b/typings/attr/__init__.pyi deleted file mode 100644 index f0d68c6c..00000000 --- a/typings/attr/__init__.pyi +++ /dev/null @@ -1,575 +0,0 @@ -import enum -import sys - -from typing import Any -from typing import Callable -from typing import Dict -from typing import Generic -from typing import List -from typing import Literal -from typing import Mapping -from typing import Optional -from typing import Protocol -from typing import Sequence -from typing import Tuple -from typing import Type -from typing import TypeVar -from typing import Union -from typing import overload - -if sys.version_info[:2] >= (3, 11): - from typing import ParamSpec - from typing import TypeAlias - from typing import TypeGuard - from typing import dataclass_transform -else: - from typing_extensions import ParamSpec - from typing_extensions import TypeAlias - from typing_extensions import TypeGuard - from typing_extensions import dataclass_transform - -from . import converters as converters -from . 
import exceptions as exceptions -from . import filters as filters -from . import setters as setters -from . import validators as validators -from ._cmp import cmp_using as cmp_using -from ._typing_compat import AttrsInstance_ -from ._version_info import VersionInfo - -__version__: str -__version_info__: VersionInfo -__title__: str -__description__: str -__url__: str -__uri__: str -__author__: str -__email__: str -__license__: str -__copyright__: str -_T = TypeVar('_T') -_C = TypeVar('_C', bound=type) -_P = ParamSpec('_P') -_EqOrderType: TypeAlias = Union[bool, Callable[[Any], Any]] -_ValidatorType: TypeAlias = Callable[[Any, Attribute[_T], _T], Any] -_ConverterType: TypeAlias = Callable[[Any], Any] -_FilterType: TypeAlias = Callable[[Attribute[_T], _T], bool] -_ReprType: TypeAlias = Callable[[Any], str] -_ReprArgType: TypeAlias = Union[bool, _ReprType] -_OnSetAttrType: TypeAlias = Callable[[Any, Attribute[Any], Any], Any] -_OnSetAttrArgType: TypeAlias = Union[_OnSetAttrType, List[_OnSetAttrType], setters._NoOpType] -_FieldTransformer: TypeAlias = Callable[[type, List[Attribute[Any]]], List[Attribute[Any]]] -_ValidatorArgType: TypeAlias = Union[_ValidatorType[_T], Sequence[_ValidatorType[_T]]] - -class AttrsInstance(AttrsInstance_, Protocol): ... - -_A = TypeVar('_A', bound=AttrsInstance) - -class _Nothing(enum.Enum): - NOTHING = ... - -NOTHING: enum.Enum = ... - -@overload -def Factory(factory: Callable[[], _T]) -> _T: ... -@overload -def Factory(factory: Callable[[Any], _T], takes_self: Literal[True]) -> _T: ... -@overload -def Factory(factory: Callable[[], _T], takes_self: Literal[False]) -> _T: ... 
- -class _CountingAttr: - counter: int - _default: Any - repr: _ReprArgType - cmp: _EqOrderType - eq: _EqOrderType - eq_key: str - order: _EqOrderType - order_key: str - hash: bool | None - init: bool - converter: _ConverterType | None - metadata: dict[Any, Any] - _validator: _ValidatorType[Any] | None - type: type[Any] | None - kw_only: bool - on_setattr: _OnSetAttrType - alias: str | None - -class Attribute(Generic[_T]): - name: str - default: _T | None - validator: _ValidatorType[_T] | None - repr: _ReprArgType - cmp: _EqOrderType - eq: _EqOrderType - order: _EqOrderType - hash: bool | None - init: bool - converter: _ConverterType | None - metadata: dict[Any, Any] - type: type[_T] | None - kw_only: bool - on_setattr: _OnSetAttrType - alias: str | None - def evolve(self, **changes: Any) -> Attribute[Any]: ... - @classmethod - def from_counting_attr(cls, name: str, ca: _CountingAttr, type: Type[Any] | None = None) -> Attribute[_T]: ... - -# NOTE: We had several choices for the annotation to use for type arg: -# 1) Type[_T] -# - Pros: Handles simple cases correctly -# - Cons: Might produce less informative errors in the case of conflicting -# TypeVars e.g. `attr.ib(default='bad', type=int)` -# 2) Callable[..., _T] -# - Pros: Better error messages than #1 for conflicting TypeVars -# - Cons: Terrible error messages for validator checks. -# e.g. attr.ib(type=int, validator=validate_str) -# -> error: Cannot infer function type argument -# 3) type (and do all of the work in the mypy plugin) -# - Pros: Simple here, and we could customize the plugin with our own errors. -# - Cons: Would need to write mypy plugin code to handle all the cases. -# We chose option #1. - -# `attr` lies about its return type to make the following possible: -# attr() -> Any -# attr(8) -> int -# attr(validator=) -> Whatever the callable expects. 
-# This makes this type of assignments possible: -# x: int = attr(8) -# -# This form catches explicit None or no default but with no other arguments -# returns Any. -@overload -def attrib( - default: None = ..., - validator: None = ..., - repr: _ReprArgType = ..., - cmp: Optional[_EqOrderType] = ..., - hash: Optional[bool] = ..., - init: bool = ..., - metadata: Optional[Mapping[Any, Any]] = ..., - type: None = ..., - converter: None = ..., - factory: None = ..., - kw_only: bool = ..., - eq: Optional[_EqOrderType] = ..., - order: Optional[_EqOrderType] = ..., - on_setattr: Optional[_OnSetAttrArgType] = ..., - alias: Optional[str] = ..., -) -> Any: ... - -# This form catches an explicit None or no default and infers the type from the -# other arguments. -@overload -def attrib( - default: None = ..., - validator: Optional[_ValidatorArgType[_T]] = ..., - repr: _ReprArgType = ..., - cmp: Optional[_EqOrderType] = ..., - hash: Optional[bool] = ..., - init: bool = ..., - metadata: Optional[Mapping[Any, Any]] = ..., - type: Optional[Type[_T]] = ..., - converter: Optional[_ConverterType] = ..., - factory: Optional[Callable[[], _T]] = ..., - kw_only: bool = ..., - eq: Optional[_EqOrderType] = ..., - order: Optional[_EqOrderType] = ..., - on_setattr: Optional[_OnSetAttrArgType] = ..., - alias: Optional[str] = ..., -) -> _T: ... - -# This form catches an explicit default argument. -@overload -def attrib( - default: _T, - validator: Optional[_ValidatorArgType[_T]] = ..., - repr: _ReprArgType = ..., - cmp: Optional[_EqOrderType] = ..., - hash: Optional[bool] = ..., - init: bool = ..., - metadata: Optional[Mapping[Any, Any]] = ..., - type: Optional[Type[_T]] = ..., - converter: Optional[_ConverterType] = ..., - factory: Optional[Callable[[], _T]] = ..., - kw_only: bool = ..., - eq: Optional[_EqOrderType] = ..., - order: Optional[_EqOrderType] = ..., - on_setattr: Optional[_OnSetAttrArgType] = ..., - alias: Optional[str] = ..., -) -> _T: ... 
- -# This form covers type=non-Type: e.g. forward references (str), Any -@overload -def attrib( - default: Optional[_T] = ..., - validator: Optional[_ValidatorArgType[_T]] = ..., - repr: _ReprArgType = ..., - cmp: Optional[_EqOrderType] = ..., - hash: Optional[bool] = ..., - init: bool = ..., - metadata: Optional[Mapping[Any, Any]] = ..., - type: object = ..., - converter: Optional[_ConverterType] = ..., - factory: Optional[Callable[[], _T]] = ..., - kw_only: bool = ..., - eq: Optional[_EqOrderType] = ..., - order: Optional[_EqOrderType] = ..., - on_setattr: Optional[_OnSetAttrArgType] = ..., - alias: Optional[str] = ..., -) -> Any: ... -@overload -def field( - *, - default: None = ..., - validator: None = ..., - repr: _ReprArgType = ..., - hash: Optional[bool] = ..., - init: bool = ..., - metadata: Optional[Mapping[Any, Any]] = ..., - converter: None = ..., - factory: None = ..., - kw_only: bool = ..., - eq: Optional[bool] = ..., - order: Optional[bool] = ..., - on_setattr: Optional[_OnSetAttrArgType] = ..., - alias: Optional[str] = ..., - type: Optional[type] = ..., -) -> Any: ... - -# This form catches an explicit None or no default and infers the type from the -# other arguments. -@overload -def field( - *, - default: None = ..., - validator: Optional[_ValidatorArgType[_T]] = ..., - repr: _ReprArgType = ..., - hash: Optional[bool] = ..., - init: bool = ..., - metadata: Optional[Mapping[Any, Any]] = ..., - converter: Optional[_ConverterType] = ..., - factory: Optional[Callable[[], _T]] = ..., - kw_only: bool = ..., - eq: Optional[_EqOrderType] = ..., - order: Optional[_EqOrderType] = ..., - on_setattr: Optional[_OnSetAttrArgType] = ..., - alias: Optional[str] = ..., - type: Optional[type] = ..., -) -> _T: ... - -# This form catches an explicit default argument. 
-@overload -def field( - *, - default: _T, - validator: Optional[_ValidatorArgType[_T]] = ..., - repr: _ReprArgType = ..., - hash: Optional[bool] = ..., - init: bool = ..., - metadata: Optional[Mapping[Any, Any]] = ..., - converter: Optional[_ConverterType] = ..., - factory: Optional[Callable[[], _T]] = ..., - kw_only: bool = ..., - eq: Optional[_EqOrderType] = ..., - order: Optional[_EqOrderType] = ..., - on_setattr: Optional[_OnSetAttrArgType] = ..., - alias: Optional[str] = ..., - type: Optional[type] = ..., -) -> _T: ... - -# This form covers type=non-Type: e.g. forward references (str), Any -@overload -def field( - *, - default: Optional[_T] = ..., - validator: Optional[_ValidatorArgType[_T]] = ..., - repr: _ReprArgType = ..., - hash: Optional[bool] = ..., - init: bool = ..., - metadata: Optional[Mapping[Any, Any]] = ..., - converter: Optional[_ConverterType] = ..., - factory: Optional[Callable[[], _T]] = ..., - kw_only: bool = ..., - eq: Optional[_EqOrderType] = ..., - order: Optional[_EqOrderType] = ..., - on_setattr: Optional[_OnSetAttrArgType] = ..., - alias: Optional[str] = ..., - type: Optional[type] = ..., -) -> Any: ... -@overload -@dataclass_transform(order_default=True, field_specifiers=(attrib, field)) -def attrs( - maybe_cls: _C, - these: Optional[Dict[str, Any]] = ..., - repr_ns: Optional[str] = ..., - repr: bool = ..., - cmp: Optional[_EqOrderType] = ..., - hash: Optional[bool] = ..., - init: bool = ..., - slots: bool = ..., - frozen: bool = ..., - weakref_slot: bool = ..., - str: bool = ..., - auto_attribs: bool = ..., - kw_only: bool = ..., - cache_hash: bool = ..., - auto_exc: bool = ..., - eq: Optional[_EqOrderType] = ..., - order: Optional[_EqOrderType] = ..., - auto_detect: bool = ..., - collect_by_mro: bool = ..., - getstate_setstate: Optional[bool] = ..., - on_setattr: Optional[_OnSetAttrArgType] = ..., - field_transformer: Optional[_FieldTransformer] = ..., - match_args: bool = ..., - unsafe_hash: Optional[bool] = ..., -) -> _C: ... 
-@overload -@dataclass_transform(order_default=True, field_specifiers=(attrib, field)) -def attrs( - maybe_cls: None = ..., - these: Optional[Dict[str, Any]] = ..., - repr_ns: Optional[str] = ..., - repr: bool = ..., - cmp: Optional[_EqOrderType] = ..., - hash: Optional[bool] = ..., - init: bool = ..., - slots: bool = ..., - frozen: bool = ..., - weakref_slot: bool = ..., - str: bool = ..., - auto_attribs: bool = ..., - kw_only: bool = ..., - cache_hash: bool = ..., - auto_exc: bool = ..., - eq: Optional[_EqOrderType] = ..., - order: Optional[_EqOrderType] = ..., - auto_detect: bool = ..., - collect_by_mro: bool = ..., - getstate_setstate: Optional[bool] = ..., - on_setattr: Optional[_OnSetAttrArgType] = ..., - field_transformer: Optional[_FieldTransformer] = ..., - match_args: bool = ..., - unsafe_hash: Optional[bool] = ..., -) -> Callable[[_C], _C]: ... -@overload -@dataclass_transform(field_specifiers=(attrib, field)) -def define( - maybe_cls: _C, - *, - these: Optional[Dict[str, Any]] = ..., - repr: bool = ..., - unsafe_hash: Optional[bool] = ..., - hash: Optional[bool] = ..., - init: bool = ..., - slots: bool = ..., - frozen: bool = ..., - weakref_slot: bool = ..., - str: bool = ..., - auto_attribs: bool = ..., - kw_only: bool = ..., - cache_hash: bool = ..., - auto_exc: bool = ..., - eq: Optional[bool] = ..., - order: Optional[bool] = ..., - auto_detect: bool = ..., - getstate_setstate: Optional[bool] = ..., - on_setattr: Optional[_OnSetAttrArgType] = ..., - field_transformer: Optional[_FieldTransformer] = ..., - match_args: bool = ..., -) -> _C: ... 
-@overload -@dataclass_transform(field_specifiers=(attrib, field)) -def define( - maybe_cls: None = ..., - *, - these: Optional[Dict[str, Any]] = ..., - repr: bool = ..., - unsafe_hash: Optional[bool] = ..., - hash: Optional[bool] = ..., - init: bool = ..., - slots: bool = ..., - frozen: bool = ..., - weakref_slot: bool = ..., - str: bool = ..., - auto_attribs: bool = ..., - kw_only: bool = ..., - cache_hash: bool = ..., - auto_exc: bool = ..., - eq: Optional[bool] = ..., - order: Optional[bool] = ..., - auto_detect: bool = ..., - getstate_setstate: Optional[bool] = ..., - on_setattr: Optional[_OnSetAttrArgType] = ..., - field_transformer: Optional[_FieldTransformer] = ..., - match_args: bool = ..., -) -> Callable[[_C], _C]: ... - -mutable = define - -@overload -@dataclass_transform(frozen_default=True, field_specifiers=(attrib, field)) -def frozen( - maybe_cls: _C, - *, - these: Optional[Dict[str, Any]] = ..., - repr: bool = ..., - unsafe_hash: Optional[bool] = ..., - hash: Optional[bool] = ..., - init: bool = ..., - slots: bool = ..., - frozen: bool = ..., - weakref_slot: bool = ..., - str: bool = ..., - auto_attribs: bool = ..., - kw_only: bool = ..., - cache_hash: bool = ..., - auto_exc: bool = ..., - eq: Optional[bool] = ..., - order: Optional[bool] = ..., - auto_detect: bool = ..., - getstate_setstate: Optional[bool] = ..., - on_setattr: Optional[_OnSetAttrArgType] = ..., - field_transformer: Optional[_FieldTransformer] = ..., - match_args: bool = ..., -) -> _C: ... 
-@overload -@dataclass_transform(frozen_default=True, field_specifiers=(attrib, field)) -def frozen( - maybe_cls: None = ..., - *, - these: Optional[Dict[str, Any]] = ..., - repr: bool = ..., - unsafe_hash: Optional[bool] = ..., - hash: Optional[bool] = ..., - init: bool = ..., - slots: bool = ..., - frozen: bool = ..., - weakref_slot: bool = ..., - str: bool = ..., - auto_attribs: bool = ..., - kw_only: bool = ..., - cache_hash: bool = ..., - auto_exc: bool = ..., - eq: Optional[bool] = ..., - order: Optional[bool] = ..., - auto_detect: bool = ..., - getstate_setstate: Optional[bool] = ..., - on_setattr: Optional[_OnSetAttrArgType] = ..., - field_transformer: Optional[_FieldTransformer] = ..., - match_args: bool = ..., -) -> Callable[[_C], _C]: ... -def fields(cls: Type[AttrsInstance]) -> Any: ... -def fields_dict(cls: Type[AttrsInstance]) -> Dict[str, Attribute[Any]]: ... -def validate(inst: AttrsInstance) -> None: ... -@overload -def resolve_types( - cls: Type[_A], - globalns: Optional[Dict[str, Any]] = ..., - localns: Optional[Dict[str, Any]] = ..., - attribs: Optional[List[Attribute[Any]]] = ..., - include_extras: bool = ..., -) -> Type[_A]: ... -@overload -def resolve_types( - cls: _A, - globalns: Optional[Dict[str, Any]] = ..., - localns: Optional[Dict[str, Any]] = ..., - attribs: Optional[List[Attribute[Any]]] = ..., - include_extras: bool = ..., -) -> _A: ... - -# TODO: add support for returning a proper attrs class from the mypy plugin -# we use Any instead of _CountingAttr so that e.g. `make_class('Foo', -# [attr.ib()])` is valid -def make_class( - name: str, - attrs: Union[List[str], Tuple[str, ...], Dict[str, Any]], - bases: Tuple[type, ...] 
= ..., - repr_ns: Optional[str] = ..., - repr: bool = ..., - cmp: Optional[_EqOrderType] = ..., - hash: Optional[bool] = ..., - init: bool = ..., - slots: bool = ..., - frozen: bool = ..., - weakref_slot: bool = ..., - str: bool = ..., - auto_attribs: bool = ..., - kw_only: bool = ..., - cache_hash: bool = ..., - auto_exc: bool = ..., - eq: Optional[_EqOrderType] = ..., - order: Optional[_EqOrderType] = ..., - collect_by_mro: bool = ..., - on_setattr: Optional[_OnSetAttrArgType] = ..., - field_transformer: Optional[_FieldTransformer] = ..., -) -> type: ... - -# _funcs -- - -# TODO: add support for returning TypedDict from the mypy plugin -# FIXME: asdict/astuple do not honor their factory args. Waiting on one of -# these: -# https://github.com/python/mypy/issues/4236 -# https://github.com/python/typing/issues/253 -# XXX: remember to fix attrs.asdict/astuple too! -def asdict( - inst: AttrsInstance, - recurse: bool = ..., - filter: Optional[_FilterType[Any]] = ..., - dict_factory: Type[Mapping[Any, Any]] = ..., - retain_collection_types: bool = ..., - value_serializer: Optional[Callable[[type, Attribute[Any], Any], Any]] = ..., - tuple_keys: Optional[bool] = ..., -) -> Dict[str, Any]: ... - -# TODO: add support for returning NamedTuple from the mypy plugin -def astuple( - inst: AttrsInstance, - recurse: bool = ..., - filter: Optional[_FilterType[Any]] = ..., - tuple_factory: Type[Sequence[Any]] = ..., - retain_collection_types: bool = ..., -) -> Tuple[Any, ...]: ... -def has(cls: type) -> TypeGuard[Type[AttrsInstance]]: ... -def assoc(inst: _T, **changes: Any) -> _T: ... -def evolve(inst: _T, **changes: Any) -> _T: ... - -# _config -- -def set_run_validators(run: bool) -> None: ... -def get_run_validators() -> bool: ... - -# aliases -- -s = attrs -attributes = attrs -ib = attrib -attr = attrib -dataclass = attrs # Technically, partial(attrs, auto_attribs=True) ;) - -class ReprProtocol(Protocol): - def __call__(__self, self: Any) -> str: ... 
-def _make_init( - cls: type[AttrsInstance], - attrs: tuple[Attribute[Any], ...], - pre_init: bool, - post_init: bool, - frozen: bool, - slots: bool, - cache_hash: bool, - base_attr_map: dict[Any, Any], - is_exc: bool, - cls_on_setattr: Any, - attrs_init: bool, -) -> Callable[_P, Any]: ... -def _make_repr(attrs: tuple[Attribute[Any]], ns: str | None, cls: AttrsInstance) -> ReprProtocol: ... -def _transform_attrs( - cls: type[AttrsInstance], - these: dict[str, _CountingAttr] | None, - auto_attribs: bool, - kw_only: bool, - collect_by_mro: bool, - field_transformer: _FieldTransformer | None, -) -> tuple[tuple[Attribute[Any], ...], tuple[Attribute[Any], ...], dict[Attribute[Any], type[Any]]]: ... diff --git a/typings/attr/_cmp.pyi b/typings/attr/_cmp.pyi deleted file mode 100644 index 4f396290..00000000 --- a/typings/attr/_cmp.pyi +++ /dev/null @@ -1,14 +0,0 @@ -import sys - -from typing import Any -from typing import Callable -from typing import Optional - -if sys.version_info[:2] >= (3, 10): - from typing import TypeAlias -else: - from typing_extensions import TypeAlias - -_CompareWithType: TypeAlias = Callable[[Any, Any], bool] - -def cmp_using(eq: Optional[_CompareWithType] = ..., lt: Optional[_CompareWithType] = ..., le: Optional[_CompareWithType] = ..., gt: Optional[_CompareWithType] = ..., ge: Optional[_CompareWithType] = ..., require_same_type: bool = ..., class_name: str = ...,) -> type[Any]: ... diff --git a/typings/attr/_compat.pyi b/typings/attr/_compat.pyi deleted file mode 100644 index 72c4de67..00000000 --- a/typings/attr/_compat.pyi +++ /dev/null @@ -1,7 +0,0 @@ -import threading - -from typing import Any - -def set_closure_cell(cell: Any, value: Any) -> None: ... - -repr_context: threading.local = ... diff --git a/typings/attr/_make.pyi b/typings/attr/_make.pyi deleted file mode 100644 index ce62f42c..00000000 --- a/typings/attr/_make.pyi +++ /dev/null @@ -1,4 +0,0 @@ -from . import _CountingAttr as _CountingAttr -from . 
import _make_init as _make_init -from . import _make_repr as _make_repr -from . import _transform_attrs as _transform_attrs diff --git a/typings/attr/_typing_compat.pyi b/typings/attr/_typing_compat.pyi deleted file mode 100644 index b6c68a0c..00000000 --- a/typings/attr/_typing_compat.pyi +++ /dev/null @@ -1,16 +0,0 @@ -from typing import Any -from typing import ClassVar -from typing import Protocol - -# MYPY is a special constant in mypy which works the same way as `TYPE_CHECKING`. -MYPY: bool = False - -if MYPY: - # A protocol to be able to statically accept an attrs class. - class AttrsInstance_(Protocol): - __attrs_attrs__: ClassVar[Any] - -else: - # For type checkers without plug-in support use an empty protocol that - # will (hopefully) be combined into a union. - class AttrsInstance_(Protocol): ... diff --git a/typings/attr/_version_info.pyi b/typings/attr/_version_info.pyi deleted file mode 100644 index 45ced086..00000000 --- a/typings/attr/_version_info.pyi +++ /dev/null @@ -1,9 +0,0 @@ -class VersionInfo: - @property - def year(self) -> int: ... - @property - def minor(self) -> int: ... - @property - def micro(self) -> int: ... - @property - def releaselevel(self) -> str: ... diff --git a/typings/attr/converters.pyi b/typings/attr/converters.pyi deleted file mode 100644 index 419a1d87..00000000 --- a/typings/attr/converters.pyi +++ /dev/null @@ -1,15 +0,0 @@ -from typing import Callable -from typing import TypeVar -from typing import overload - -from . import _ConverterType - -_T = TypeVar('_T') - -def pipe(*validators: _ConverterType) -> _ConverterType: ... -def optional(converter: _ConverterType) -> _ConverterType: ... -@overload -def default_if_none(default: _T) -> _ConverterType: ... -@overload -def default_if_none(*, factory: Callable[[], _T]) -> _ConverterType: ... -def to_bool(val: str) -> bool: ... 
diff --git a/typings/attr/exceptions.pyi b/typings/attr/exceptions.pyi deleted file mode 100644 index f2680118..00000000 --- a/typings/attr/exceptions.pyi +++ /dev/null @@ -1,17 +0,0 @@ -from typing import Any - -class FrozenError(AttributeError): - msg: str = ... - -class FrozenInstanceError(FrozenError): ... -class FrozenAttributeError(FrozenError): ... -class AttrsAttributeNotFoundError(ValueError): ... -class NotAnAttrsClassError(ValueError): ... -class DefaultAlreadySetError(RuntimeError): ... -class UnannotatedAttributeError(RuntimeError): ... -class PythonTooOldError(RuntimeError): ... - -class NotCallableError(TypeError): - msg: str = ... - value: Any = ... - def __init__(self, msg: str, value: Any) -> None: ... diff --git a/typings/attr/filters.pyi b/typings/attr/filters.pyi deleted file mode 100644 index 511218e4..00000000 --- a/typings/attr/filters.pyi +++ /dev/null @@ -1,8 +0,0 @@ -from typing import Any -from typing import Union - -from . import Attribute -from . import _FilterType - -def include(*what: Union[type, str, Attribute[Any]]) -> _FilterType[Any]: ... -def exclude(*what: Union[type, str, Attribute[Any]]) -> _FilterType[Any]: ... diff --git a/typings/attr/setters.pyi b/typings/attr/setters.pyi deleted file mode 100644 index 1c7b7128..00000000 --- a/typings/attr/setters.pyi +++ /dev/null @@ -1,17 +0,0 @@ -from typing import Any -from typing import NewType -from typing import NoReturn -from typing import TypeVar - -from . import Attribute -from . import _OnSetAttrType - -_T = TypeVar('_T') - -def frozen(instance: Any, attribute: Attribute[Any], new_value: Any) -> NoReturn: ... -def pipe(*setters: _OnSetAttrType) -> _OnSetAttrType: ... -def validate(instance: Any, attribute: Attribute[_T], new_value: _T) -> _T: ... -def convert(instance: Any, attribute: Attribute[Any], new_value: Any) -> Any: ... 
- -_NoOpType = NewType('_NoOpType', object) -NO_OP: _NoOpType diff --git a/typings/attr/validators.pyi b/typings/attr/validators.pyi deleted file mode 100644 index a1bf843d..00000000 --- a/typings/attr/validators.pyi +++ /dev/null @@ -1,74 +0,0 @@ -from typing import Any -from typing import AnyStr -from typing import Callable -from typing import Container -from typing import ContextManager -from typing import Iterable -from typing import List -from typing import Mapping -from typing import Match -from typing import Optional -from typing import Pattern -from typing import Tuple -from typing import Type -from typing import TypeVar -from typing import Union -from typing import overload - -from . import _ValidatorArgType -from . import _ValidatorType - -_T = TypeVar('_T') -_T1 = TypeVar('_T1') -_T2 = TypeVar('_T2') -_T3 = TypeVar('_T3') -_I = TypeVar('_I', bound=Iterable[Any]) -_K = TypeVar('_K') -_V = TypeVar('_V') -_M = TypeVar('_M', bound=Mapping[Any, Any]) - -def set_disabled(run: bool) -> None: ... -def get_disabled() -> bool: ... -def disabled() -> ContextManager[None]: ... -@overload -def instance_of(type: Type[_T]) -> _ValidatorType[_T]: ... -@overload -def instance_of(type: Tuple[Type[_T]]) -> _ValidatorType[_T]: ... -@overload -def instance_of(type: Tuple[Type[_T1], Type[_T2]]) -> _ValidatorType[Union[_T1, _T2]]: ... -@overload -def instance_of(type: Tuple[Type[_T1], Type[_T2], Type[_T3]]) -> _ValidatorType[Union[_T1, _T2, _T3]]: ... -@overload -def instance_of(type: Tuple[type, ...]) -> _ValidatorType[Any]: ... -def provides(interface: Any) -> _ValidatorType[Any]: ... -def optional( - validator: Union[_ValidatorType[_T], List[_ValidatorType[_T]], Tuple[_ValidatorType[_T]]] -) -> _ValidatorType[Optional[_T]]: ... -def in_(options: Container[_T]) -> _ValidatorType[_T]: ... -def and_(*validators: _ValidatorType[_T]) -> _ValidatorType[_T]: ... 
-def matches_re( - regex: Union[Pattern[AnyStr], AnyStr], - flags: int = ..., - func: Optional[Callable[[AnyStr, AnyStr, int], Optional[Match[AnyStr]]]] = ..., -) -> _ValidatorType[AnyStr]: ... -def deep_iterable( - member_validator: _ValidatorArgType[_T], iterable_validator: Optional[_ValidatorType[_I]] = ... -) -> _ValidatorType[_I]: ... -def deep_mapping( - key_validator: _ValidatorType[_K], - value_validator: _ValidatorType[_V], - mapping_validator: Optional[_ValidatorType[_M]] = ..., -) -> _ValidatorType[_M]: ... -def is_callable() -> _ValidatorType[_T]: ... -def lt(val: _T) -> _ValidatorType[_T]: ... -def le(val: _T) -> _ValidatorType[_T]: ... -def ge(val: _T) -> _ValidatorType[_T]: ... -def gt(val: _T) -> _ValidatorType[_T]: ... -def max_len(length: int) -> _ValidatorType[_T]: ... -def min_len(length: int) -> _ValidatorType[_T]: ... -def not_( - validator: _ValidatorType[_T], - *, - msg: Optional[str] = ..., - exc_types: Union[Type[Exception], Iterable[Type[Exception]]] = ..., -) -> _ValidatorType[_T]: ... diff --git a/typings/click_option_group/__init__.pyi b/typings/click_option_group/__init__.pyi deleted file mode 100644 index 031d9a4c..00000000 --- a/typings/click_option_group/__init__.pyi +++ /dev/null @@ -1,30 +0,0 @@ -from ._core import AllOptionGroup -from ._core import GroupedOption -from ._core import MutuallyExclusiveOptionGroup -from ._core import OptionGroup -from ._core import RequiredAllOptionGroup -from ._core import RequiredAnyOptionGroup -from ._core import RequiredMutuallyExclusiveOptionGroup -from ._decorators import optgroup -from ._version import __version__ - -''' -click-option-group -~~~~~~~~~~~~~~~~~~ - -Option groups missing in Click - -:copyright: © 2019-2020 by Eugene Prilepin -:license: BSD, see LICENSE for more details. 
-''' -__all__ = [ - '__version__', - 'optgroup', - 'GroupedOption', - 'OptionGroup', - 'RequiredAnyOptionGroup', - 'AllOptionGroup', - 'RequiredAllOptionGroup', - 'MutuallyExclusiveOptionGroup', - 'RequiredMutuallyExclusiveOptionGroup', -] diff --git a/typings/click_option_group/_core.pyi b/typings/click_option_group/_core.pyi deleted file mode 100644 index dd772f89..00000000 --- a/typings/click_option_group/_core.pyi +++ /dev/null @@ -1,86 +0,0 @@ -import sys - -from typing import Any -from typing import Callable -from typing import Dict -from typing import List -from typing import Mapping -from typing import Optional -from typing import Sequence -from typing import Set -from typing import Tuple -from typing import TypeVar -from typing import Union - -import click - -if sys.version_info[:2] >= (3, 10): - from typing import TypeAlias -else: - from typing_extensions import TypeAlias - -AnyCallable: TypeAlias = Callable[..., Any] -_FC = TypeVar('_FC', bound=Union[AnyCallable, click.Command]) - -class GroupedOption(click.Option): - def __init__(self, param_decls: Optional[Sequence[str]] = ..., *, group: OptionGroup, **attrs: Any) -> None: ... - @property - def group(self) -> OptionGroup: ... - def handle_parse_result( - self, ctx: click.Context, opts: Mapping[str, Any], args: List[str] - ) -> Tuple[Any, List[str]]: ... - def get_help_record(self, ctx: click.Context) -> Optional[Tuple[str, str]]: ... - -class _GroupTitleFakeOption(click.Option): - def __init__(self, param_decls: Optional[Sequence[str]] = ..., *, group: OptionGroup, **attrs: Any) -> None: ... - def get_help_record(self, ctx: click.Context) -> Optional[Tuple[str, str]]: ... - -class OptionGroup: - def __init__(self, name: Optional[str] = ..., *, hidden: bool = ..., help: Optional[str] = ...) -> None: ... - @property - def name(self) -> str: ... - @property - def help(self) -> str: ... - @property - def name_extra(self) -> List[str]: ... - @property - def forbidden_option_attrs(self) -> List[str]: ... 
- def get_help_record(self, ctx: click.Context) -> Optional[Tuple[str, str]]: ... - def option(self, *param_decls: Any, **attrs: Any) -> Callable[[_FC], _FC]: ... - def get_options(self, ctx: click.Context) -> Dict[str, GroupedOption]: ... - def get_option_names(self, ctx: click.Context) -> List[str]: ... - def get_error_hint(self, ctx: click.Context, option_names: Optional[Set[str]] = ...) -> str: ... - def handle_parse_result(self, option: GroupedOption, ctx: click.Context, opts: Mapping[str, Any]) -> None: ... - -class RequiredAnyOptionGroup(OptionGroup): - @property - def forbidden_option_attrs(self) -> List[str]: ... - @property - def name_extra(self) -> List[str]: ... - def handle_parse_result(self, option: GroupedOption, ctx: click.Context, opts: Mapping[str, Any]) -> None: ... - -class RequiredAllOptionGroup(OptionGroup): - @property - def forbidden_option_attrs(self) -> List[str]: ... - @property - def name_extra(self) -> List[str]: ... - def handle_parse_result(self, option: GroupedOption, ctx: click.Context, opts: Mapping[str, Any]) -> None: ... - -class MutuallyExclusiveOptionGroup(OptionGroup): - @property - def forbidden_option_attrs(self) -> List[str]: ... - @property - def name_extra(self) -> List[str]: ... - def handle_parse_result(self, option: GroupedOption, ctx: click.Context, opts: Mapping[str, Any]) -> None: ... - -class RequiredMutuallyExclusiveOptionGroup(MutuallyExclusiveOptionGroup): - @property - def name_extra(self) -> List[str]: ... - def handle_parse_result(self, option: GroupedOption, ctx: click.Context, opts: Mapping[str, Any]) -> None: ... - -class AllOptionGroup(OptionGroup): - @property - def forbidden_option_attrs(self) -> List[str]: ... - @property - def name_extra(self) -> List[str]: ... - def handle_parse_result(self, option: GroupedOption, ctx: click.Context, opts: Mapping[str, Any]) -> None: ... 
diff --git a/typings/click_option_group/_decorators.pyi b/typings/click_option_group/_decorators.pyi deleted file mode 100644 index 547f3cab..00000000 --- a/typings/click_option_group/_decorators.pyi +++ /dev/null @@ -1,73 +0,0 @@ -from typing import Any -from typing import Callable -from typing import Dict -from typing import List -from typing import NamedTuple -from typing import Optional -from typing import Tuple -from typing import Type -from typing import TypeVar -from typing import Union -from typing import overload - -import click - -from ._core import _FC -from ._core import AnyCallable -from ._core import OptionGroup - -class OptionStackItem(NamedTuple): - param_decls: Tuple[str, ...] - attrs: Dict[str, Any] - param_count: int - -class _NotAttachedOption(click.Option): - def __init__(self, param_decls: Any = ..., *, all_not_attached_options: Any, **attrs: Any) -> None: ... - def handle_parse_result(self, ctx: click.Context, opts: Any, args: List[str]) -> Any: ... - -_GrpType = TypeVar('_GrpType', bound=OptionGroup) - -class _OptGroup: - def __init__(self) -> None: ... - def __call__( - self, - name: str, - *, - help: Optional[str] = None, - cls: Optional[Type[_GrpType]] = None, - **attrs: Any, - ) -> Union[click.Command, Callable[[AnyCallable], click.Command]]: ... - @overload - def group( - self, - name: str, - *, - cls: type[_GrpType], - **attrs: Any, - ) -> Callable[[AnyCallable], click.Command]: ... - @overload - def group( - self, - name: str, - cls: None, - **attrs: Any, - ) -> Callable[[AnyCallable], click.Command]: ... - @overload - def group( - self, - *, - name: str, - **attrs: Any, - ) -> Callable[[AnyCallable], click.Command]: ... - @overload - def group( - self, - name: Optional[str] = ..., - *, - help: Optional[str] = ..., - cls: Optional[Type[_GrpType]] = None, - **attrs: Any, - ) -> Union[click.Command, Callable[[AnyCallable], click.Command]]: ... - def option(self, *param_decls: Any, **attrs: Any) -> Callable[[_FC], _FC]: ... 
- -optgroup: _OptGroup = ... diff --git a/typings/click_option_group/_version.pyi b/typings/click_option_group/_version.pyi deleted file mode 100644 index 78575a03..00000000 --- a/typings/click_option_group/_version.pyi +++ /dev/null @@ -1,3 +0,0 @@ -'''This type stub file was generated by pyright.''' - -__version__ = ... diff --git a/typings/cuda/__init__.pyi b/typings/cuda/__init__.pyi deleted file mode 100644 index e76cccb8..00000000 --- a/typings/cuda/__init__.pyi +++ /dev/null @@ -1,2 +0,0 @@ -from . import cuda as cuda -from . import cudart as cudart diff --git a/typings/cuda/cuda.pyi b/typings/cuda/cuda.pyi deleted file mode 100644 index 16d5ea39..00000000 --- a/typings/cuda/cuda.pyi +++ /dev/null @@ -1,13 +0,0 @@ -from enum import Enum - -class CUresult(Enum): - CUDA_SUCCESS = 0 - -class _CUMixin: - def getPtr(self) -> int: ... - -class CUdevice(_CUMixin): ... - -def cuDeviceGetCount() -> tuple[CUresult, int]: ... -def cuDeviceGet(dev: int) -> tuple[CUresult, CUdevice]: ... -def cuInit(flags: int) -> tuple[CUresult]: ... diff --git a/typings/cuda/cudart.pyi b/typings/cuda/cudart.pyi deleted file mode 100644 index e69de29b..00000000 diff --git a/typings/deepmerge/__init__.pyi b/typings/deepmerge/__init__.pyi deleted file mode 100644 index 8dff0130..00000000 --- a/typings/deepmerge/__init__.pyi +++ /dev/null @@ -1,10 +0,0 @@ -from typing import List -from typing import Literal -from typing import Tuple - -from .merger import Merger - -DEFAULT_TYPE_SPECIFIC_MERGE_STRATEGIES: List[Tuple[type, Literal['append', 'merge', 'union']]] = ... -always_merger: Merger = ... -merge_or_raise: Merger = ... -conservative_merger: Merger = ... 
diff --git a/typings/deepmerge/merger.pyi b/typings/deepmerge/merger.pyi deleted file mode 100644 index b0bf606d..00000000 --- a/typings/deepmerge/merger.pyi +++ /dev/null @@ -1,31 +0,0 @@ -import sys - -from typing import Any -from typing import Dict -from typing import List -from typing import Tuple -from typing import Union - -if sys.version_info[:2] >= (3, 10): - from typing import TypeAlias -else: - from typing_extensions import TypeAlias -from .strategy.core import StrategyList -from .strategy.dict import DictStrategies -from .strategy.list import ListStrategies -from .strategy.set import SetStrategies - -ConfigDictType: TypeAlias = Dict[str, Any] - -class Merger: - PROVIDED_TYPE_STRATEGIES: Dict[type, Union[ListStrategies, DictStrategies, SetStrategies]] = ... - - def __init__( - self, - type_strategies: List[Tuple[type, str]], - fallback_strategies: List[str], - type_conflict_strategies: List[str], - ) -> None: ... - def merge(self, base: ConfigDictType, nxt: ConfigDictType) -> ConfigDictType: ... - def type_conflict_strategy(self, *args: Any) -> Any: ... - def value_strategy(self, path: str, base: StrategyList, nxt: StrategyList) -> None: ... diff --git a/typings/deepmerge/strategy/core.pyi b/typings/deepmerge/strategy/core.pyi deleted file mode 100644 index 2f76b20d..00000000 --- a/typings/deepmerge/strategy/core.pyi +++ /dev/null @@ -1,23 +0,0 @@ -import sys - -from typing import Any -from typing import Callable -from typing import List -from typing import Optional -from typing import Union - -if sys.version_info[:2] >= (3, 10): - from typing import TypeAlias -else: - from typing_extensions import TypeAlias - - -_StringOrFunction: TypeAlias = Union[str, Callable[..., Any]] -STRATEGY_END: object = ... - -class StrategyList: - NAME: Optional[str] = ... - def __init__(self, strategy_list: Union[_StringOrFunction, List[_StringOrFunction]]) -> None: ... - @classmethod - def _expand_strategy(cls, strategy: _StringOrFunction) -> _StringOrFunction: ... 
- def __call__(self, *args: Any, **kwargs: Any) -> Any: ... diff --git a/typings/deepmerge/strategy/dict.pyi b/typings/deepmerge/strategy/dict.pyi deleted file mode 100644 index 50087412..00000000 --- a/typings/deepmerge/strategy/dict.pyi +++ /dev/null @@ -1,8 +0,0 @@ -from .core import StrategyList -from ..merger import Merger - -class DictStrategies(StrategyList): - @staticmethod - def strategy_merge(config: Merger, path: str, base: StrategyList, nxt: StrategyList) -> StrategyList: ... - @staticmethod - def strategy_override(config: Merger, path: str, base: StrategyList, nxt: StrategyList) -> StrategyList: ... diff --git a/typings/deepmerge/strategy/list.pyi b/typings/deepmerge/strategy/list.pyi deleted file mode 100644 index c3a1f0ea..00000000 --- a/typings/deepmerge/strategy/list.pyi +++ /dev/null @@ -1,12 +0,0 @@ -from .core import StrategyList -from ..merger import Merger - -class ListStrategies(StrategyList): - NAME: str = ... - - @staticmethod - def strategy_override(config: Merger, path: str, base: StrategyList, nxt: StrategyList) -> StrategyList: ... - @staticmethod - def strategy_prepend(config: Merger, path: str, base: StrategyList, nxt: StrategyList) -> StrategyList: ... - @staticmethod - def strategy_append(config: Merger, path: str, base: StrategyList, nxt: StrategyList) -> StrategyList: ... diff --git a/typings/deepmerge/strategy/set.pyi b/typings/deepmerge/strategy/set.pyi deleted file mode 100644 index 1a888433..00000000 --- a/typings/deepmerge/strategy/set.pyi +++ /dev/null @@ -1,14 +0,0 @@ -from typing import Any - -from .core import StrategyList -from ..merger import Merger - -class SetStrategies(StrategyList): - NAME: str | None = ... - - @staticmethod - def strategy_union(config: Any, path: str, base: StrategyList, nxt: StrategyList) -> StrategyList: ... - @staticmethod - def strategy_intersect(config: Any, path: str, base: StrategyList, nxt: StrategyList) -> StrategyList: ... 
- @staticmethod - def strategy_override(config: Merger, path: str, base: StrategyList, nxt: StrategyList) -> StrategyList: ... diff --git a/typings/jupytext/__init__.pyi b/typings/jupytext/__init__.pyi deleted file mode 100644 index 5bfee0b3..00000000 --- a/typings/jupytext/__init__.pyi +++ /dev/null @@ -1,7 +0,0 @@ -from .formats import NOTEBOOK_EXTENSIONS as NOTEBOOK_EXTENSIONS -from .formats import get_format_implementation as get_format_implementation -from .formats import guess_format as guess_format -from .jupytext import read as read -from .jupytext import reads as reads -from .jupytext import write as write -from .jupytext import writes as writes diff --git a/typings/jupytext/config.pyi b/typings/jupytext/config.pyi deleted file mode 100644 index 3dea3c1b..00000000 --- a/typings/jupytext/config.pyi +++ /dev/null @@ -1,50 +0,0 @@ -from collections.abc import Generator -from typing import Any -from typing import Dict - -from _typeshed import Incomplete - -from .formats import NOTEBOOK_EXTENSIONS as NOTEBOOK_EXTENSIONS - -class JupytextConfigurationError(ValueError): ... 
- -JUPYTEXT_CONFIG_FILES: Incomplete -PYPROJECT_FILE: str -JUPYTEXT_CEILING_DIRECTORIES: Incomplete - -class JupytextConfiguration: - formats: Incomplete - default_jupytext_formats: Incomplete - preferred_jupytext_formats_save: Incomplete - preferred_jupytext_formats_read: Incomplete - notebook_metadata_filter: Incomplete - default_notebook_metadata_filter: Incomplete - hide_notebook_metadata: Incomplete - root_level_metadata_as_raw_cell: Incomplete - cell_metadata_filter: Incomplete - default_cell_metadata_filter: Incomplete - comment_magics: Incomplete - split_at_heading: Incomplete - sphinx_convert_rst2md: Incomplete - doxygen_equation_markers: Incomplete - outdated_text_notebook_margin: Incomplete - cm_config_log_level: Incomplete - cell_markers: Incomplete - default_cell_markers: Incomplete - notebook_extensions: Incomplete - custom_cell_magics: Incomplete - def set_default_format_options(self, format_options: Any, read: bool = ...) -> None: ... - def default_formats(self, path: str) -> Any: ... - def __eq__(self, other: object) -> bool: ... - -def preferred_format(incomplete_format: Any, preferred_formats: Any) -> Any: ... -def global_jupytext_configuration_directories() -> Generator[Incomplete, Incomplete, None]: ... -def find_global_jupytext_configuration_file() -> Any: ... -def find_jupytext_configuration_file(path: str, search_parent_dirs: bool = ...) -> Any: ... -def parse_jupytext_configuration_file(jupytext_config_file: str, stream: Incomplete | None = ...) -> Any: ... -def load_jupytext_configuration_file(config_file: str, stream: Incomplete | None = ...) -> Any: ... -def load_jupytext_config(nb_file: str) -> JupytextConfiguration: ... -def validate_jupytext_configuration_file(config_file: str, config_dict: Dict[str, Any]) -> None: ... -def notebook_formats( - nbk: Any, config: JupytextConfiguration, path: str, fallback_on_current_fmt: bool = ... -) -> Incomplete: ... 
diff --git a/typings/jupytext/formats.pyi b/typings/jupytext/formats.pyi deleted file mode 100644 index c9a21334..00000000 --- a/typings/jupytext/formats.pyi +++ /dev/null @@ -1,33 +0,0 @@ -from typing import Any - -from _typeshed import Incomplete - -class JupytextFormatError(ValueError): ... - -class NotebookFormatDescription: - format_name: Incomplete - extension: Incomplete - header_prefix: Incomplete - header_suffix: Incomplete - cell_reader_class: Incomplete - cell_exporter_class: Incomplete - current_version_number: Incomplete - min_readable_version_number: Incomplete - def __init__( - self, - format_name: str, - extension: str, - header_prefix: str, - cell_reader_class: Any, - cell_exporter_class: Any, - current_version_number: int, - header_suffix: str = ..., - min_readable_version_number: Incomplete | None = ..., - ) -> None: ... - -JUPYTEXT_FORMATS: Incomplete -NOTEBOOK_EXTENSIONS: Incomplete -EXTENSION_PREFIXES: Incomplete - -def get_format_implementation(ext: str, format_name: str = ...) -> NotebookFormatDescription: ... -def guess_format(text: str, ext: str) -> tuple[str, dict[str, Any]]: ... diff --git a/typings/jupytext/jupytext.pyi b/typings/jupytext/jupytext.pyi deleted file mode 100644 index ca0893ad..00000000 --- a/typings/jupytext/jupytext.pyi +++ /dev/null @@ -1,46 +0,0 @@ -from typing import IO -from typing import Any - -from _typeshed import Incomplete -from nbformat import NotebookNode -from nbformat.v4.rwbase import NotebookReader -from nbformat.v4.rwbase import NotebookWriter - -from .config import JupytextConfiguration - -class NotSupportedNBFormatVersion(NotImplementedError): ... - -class TextNotebookConverter(NotebookReader, NotebookWriter): - fmt: Incomplete - config: Incomplete - ext: Incomplete - implementation: Incomplete - def __init__(self, fmt: Any, config: JupytextConfiguration) -> None: ... - def update_fmt_with_notebook_options(self, metadata: Any, read: bool = ...) -> None: ... 
- def reads(self, s: str, **_: Any) -> NotebookNode: ... - def filter_notebook(self, nb: NotebookNode, metadata: Any) -> Any: ... - def writes(self, nb: NotebookNode, metadata: Incomplete | None = ..., **kwargs: Any) -> None: ... - -def reads( - text: str, - fmt: str = ..., - as_version: int = ..., - config: JupytextConfiguration | None = ..., - **kwargs: Any, -) -> NotebookNode: ... -def read( - fp: str | IO[Any], as_version: int = ..., fmt: str = ..., config: JupytextConfiguration | None = ..., **kwargs: Any -) -> NotebookNode: ... -def writes( - notebook: NotebookNode, fmt: str, version: int = ..., config: JupytextConfiguration | None = ..., **kwargs: Any -) -> None: ... -def drop_text_representation_metadata(notebook: NotebookNode, metadata: Any = ...) -> Any: ... -def write( - nb: NotebookNode, - fp: str, - version: int | None = ..., - fmt: str = ..., - config: JupytextConfiguration | None = ..., - **kwargs: Any, -) -> None: ... -def create_prefix_dir(nb_file: str, fmt: str) -> None: ... diff --git a/typings/nbformat/__init__.pyi b/typings/nbformat/__init__.pyi deleted file mode 100644 index 6f2bd555..00000000 --- a/typings/nbformat/__init__.pyi +++ /dev/null @@ -1,3 +0,0 @@ -from . import v4 as v4 -from .notebooknode import NotebookNode as NotebookNode -from .notebooknode import from_dict as from_dict diff --git a/typings/nbformat/_struct.pyi b/typings/nbformat/_struct.pyi deleted file mode 100644 index ffbe23bb..00000000 --- a/typings/nbformat/_struct.pyi +++ /dev/null @@ -1,30 +0,0 @@ -import sys - -from typing import Any -from typing import Dict - -if sys.version_info[:2] >= (3,11): - from typing import Self -else: - from typing_extensions import Self - -class Struct(Dict[str, Any]): - _allownew: bool = True - def __init__(self, *args: Any, **kw: Any) -> None: ... - def __setitem__(self, key: str, value: Any) -> None: ... - def __setattr__(self, key: str, value: Any) -> None: ... - def __getattr__(self, key: str) -> Any: ... 
- def __iadd__(self, other: Struct) -> Self: ... - def __add__(self, other: Struct) -> Struct: ... - def __sub__(self, other: Struct) -> Struct: ... - def __isub__(self, other: Struct) -> Self: ... - def dict(self) -> Self: ... - def copy(self) -> Self: ... - def hasattr(self, key: str) -> bool: ... - def allow_new_attr(self, allow: bool = True) -> None: ... - def merge( - self, - __loc_data__: Dict[str, Any] | Struct | None = ..., - __conflict_solve: Dict[str, Any] | None = ..., - **kw: Any, - ) -> Struct: ... diff --git a/typings/nbformat/notebooknode.pyi b/typings/nbformat/notebooknode.pyi deleted file mode 100644 index f8b52e4c..00000000 --- a/typings/nbformat/notebooknode.pyi +++ /dev/null @@ -1,7 +0,0 @@ -from typing import Any - -from ._struct import Struct - -class NotebookNode(Struct): ... - -def from_dict(d: dict[str, Any]) -> NotebookNode: ... diff --git a/typings/nbformat/v4/__init__.pyi b/typings/nbformat/v4/__init__.pyi deleted file mode 100644 index 66936335..00000000 --- a/typings/nbformat/v4/__init__.pyi +++ /dev/null @@ -1,18 +0,0 @@ -from .convert import downgrade as downgrade -from .convert import upgrade as upgrade -from .nbbase import nbformat as nbformat -from .nbbase import nbformat_minor as nbformat_minor -from .nbbase import nbformat_schema as nbformat_schema -from .nbbase import new_code_cell as new_code_cell -from .nbbase import new_markdown_cell as new_markdown_cell -from .nbbase import new_notebook as new_notebook -from .nbbase import new_output as new_output -from .nbbase import new_raw_cell as new_raw_cell -from .nbbase import output_from_msg as output_from_msg -from .nbjson import reads as reads -from .nbjson import to_notebook as to_notebook -from .nbjson import writes as writes - -reads_json = reads -writes_json = writes -to_notebook_json = to_notebook diff --git a/typings/nbformat/v4/convert.pyi b/typings/nbformat/v4/convert.pyi deleted file mode 100644 index 02c07a63..00000000 --- a/typings/nbformat/v4/convert.pyi +++ /dev/null 
@@ -1,18 +0,0 @@ -from typing import Any - -from _typeshed import Incomplete - -from .nbbase import NotebookNode as NotebookNode -from .nbbase import nbformat as nbformat -from .nbbase import nbformat_minor as nbformat_minor - -def upgrade(nb: NotebookNode, from_version: Incomplete | None = ..., from_minor: Incomplete | None = ...) -> Any: ... -def upgrade_cell(cell: NotebookNode) -> Any: ... -def downgrade_cell(cell: NotebookNode) -> Any: ... -def to_mime_key(d: Any) -> Any: ... -def from_mime_key(d: Any) -> Any: ... -def upgrade_output(output: Any) -> Any: ... -def downgrade_output(output: Any) -> Any: ... -def upgrade_outputs(outputs: Any) -> Any: ... -def downgrade_outputs(outputs: Any) -> Any: ... -def downgrade(nb: NotebookNode) -> Any: ... diff --git a/typings/nbformat/v4/nbbase.pyi b/typings/nbformat/v4/nbbase.pyi deleted file mode 100644 index 64e46dc4..00000000 --- a/typings/nbformat/v4/nbbase.pyi +++ /dev/null @@ -1,16 +0,0 @@ -from typing import Any - -from _typeshed import Incomplete -from nbformat.notebooknode import NotebookNode as NotebookNode - -nbformat: int -nbformat_minor: int -nbformat_schema: Incomplete - -def validate(node: NotebookNode, ref: Incomplete | None = ...) -> Any: ... -def new_output(output_type: Any, data: Incomplete | None = ..., **kwargs: Any) -> Any: ... -def output_from_msg(msg: str) -> Any: ... -def new_code_cell(source: str = ..., **kwargs: Any) -> NotebookNode: ... -def new_markdown_cell(source: str = ..., **kwargs: Any) -> NotebookNode: ... -def new_raw_cell(source: str = ..., **kwargs: Any) -> NotebookNode: ... -def new_notebook(**kwargs: Any) -> NotebookNode: ... 
diff --git a/typings/nbformat/v4/nbjson.pyi b/typings/nbformat/v4/nbjson.pyi deleted file mode 100644 index 8842094b..00000000 --- a/typings/nbformat/v4/nbjson.pyi +++ /dev/null @@ -1,30 +0,0 @@ -import json - -from typing import Any -from typing import Dict - -from _typeshed import Incomplete -from nbformat import NotebookNode -from nbformat.notebooknode import from_dict as from_dict - -from .rwbase import NotebookReader as NotebookReader -from .rwbase import NotebookWriter as NotebookWriter -from .rwbase import rejoin_lines as rejoin_lines -from .rwbase import split_lines as split_lines -from .rwbase import strip_transient as strip_transient - -class BytesEncoder(json.JSONEncoder): - def default(self, obj: Any) -> Any: ... - -class JSONReader(NotebookReader): - def reads(self, s: str, **kwargs: Any) -> NotebookNode: ... - def to_notebook(self, d: Dict[str, Any], **kwargs: Any) -> NotebookNode: ... - -class JSONWriter(NotebookWriter): - def writes(self, nb: NotebookNode, **kwargs: Any) -> None: ... - -reads: Incomplete -read: Incomplete -to_notebook: Incomplete -write: Incomplete -writes: Incomplete diff --git a/typings/nbformat/v4/rwbase.pyi b/typings/nbformat/v4/rwbase.pyi deleted file mode 100644 index 180129fc..00000000 --- a/typings/nbformat/v4/rwbase.pyi +++ /dev/null @@ -1,16 +0,0 @@ -from typing import Any -from typing import TextIO - -from nbformat.notebooknode import NotebookNode - -def rejoin_lines(nb: NotebookNode) -> Any: ... -def split_lines(nb: NotebookNode) -> Any: ... -def strip_transient(nb: NotebookNode) -> Any: ... - -class NotebookReader: - def reads(self, s: str, **kwargs: Any) -> NotebookNode: ... - def read(self, fp: TextIO, **kwargs: Any) -> NotebookNode: ... - -class NotebookWriter: - def writes(self, nb: NotebookNode, **kwargs: Any) -> None: ... - def write(self, nb: NotebookNode, fp: TextIO, **kwargs: Any) -> None: ... 
diff --git a/typings/rsmiBindings.pyi b/typings/rsmiBindings.pyi deleted file mode 100644 index 83a9f069..00000000 --- a/typings/rsmiBindings.pyi +++ /dev/null @@ -1,46 +0,0 @@ -# See https://github.com/RadeonOpenCompute/rocm_smi_lib/blob/master/python_smi_tools/rsmiBindings.py -import ctypes -import sys - -from typing import Any -from typing import Literal - -if sys.version_info[:2] >= (3, 11): - from typing import LiteralString -else: - from typing_extensions import LiteralString - -class rocmsmi(ctypes.CDLL): - @staticmethod - def rsmi_num_monitor_devices(num_devices: ctypes._CArgObject) -> Any: ... -# Device ID -dv_id: ctypes.c_uint64 = ... -# GPU ID -gpu_id: ctypes.c_uint32 = ... - -# Policy enums -RSMI_MAX_NUM_FREQUENCIES: Literal[32] = ... -RSMI_MAX_FAN_SPEED: Literal[255] = ... -RSMI_NUM_VOLTAGE_CURVE_POINTS: Literal[3] = ... - -class rsmi_status_t(ctypes.c_int): - RSMI_STATUS_SUCCESS: Literal[0x0] = ... - RSMI_STATUS_INVALID_ARGS: Literal[0x1] = ... - RSMI_STATUS_NOT_SUPPORTED: Literal[0x2] = ... - RSMI_STATUS_FILE_ERROR: Literal[0x3] = ... - RSMI_STATUS_PERMISSION: Literal[0x4] = ... - RSMI_STATUS_OUT_OF_RESOURCES: Literal[0x5] = ... - RSMI_STATUS_INTERNAL_EXCEPTION: Literal[0x6] = ... - RSMI_STATUS_INPUT_OUT_OF_BOUNDS: Literal[0x7] = ... - RSMI_STATUS_INIT_ERROR: Literal[0x8] = ... - RSMI_INITIALIZATION_ERROR = RSMI_STATUS_INIT_ERROR - RSMI_STATUS_NOT_YET_IMPLEMENTED: Literal[0x9] = ... - RSMI_STATUS_NOT_FOUND: Literal[0xA] = ... - RSMI_STATUS_INSUFFICIENT_SIZE: Literal[0xB] = ... - RSMI_STATUS_INTERRUPT: Literal[0xC] = ... - RSMI_STATUS_UNEXPECTED_SIZE: Literal[0xD] = ... - RSMI_STATUS_NO_DATA: Literal[0xE] = ... - RSMI_STATUS_UNKNOWN_ERROR: Literal[0xFFFFFFFF] = ... - -# Dictionary of rsmi ret codes and it's verbose output -rsmi_status_verbose_err_out: dict[LiteralString, LiteralString] = ... 
diff --git a/typings/simple_di/__init__.pyi b/typings/simple_di/__init__.pyi deleted file mode 100644 index 51ca6e58..00000000 --- a/typings/simple_di/__init__.pyi +++ /dev/null @@ -1,36 +0,0 @@ -from typing import Any -from typing import Callable -from typing import Generator -from typing import Generic -from typing import Tuple -from typing import TypeVar -from typing import Union -from typing import overload - -from _typeshed import Incomplete - -class _SentinelClass: ... -_VT = TypeVar('_VT') - -class Provider(Generic[_VT]): - STATE_FIELDS: Tuple[str, ...] - def __init__(self) -> None: ... - def set(self, value: Union[_SentinelClass, _VT]) -> None: ... - def patch(self, value: Union[_SentinelClass, _VT]) -> Generator[None, None, None]: ... - def get(self) -> _VT: ... - def reset(self) -> None: ... - -class _ProvideClass: - def __getitem__(self, provider: Provider[_VT]) -> _VT: ... - -Provide: Incomplete -_AnyCallable = TypeVar('_AnyCallable', bound=Callable[..., Any]) - - -@overload -def inject(func: _AnyCallable) -> _AnyCallable: ... -@overload -def inject(func: None = ..., squeeze_none: bool = ...) -> Callable[[_AnyCallable], _AnyCallable]: ... -def sync_container(from_: Any, to_: Any) -> None: ... - -container: Incomplete diff --git a/typings/simple_di/providers.pyi b/typings/simple_di/providers.pyi deleted file mode 100644 index 03b31e93..00000000 --- a/typings/simple_di/providers.pyi +++ /dev/null @@ -1,56 +0,0 @@ -import sys - -from typing import Any -from typing import Callable as CallableType -from typing import Dict -from typing import Tuple -from typing import Union - -if sys.version_info[:2] >= (3, 10): - from typing import TypeAlias -else: - from typing_extensions import TypeAlias - - -from _typeshed import Incomplete - -from . import _VT -from . import Provider -from . import _SentinelClass - -class Placeholder(Provider[_VT]): ... - -class Static(Provider[_VT]): - STATE_FIELDS: Tuple[str, ...] - def __init__(self, value: _VT) -> None: ... 
- -class Factory(Provider[_VT]): - STATE_FIELDS: Tuple[str, ...] - def __init__(self, func: CallableType[..., _VT], *args: Any, **kwargs: Any) -> None: ... - -class SingletonFactory(Factory[_VT]): - STATE_FIELDS: Tuple[str, ...] - def __init__(self, func: CallableType[..., _VT], *args: Any, **kwargs: Any) -> None: ... -Callable = Factory -MemoizedCallable = SingletonFactory -ConfigDictType: TypeAlias = Dict[Union[str, int], Any] -PathItemType: TypeAlias = Union[int, str, Provider[int], Provider[str]] - -class Configuration(Provider[ConfigDictType]): - STATE_FIELDS: Tuple[str, ...] - fallback: Incomplete - def __init__(self, data: Union[_SentinelClass, ConfigDictType] = ..., fallback: Any = ...) -> None: ... - def set(self, value: Union[_SentinelClass, ConfigDictType]) -> None: ... - def get(self) -> Union[ConfigDictType, Any]: ... - def reset(self) -> None: ... - def __getattr__(self, name: str) -> _ConfigurationItem: ... - def __getitem__(self, key: PathItemType) -> _ConfigurationItem: ... - -class _ConfigurationItem(Provider[Any]): - STATE_FIELDS: Tuple[str, ...] - def __init__(self, config: Configuration, path: Tuple[PathItemType, ...]) -> None: ... - def set(self, value: Any) -> None: ... - def get(self) -> Any: ... - def reset(self) -> None: ... - def __getattr__(self, name: str) -> _ConfigurationItem: ... - def __getitem__(self, key: PathItemType) -> _ConfigurationItem: ...