fix(client): include openllm.client into main module [skip ci]

Signed-off-by: Aaron <29749331+aarnphm@users.noreply.github.com>
This commit is contained in:
Aaron
2023-08-11 06:14:59 -04:00
parent d0aaf80ca3
commit 785c1db237
11 changed files with 13 additions and 56 deletions

View File

@@ -13,7 +13,6 @@ on:
- ".github/workflows/build.yaml"
- "src/openllm/bundle/oci/Dockerfile"
- "src/openllm/**"
- "src/openllm_client/**"
env:
LINES: 120
COLUMNS: 120

View File

@@ -74,7 +74,6 @@ openllm/
├── examples # Usage demonstration scripts
├── src
│ ├── openllm # openllm core
│ ├── openllm_client # openllm python client
│ └── openllm-node # openllm nodejs library
├── tests # Automated Tests
├── tools # Utilities Script

View File

@@ -284,7 +284,7 @@ avoid-escape = false
"src/openllm/models/**" = ["I001", "E", "D", "F"]
"src/openllm/utils/__init__.py" = ["I001"]
"src/openllm/utils/import_utils.py" = ["PLW0603"]
"src/openllm_client/runtimes/*" = ["D107"]
"src/openllm/client/runtimes/*" = ["D107"]
"tests/**/*" = [
"S101",
"TID252",
@@ -395,7 +395,7 @@ exclude = [
"examples",
"tests",
]
include = ["src/openllm", "src/openllm_client"]
include = ["src/openllm"]
pythonVersion = "3.12"
reportMissingImports = "warning"
reportMissingTypeStubs = false
@@ -411,7 +411,7 @@ typeCheckingMode = "strict"
# TODO: Enable model for strict type checking
exclude = ["src/openllm/playground/", "src/openllm/utils/dummy_*.py", "src/openllm/models"]
local_partial_types = true
modules = ["openllm", "openllm_client"]
modules = ["openllm"]
mypy_path = "typings"
pretty = true
python_version = "3.11"
@@ -500,7 +500,6 @@ include = [
"src/openllm/_quantisation.py",
"src/openllm/_generation.py",
"src/openllm/_strategies.py",
"src/openllm/client.py",
"src/openllm/exceptions.py",
"src/openllm/testing.py",
]

View File

@@ -69,11 +69,7 @@ from ._factory import (
start_command_factory,
workers_per_resource_option,
)
from .. import (
bundle,
client as openllm_client,
serialisation,
)
from .. import bundle, serialisation
from ..exceptions import OpenLLMException
from ..models.auto import (
CONFIG_MAPPING,
@@ -109,10 +105,10 @@ from ..utils import (
if t.TYPE_CHECKING:
import torch
from openllm_client.runtimes.base import BaseClient
from bentoml._internal.bento import BentoStore
from bentoml._internal.container import DefaultBuilder
from openllm.client import BaseClient
from .._schema import EmbeddingsOutput
from .._types import DictStrAny, LiteralRuntime, P
@@ -713,7 +709,7 @@ def instruct_command(endpoint: str, timeout: int, agent: t.LiteralString, output
--text "¡Este es un API muy agradable!"
```
"""
client = openllm_client.HTTPClient(endpoint, timeout=timeout)
client = openllm.client.HTTPClient(endpoint, timeout=timeout)
try:
client.call("metadata")
@@ -745,7 +741,7 @@ def embed_command(ctx: click.Context, text: tuple[str, ...], endpoint: str, time
$ openllm embed --endpoint http://12.323.2.1:3000 "What is the meaning of life?" "How many stars are there in the sky?"
```
"""
client = t.cast("BaseClient[t.Any]", openllm_client.HTTPClient(endpoint, timeout=timeout) if server_type == "http" else openllm_client.GrpcClient(endpoint, timeout=timeout))
client = t.cast("BaseClient[t.Any]", openllm.client.HTTPClient(endpoint, timeout=timeout) if server_type == "http" else openllm.client.GrpcClient(endpoint, timeout=timeout))
try:
gen_embed = client.embed(text)
except ValueError:
@@ -778,7 +774,7 @@ def query_command(ctx: click.Context, /, prompt: str, endpoint: str, timeout: in
"""
_memoized = {k: orjson.loads(v[0]) for k, v in _memoized.items() if v}
if server_type == "grpc": endpoint = re.sub(r"http://", "", endpoint)
client = t.cast("BaseClient[t.Any]", openllm_client.HTTPClient(endpoint, timeout=timeout) if server_type == "http" else openllm_client.GrpcClient(endpoint, timeout=timeout))
client = t.cast("BaseClient[t.Any]", openllm.client.HTTPClient(endpoint, timeout=timeout) if server_type == "http" else openllm.client.GrpcClient(endpoint, timeout=timeout))
input_fg, generated_fg = "magenta", "cyan"
if output != "porcelain":
termui.echo("==Input==\n", fg="white")

View File

@@ -1,34 +0,0 @@
"""OpenLLM client.
To start interact with the server, you can do the following:
>>> import openllm
>>> client = openllm.client.HTTPClient("http://localhost:3000")
>>> client.query("What is the meaning of life?")
"""
from __future__ import annotations
import importlib
import typing as t
# Maps each lazily-loaded submodule path to the public names it exports;
# consumed by __getattr__ below to resolve attribute access on first use.
_import_structure: dict[str, list[str]] = {"runtimes.grpc": ["AsyncGrpcClient", "GrpcClient"], "runtimes.http": ["AsyncHTTPClient", "HTTPClient"], "runtimes.base": ["BaseClient", "BaseAsyncClient"]}
if t.TYPE_CHECKING:
from openllm_client.runtimes import (
AsyncGrpcClient as AsyncGrpcClient,
AsyncHTTPClient as AsyncHTTPClient,
BaseAsyncClient as BaseAsyncClient,
BaseClient as BaseClient,
GrpcClient as GrpcClient,
HTTPClient as HTTPClient,
)
_module = "openllm_client"
def __dir__() -> list[str]:
    """Return the module's public names in sorted order (PEP 562 hook for dir())."""
    return sorted(__all__)
def __getattr__(name: str) -> t.Any:
    """Lazily resolve *name* via the submodules declared in ``_import_structure``.

    A name matching a submodule key imports and returns that submodule; a name
    listed among a submodule's exported attributes imports the submodule and
    returns the attribute. Anything else raises ``AttributeError`` (PEP 562).
    """
    if name in _import_structure:
        return importlib.import_module(f".{name}", _module)
    # Search the declared exports for the owning submodule instead of relying
    # on next()/StopIteration; behavior and the error message are unchanged.
    for submodule, exported in _import_structure.items():
        if name in exported:
            resolved = importlib.import_module(f".{submodule}", _module)
            return getattr(resolved, name)
    raise AttributeError(f"module {_module} has no attribute {name}") from None
# NOTE: Make sure to always keep this line at the bottom of the file. The update will be managed via tools/update-init-import.py
__all__=["AsyncGrpcClient","AsyncHTTPClient","BaseAsyncClient","BaseClient","GrpcClient","HTTPClient"]

View File

@@ -6,11 +6,11 @@ OpenLLM server. It is used to send requests to the server, and receive responses
"""
from __future__ import annotations
from .runtimes.grpc import (
from .runtimes import (
AsyncGrpcClient as AsyncGrpcClient,
GrpcClient as GrpcClient,
)
from .runtimes.http import (
AsyncHTTPClient as AsyncHTTPClient,
BaseAsyncClient as BaseAsyncClient,
BaseClient as BaseClient,
GrpcClient as GrpcClient,
HTTPClient as HTTPClient,
)

View File

@@ -1,4 +1,3 @@
from __future__ import annotations
import asyncio
import logging

View File

@@ -1,4 +1,3 @@
from __future__ import annotations
import logging
import typing as t

View File

@@ -38,12 +38,12 @@ logger = logging.getLogger(__name__)
if t.TYPE_CHECKING:
import subprocess
from openllm_client.runtimes.base import BaseAsyncClient
from syrupy.assertion import SnapshotAssertion
from syrupy.types import PropertyFilter, PropertyMatcher, SerializableData, SerializedData
from openllm._configuration import GenerationConfig
from openllm._types import DictStrAny, ListAny
from openllm.client import BaseAsyncClient
else:
DictStrAny = dict