Mirror of https://github.com/bentoml/OpenLLM.git (synced 2026-04-29 03:13:44 -04:00)
refactor: packages (#249)
@@ -1,12 +1,8 @@
 from __future__ import annotations
-import logging
-import typing as t
-
+import logging, typing as t, openllm
+from openllm_core._configuration import ModelSettings
 from hypothesis import strategies as st
-
-import openllm
-from openllm._configuration import ModelSettings
 
 logger = logging.getLogger(__name__)
 
 env_strats = st.sampled_from([openllm.utils.EnvVarMixin(model_name) for model_name in openllm.CONFIG_MAPPING.keys()])
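
Note on the hunk above: `env_strats` samples one `openllm.utils.EnvVarMixin` per key in `openllm.CONFIG_MAPPING`. A minimal sketch of how such a strategy is typically consumed with hypothesis, assuming the module context shown above (the test name and assertion are illustrative, not part of this commit):

from hypothesis import given

@given(env_strats)
def test_env_strats_yield_env_var_mixins(env):
  # every drawn example is an EnvVarMixin built from a CONFIG_MAPPING key
  assert isinstance(env, openllm.utils.EnvVarMixin)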
@@ -1,11 +0,0 @@
-from __future__ import annotations
-
-import pytest
-
-import openllm
-
-@pytest.mark.parametrize("name", ["AutoConfig", "AutoLLM", "AutoVLLM", "AutoFlaxLLM", "AutoTFLLM", "LLM", "LLMRunner", "LLMRunnable", "LLMEmbeddings",
-                                  "Runner", "client", "exceptions", "bundle", "build", "ggml", "transformers", "import_model", "infer_auto_class",
-                                  "infer_quantisation_config", "models", "list_models", "start", "start_grpc", "build", "serialisation"])
-def test_compiled_imports(name: str):
-  assert getattr(openllm, name) is not None
@@ -1,32 +1,10 @@
 from __future__ import annotations
-import contextlib
-import logging
-import os
-import sys
-import typing as t
+import contextlib, os, sys, typing as t, attr, pytest, transformers, openllm
 from unittest import mock
-
-import attr
-import pytest
-import transformers
-from hypothesis import (
-  assume,
-  given,
-  strategies as st,
-)
-
-import openllm
-from openllm._configuration import GenerationConfig, ModelSettings, field_env_key
-
+from openllm_core._configuration import GenerationConfig, ModelSettings, field_env_key
+from hypothesis import assume, given, strategies as st
 from ._strategies._configuration import make_llm_config, model_settings
-
-logger = logging.getLogger(__name__)
-
-if t.TYPE_CHECKING:
-  DictStrAny = dict[str, t.Any]
-else:
-  DictStrAny = dict
 
 # XXX: @aarnphm fixes TypedDict behaviour in 3.11
 @pytest.mark.skipif(sys.version_info[:2] == (3, 11), reason="TypedDict in 3.11 behaves differently, so we need to fix this")
 def test_missing_default():
@@ -43,7 +21,6 @@ def test_forbidden_access():
   assert pytest.raises(openllm.exceptions.ForbiddenAttributeError, cl_.__getattribute__, cl_(), "__config__",)
   assert pytest.raises(openllm.exceptions.ForbiddenAttributeError, cl_.__getattribute__, cl_(), "GenerationConfig",)
   assert pytest.raises(openllm.exceptions.ForbiddenAttributeError, cl_.__getattribute__, cl_(), "SamplingParams",)
-
   assert openllm.utils.lenient_issubclass(cl_.__openllm_generation_class__, GenerationConfig)
 
 @given(model_settings())
@@ -133,7 +110,7 @@ def test_struct_envvar_with_overwrite_provided_env(monkeypatch: pytest.MonkeyPat
   assert sent.field1 == 20.0
 
 @given(model_settings())
-@pytest.mark.parametrize(("return_dict", "typ"), [(True, DictStrAny), (False, transformers.GenerationConfig)])
+@pytest.mark.parametrize(("return_dict", "typ"), [(True, dict), (False, transformers.GenerationConfig)])
 def test_conversion_to_transformers(return_dict: bool, typ: type[t.Any], gen_settings: ModelSettings):
   cl_ = make_llm_config("ConversionLLM", gen_settings)
   assert isinstance(cl_().to_generation_config(return_as_dict=return_dict), typ)
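
Note on the parametrize change above: `dict[str, t.Any]` cannot be used as the second argument to `isinstance` at runtime, which is what the removed `DictStrAny` alias worked around and why the new test passes plain `dict`. An illustrative sketch of the pattern (not part of the commit):

import typing as t

if t.TYPE_CHECKING:
  DictStrAny = dict[str, t.Any]  # precise alias, only seen by type checkers
else:
  DictStrAny = dict  # plain dict at runtime so isinstance(x, DictStrAny) works

assert isinstance({"a": 1}, DictStrAny)
# isinstance({"a": 1}, dict[str, t.Any]) would raise TypeError:
# parameterized generics cannot be used with isinstance()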
@@ -1,6 +1,6 @@
 from __future__ import annotations
 import itertools, os, typing as t, pytest, openllm
-if t.TYPE_CHECKING: from openllm._configuration import LiteralRuntime
+if t.TYPE_CHECKING: from openllm_core._typing_compat import LiteralRuntime
 
 _FRAMEWORK_MAPPING = {"flan_t5": "google/flan-t5-small", "opt": "facebook/opt-125m", "baichuan": "baichuan-inc/Baichuan-7B",}
 _PROMPT_MAPPING = {"qa": "Answer the following yes/no question by reasoning step-by-step. Can you write a whole Haiku in a single tweet?",}
@@ -4,7 +4,7 @@ from abc import ABC, abstractmethod
 import attr, docker, docker.errors, docker.types, orjson, pytest, openllm
 from syrupy.extensions.json import JSONSnapshotExtension
 from openllm._llm import normalise_model_name
-from openllm._typing_compat import DictStrAny, ListAny
+from openllm_core._typing_compat import DictStrAny, ListAny
 
 logger = logging.getLogger(__name__)
 
@@ -1,11 +1,7 @@
 from __future__ import annotations
-import os
-import typing as t
+import os, typing as t, pytest
 
-import pytest
-
-if t.TYPE_CHECKING:
-  import openllm
+if t.TYPE_CHECKING: import openllm
 
 @pytest.mark.skipif(os.getenv("GITHUB_ACTIONS") is not None, reason="Model is too large for CI")
 def test_flan_t5_implementation(prompt: str, llm: openllm.LLM[t.Any, t.Any]):
@@ -1,15 +1,7 @@
 from __future__ import annotations
-import functools
-import os
-import typing as t
-
-import pytest
-
-import openllm
+import functools, os, typing as t, pytest, openllm
 from bentoml._internal.configuration.containers import BentoMLContainer
-
-if t.TYPE_CHECKING:
-  from pathlib import Path
+if t.TYPE_CHECKING: from pathlib import Path
 
 HF_INTERNAL_T5_TESTING = "hf-internal-testing/tiny-random-t5"
 
@@ -1,16 +1,8 @@
 from __future__ import annotations
-import os
-import typing as t
-
-import pytest
-
-if t.TYPE_CHECKING:
-  from _pytest.monkeypatch import MonkeyPatch
-
-import bentoml
-from bentoml._internal.resource import get_resource
-from openllm import _strategies as strategy
-from openllm._strategies import CascadingResourceStrategy, NvidiaGpuResource
+import os, typing as t, pytest, bentoml
+from openllm_core import _strategies as strategy
+from openllm_core._strategies import CascadingResourceStrategy, NvidiaGpuResource, get_resource
+if t.TYPE_CHECKING: from _pytest.monkeypatch import MonkeyPatch
 
 def test_nvidia_gpu_resource_from_env(monkeypatch: pytest.MonkeyPatch):
   with monkeypatch.context() as mcls:
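
Note on the hunk above: `monkeypatch.context()` scopes patches (here environment variables) to a `with` block and undoes them on exit. A minimal sketch of the pattern using the standard pytest API (the test name and variable are hypothetical, not part of this commit):

import os
import pytest

def test_env_override_is_scoped(monkeypatch: pytest.MonkeyPatch):
  with monkeypatch.context() as mcls:
    mcls.setenv("OPENLLM_TEST_DUMMY", "1")  # hypothetical variable, set only inside the block
    assert os.environ["OPENLLM_TEST_DUMMY"] == "1"
  # the override is undone automatically once the context exits
  assert "OPENLLM_TEST_DUMMY" not in os.environ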