fix: persistent styling between ruff and yapf (#279)

Author: Aaron Pham
Date: 2023-08-30 11:37:41 -04:00 (committed by GitHub)
Parent: f678f71e18
Commit: c9cef1d773
145 changed files with 1051 additions and 395 deletions
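The underlying conflict: ruff's isort rules and yapf disagree on how comma-grouped imports should be laid out, so each formatter kept rewriting the other's output. Every hunk below applies the same mechanical fix: split each grouped import onto its own line, a style both tools leave alone (only a few of the 145 changed files are reproduced here). A minimal sketch of the kind of pyproject.toml configuration that enforces this, assuming ruff's isort settings are in use; the exact configuration in #279 may differ:

```toml
# Sketch only: force one import per line so ruff and yapf stop fighting.
[tool.ruff.isort]
force-single-line = true
required-imports = ["from __future__ import annotations"]

[tool.yapf]
based_on_style = "pep8"
column_limit = 160
```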


@@ -30,12 +30,19 @@ import cloudpickle
 import fs
 import openllm
 from bentoml._internal.models.model import CUSTOM_OBJECTS_FILENAME
-from openllm_core._typing_compat import M, ParamSpec, T
+from openllm_core._typing_compat import M
+from openllm_core._typing_compat import ParamSpec
+from openllm_core._typing_compat import T
 if t.TYPE_CHECKING:
   import bentoml
-  from . import constants as constants, ggml as ggml, transformers as transformers
+  from . import constants as constants
+  from . import ggml as ggml
+  from . import transformers as transformers
 P = ParamSpec('P')
 def load_tokenizer(llm: openllm.LLM[t.Any, T], **tokenizer_attrs: t.Any) -> T:
@@ -44,7 +51,8 @@ def load_tokenizer(llm: openllm.LLM[t.Any, T], **tokenizer_attrs: t.Any) -> T:
   By default, it will try to find the bentomodel in the local store.
   If the model is not found, it raises a ``bentoml.exceptions.NotFound``.
   '''
-  from .transformers._helpers import infer_tokenizers_from_llm, process_config
+  from .transformers._helpers import infer_tokenizers_from_llm
+  from .transformers._helpers import process_config
   config, *_ = process_config(llm._bentomodel.path, llm.__llm_trust_remote_code__)
   bentomodel_fs = fs.open_fs(llm._bentomodel.path)
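Aside from the import splitting, this file aliases `P = ParamSpec('P')`, imported from `openllm_core._typing_compat` (presumably a backport shim for Python < 3.10). An illustrative sketch, not from this diff, of what `ParamSpec` buys: a decorator can forward `*args`/`**kwargs` without erasing the wrapped function's signature for type checkers.

```python
# Illustrative sketch: ParamSpec preserves the wrapped function's exact
# parameter types through a decorator.
from __future__ import annotations
import typing as t

P = t.ParamSpec('P')  # in typing since Python 3.10; earlier via typing_extensions
R = t.TypeVar('R')

def traced(fn: t.Callable[P, R]) -> t.Callable[P, R]:
  def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
    print(f'calling {fn.__name__}')  # side effect just for the example
    return fn(*args, **kwargs)
  return wrapper
```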


@@ -1,4 +1,5 @@
+from __future__ import annotations
 FRAMEWORK_TO_AUTOCLASS_MAPPING = {
   'pt': ('AutoModelForCausalLM', 'AutoModelForSeq2SeqLM'),
   'tf': ('TFAutoModelForCausalLM', 'TFAutoModelForSeq2SeqLM'),
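`FRAMEWORK_TO_AUTOCLASS_MAPPING` pairs each framework key with a (causal-LM, seq2seq-LM) tuple of transformers auto-class names. A hedged sketch of how such a mapping is typically consumed; the helper name here is hypothetical, and OpenLLM's real lookup lives elsewhere in the serialisation code:

```python
# Hypothetical consumer of the mapping: resolve the transformers auto-class
# by name for a given framework ('pt' or 'tf') and architecture.
import transformers

FRAMEWORK_TO_AUTOCLASS_MAPPING = {
  'pt': ('AutoModelForCausalLM', 'AutoModelForSeq2SeqLM'),
  'tf': ('TFAutoModelForCausalLM', 'TFAutoModelForSeq2SeqLM'),
}

def resolve_autoclass(framework: str, seq2seq: bool = False) -> type:
  name = FRAMEWORK_TO_AUTOCLASS_MAPPING[framework][1 if seq2seq else 0]
  return getattr(transformers, name)  # e.g. transformers.AutoModelForCausalLM
```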


@@ -7,7 +7,9 @@ import typing as t
 import bentoml
 import openllm
-if t.TYPE_CHECKING: from openllm_core._typing_compat import M
+if t.TYPE_CHECKING:
+  from openllm_core._typing_compat import M
 _conversion_strategy = {'pt': 'ggml'}
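The one-liner `if t.TYPE_CHECKING: import ...` is exactly the kind of compound statement the two formatters fought over, so it becomes a block. For context, a small illustrative sketch of what the guard buys (assuming `M` is only needed in annotations): the import is seen by the type checker but never executed at runtime, avoiding import cost and cycles.

```python
# Illustrative sketch: a TYPE_CHECKING-only import is skipped at runtime;
# postponed annotations keep the reference valid as a string.
from __future__ import annotations
import typing as t

if t.TYPE_CHECKING:
  from openllm_core._typing_compat import M  # never imported at runtime

def tag(model: M) -> str:  # annotation stays a string, so M need not exist here
  return type(model).__name__
```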


@@ -5,15 +5,23 @@ import logging
 import typing as t
 from huggingface_hub import snapshot_download
-from simple_di import Provide, inject
+from simple_di import Provide
+from simple_di import inject
 import bentoml
 import openllm
 from bentoml._internal.configuration.containers import BentoMLContainer
 from bentoml._internal.models.model import ModelOptions
-from ._helpers import check_unintialised_params, infer_autoclass_from_llm, infer_tokenizers_from_llm, make_model_signatures, process_config, update_model
+from ._helpers import check_unintialised_params
+from ._helpers import infer_autoclass_from_llm
+from ._helpers import infer_tokenizers_from_llm
+from ._helpers import make_model_signatures
+from ._helpers import process_config
+from ._helpers import update_model
 from .weights import HfIgnore
 if t.TYPE_CHECKING:
   import types
@@ -24,7 +32,9 @@ if t.TYPE_CHECKING:
   import vllm
   from bentoml._internal.models import ModelStore
-  from openllm_core._typing_compat import DictStrAny, M, T
+  from openllm_core._typing_compat import DictStrAny
+  from openllm_core._typing_compat import M
+  from openllm_core._typing_compat import T
 else:
   vllm = openllm.utils.LazyLoader('vllm', globals(), 'vllm')
   autogptq = openllm.utils.LazyLoader('autogptq', globals(), 'auto_gptq')
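The `else:` branch pairs the TYPE_CHECKING-only imports with `openllm.utils.LazyLoader`, so heavyweight modules such as `vllm` are only imported on first use. A sketch of the assumed semantics, modeled on the LazyLoader pattern popularised by TensorFlow; OpenLLM's actual implementation may differ in detail:

```python
# Sketch of assumed LazyLoader semantics: a module stub that imports the real
# module on first attribute access, then replaces itself in the caller's globals.
from __future__ import annotations
import importlib
import types
import typing as t

class LazyLoader(types.ModuleType):
  def __init__(self, local_name: str, parent_globals: dict[str, t.Any], name: str) -> None:
    self._local_name = local_name
    self._parent_globals = parent_globals
    super().__init__(name)

  def _load(self) -> types.ModuleType:
    module = importlib.import_module(self.__name__)
    self._parent_globals[self._local_name] = module  # swap the stub for the real module
    return module

  def __getattr__(self, item: str) -> t.Any:
    return getattr(self._load(), item)

# Usage mirroring the diff: importing 'vllm' is deferred until it is touched.
vllm = LazyLoader('vllm', globals(), 'vllm')
```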


@@ -4,16 +4,24 @@ import typing as t
 import openllm
 import openllm_core
-from bentoml._internal.models.model import ModelInfo, ModelSignature
-from openllm.serialisation.constants import FRAMEWORK_TO_AUTOCLASS_MAPPING, HUB_ATTRS
+from bentoml._internal.models.model import ModelInfo
+from bentoml._internal.models.model import ModelSignature
+from openllm.serialisation.constants import FRAMEWORK_TO_AUTOCLASS_MAPPING
+from openllm.serialisation.constants import HUB_ATTRS
 if t.TYPE_CHECKING:
   import torch
   import transformers
   from transformers.models.auto.auto_factory import _BaseAutoModelClass
   import bentoml
   from bentoml._internal.models.model import ModelSignaturesType
-  from openllm_core._typing_compat import DictStrAny, M, T
+  from openllm_core._typing_compat import DictStrAny
+  from openllm_core._typing_compat import M
+  from openllm_core._typing_compat import T
 else:
   transformers, torch = openllm_core.utils.LazyLoader('transformers', globals(), 'transformers'), openllm_core.utils.LazyLoader('torch', globals(), 'torch')


@@ -2,10 +2,14 @@ from __future__ import annotations
 import typing as t
 import attr
 from huggingface_hub import HfApi
 if t.TYPE_CHECKING:
   import openllm
-  from openllm_core._typing_compat import M, T
+  from openllm_core._typing_compat import M
+  from openllm_core._typing_compat import T
 def has_safetensors_weights(model_id: str, revision: str | None = None) -> bool:
   return any(s.rfilename.endswith('.safetensors') for s in HfApi().model_info(model_id, revision=revision).siblings)
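`has_safetensors_weights` only scans the repository's file listing returned by the Hub API (`model_info(...).siblings`), so no weights are downloaded. A short usage sketch; the model id and format labels are illustrative:

```python
# Usage sketch: pick a serialisation format based on what the Hub repo ships.
model_id = 'facebook/opt-125m'  # illustrative model id
serialisation = 'safetensors' if has_safetensors_weights(model_id) else 'legacy'
```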