chore(style): enable yapf to match with style guidelines

Signed-off-by: aarnphm-ec2-dev <29749331+aarnphm@users.noreply.github.com>
This commit is contained in:
aarnphm-ec2-dev
2023-08-22 14:03:06 +00:00
parent afb70f5767
commit 1488fbb167
106 changed files with 1840 additions and 1463 deletions

View File

@@ -33,10 +33,31 @@ else:
_warnings.filterwarnings("ignore", message="Neither GITHUB_TOKEN nor GITHUB_JWT_TOKEN found: running as unauthenticated")
_import_structure: dict[str, list[str]] = {
"exceptions": [], "models": [], "client": [], "bundle": [], "playground": [], "testing": [],
"utils": ["infer_auto_class"], "serialisation": ["ggml", "transformers"], "cli._sdk": ["start", "start_grpc", "build", "import_model", "list_models"], "_quantisation": ["infer_quantisation_config"], "_embeddings": ["GenericEmbeddingRunnable"],
"_llm": ["LLM", "Runner", "LLMRunner", "LLMRunnable", "LLMEmbeddings"], "_generation": ["StopSequenceCriteria", "StopOnTokens", "LogitsProcessorList", "StoppingCriteriaList", "prepare_logits_processor"],
"models.auto": ["MODEL_MAPPING_NAMES", "MODEL_FLAX_MAPPING_NAMES", "MODEL_TF_MAPPING_NAMES", "MODEL_VLLM_MAPPING_NAMES"], "models.chatglm": [], "models.baichuan": [], "models.dolly_v2": [], "models.falcon": [], "models.flan_t5": [], "models.gpt_neox": [], "models.llama": [], "models.mpt": [], "models.opt": [], "models.stablelm": [], "models.starcoder": []
"exceptions": [],
"models": [],
"client": [],
"bundle": [],
"playground": [],
"testing": [],
"utils": ["infer_auto_class"],
"serialisation": ["ggml", "transformers"],
"cli._sdk": ["start", "start_grpc", "build", "import_model", "list_models"],
"_quantisation": ["infer_quantisation_config"],
"_embeddings": ["GenericEmbeddingRunnable"],
"_llm": ["LLM", "Runner", "LLMRunner", "LLMRunnable", "LLMEmbeddings"],
"_generation": ["StopSequenceCriteria", "StopOnTokens", "LogitsProcessorList", "StoppingCriteriaList", "prepare_logits_processor"],
"models.auto": ["MODEL_MAPPING_NAMES", "MODEL_FLAX_MAPPING_NAMES", "MODEL_TF_MAPPING_NAMES", "MODEL_VLLM_MAPPING_NAMES"],
"models.chatglm": [],
"models.baichuan": [],
"models.dolly_v2": [],
"models.falcon": [],
"models.flan_t5": [],
"models.gpt_neox": [],
"models.llama": [],
"models.mpt": [],
"models.opt": [],
"models.stablelm": [],
"models.starcoder": []
}
# True when this module is running from a compiled/native distribution (.pyd/.so).
COMPILED = _Path(__file__).suffix in (".pyd", ".so")

View File

@@ -6,43 +6,44 @@ from bentoml._internal.frameworks.transformers import MODULE_NAME, API_VERSION
from bentoml._internal.models.model import ModelOptions, ModelSignature
if t.TYPE_CHECKING: import torch
_GENERIC_EMBEDDING_ID="sentence-transformers/all-MiniLM-L6-v2"
_BENTOMODEL_ID="sentence-transformers--all-MiniLM-L6-v2"
_GENERIC_EMBEDDING_ID = "sentence-transformers/all-MiniLM-L6-v2"
_BENTOMODEL_ID = "sentence-transformers--all-MiniLM-L6-v2"
def get_or_download(ids: str = _BENTOMODEL_ID) -> bentoml.Model:
  """Return the generic embedding model from the local BentoML store, downloading it from the HF Hub on a cache miss.

  Args:
      ids: Tag of the model in the BentoML model store.

  Returns:
      The resolved ``bentoml.Model``.
  """
  try:
    return bentoml.transformers.get(ids)
  except bentoml.exceptions.NotFound:
    model_signatures = {k: ModelSignature(batchable=False) for k in ("forward", "generate", "contrastive_search", "greedy_search", "sample", "beam_search", "beam_sample", "group_beam_search", "constrained_beam_search", "__call__")}
    with bentoml.models.create(ids, module=MODULE_NAME, api_version=API_VERSION, options=ModelOptions(), context=openllm.utils.generate_context(framework_name="transformers"), labels={"runtime": "pt", "framework": "openllm"}, signatures=model_signatures) as bentomodel:
      # Skip weight/doc formats we never load at runtime to keep the download small.
      # (The diff residue issued this download twice — old and new formatting.)
      snapshot_download(_GENERIC_EMBEDDING_ID, local_dir=bentomodel.path, local_dir_use_symlinks=False, ignore_patterns=["*.safetensors", "*.h5", "*.ot", "*.pdf", "*.md", ".gitattributes", "LICENSE.txt"])
    return bentomodel
class GenericEmbeddingRunnable(bentoml.Runnable):
  """Bento runnable serving sentence embeddings from a generic MiniLM model."""
  SUPPORTED_RESOURCES = ("nvidia.com/gpu", "cpu")
  SUPPORTS_CPU_MULTI_THREADING = True

  def __init__(self) -> None:
    self.device = "cuda" if openllm.utils.device_count() > 0 else "cpu"
    self._bentomodel = get_or_download()
    self.tokenizer = transformers.AutoTokenizer.from_pretrained(self._bentomodel.path)
    self.model = transformers.AutoModel.from_pretrained(self._bentomodel.path)
    self.model.to(self.device)

  @bentoml.Runnable.method(batchable=True, batch_dim=0)
  def encode(self, sentences: list[str]) -> t.Sequence[openllm.LLMEmbeddings]:
    """Embed a batch of sentences; returns L2-normalized mean-pooled vectors and the total token count."""
    import torch, torch.nn.functional as F
    encoded_input = self.tokenizer(sentences, padding=True, truncation=True, return_tensors="pt").to(self.device)
    attention_mask = encoded_input["attention_mask"]
    # Compute token embeddings. The diff residue ran this forward pass twice.
    with torch.no_grad():
      model_output = self.model(**encoded_input)
    # Perform pooling and normalize
    sentence_embeddings = F.normalize(self.mean_pooling(model_output, attention_mask), p=2, dim=1)
    return [openllm.LLMEmbeddings(embeddings=sentence_embeddings.cpu().numpy(), num_tokens=int(torch.sum(attention_mask).item()))]

  @staticmethod
  def mean_pooling(model_output: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    """Mean-pool token embeddings, taking the attention mask into account for correct averaging."""
    import torch
    token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    # Clamp avoids division by zero for fully-masked rows.
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)


__all__ = ["GenericEmbeddingRunnable"]

View File

@@ -6,14 +6,16 @@ if t.TYPE_CHECKING: import torch, openllm
# reexport from transformers
LogitsProcessorList = transformers.LogitsProcessorList
StoppingCriteriaList = transformers.StoppingCriteriaList
class StopSequenceCriteria(transformers.StoppingCriteria):
  """Stop generation once the decoded output ends with any of the given stop sequences."""

  def __init__(self, stop_sequences: str | list[str], tokenizer: transformers.PreTrainedTokenizer | transformers.PreTrainedTokenizerBase | transformers.PreTrainedTokenizerFast):
    # A single string is promoted to a one-element list.
    if isinstance(stop_sequences, str): stop_sequences = [stop_sequences]
    self.stop_sequences, self.tokenizer = stop_sequences, tokenizer

  # The diff residue defined __call__ twice (old one-liner + reformatted copy).
  def __call__(self, input_ids: torch.Tensor, scores: t.Any, **_: t.Any) -> bool:
    return any(self.tokenizer.decode(input_ids.tolist()[0]).endswith(stop_sequence) for stop_sequence in self.stop_sequences)
class StopOnTokens(transformers.StoppingCriteria):
  """Stop generation when the last generated token is one of a fixed set of EOS-like token ids."""

  # NOTE(review): the hard-coded ids look model-specific (StableLM-style) — confirm before reuse.
  def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **_: t.Any) -> bool:
    return input_ids[0][-1] in {50278, 50279, 50277, 1, 0}
def prepare_logits_processor(config: openllm.LLMConfig) -> transformers.LogitsProcessorList:
generation_config = config.generation_config
logits_processor = transformers.LogitsProcessorList()
@@ -22,18 +24,18 @@ def prepare_logits_processor(config: openllm.LLMConfig) -> transformers.LogitsPr
if 1e-8 <= generation_config["top_p"]: logits_processor.append(transformers.TopPLogitsWarper(generation_config["top_p"]))
if generation_config["top_k"] > 0: logits_processor.append(transformers.TopKLogitsWarper(generation_config["top_k"]))
return logits_processor
# NOTE: The ordering here is important. Some models have two of these and we have a preference for which value gets used.
SEQLEN_KEYS = ["max_sequence_length", "seq_length", "max_position_embeddings", "max_seq_len", "model_max_length"]

def get_context_length(config: "transformers.PretrainedConfig") -> int:
  """Return the model context length from *config*, scaled by any RoPE scaling factor.

  Falls back to 2048 when none of the known sequence-length keys is present.
  (The diff residue duplicated the if/return line inside the loop.)
  """
  rope_scaling = getattr(config, "rope_scaling", None)
  rope_scaling_factor = config.rope_scaling["factor"] if rope_scaling else 1.0
  for key in SEQLEN_KEYS:
    if getattr(config, key, None) is not None: return int(rope_scaling_factor * getattr(config, key))
  return 2048
def is_sentence_complete(output: str) -> bool:
  """Return True if *output* ends with a sentence-terminating character.

  Covers ASCII and CJK fullwidth terminators plus closing quotes. The prior
  tuple contained empty strings (mojibake of the fullwidth characters 。？！…”),
  and ``endswith("")`` is always True — every string counted as complete.
  """
  return output.endswith((".", "?", "!", "...", "。", "？", "！", "…", '"', "'", "”"))
def is_partial_stop(output: str, stop_str: str) -> bool:
  """Check whether the output contains a partial stop str.

  True when some suffix of *output* is a prefix of *stop_str*, so streaming
  callers can hold back text that might become a full stop sequence.
  (The diff residue duplicated the whole body — docstring and loop.)
  """
  for i in range(0, min(len(output), len(stop_str))):
    if stop_str.startswith(output[-i:]): return True
  return False

View File

@@ -5,53 +5,14 @@ from pathlib import Path
from huggingface_hub import hf_hub_download
from bentoml._internal.models.model import ModelSignature
from openllm_core._configuration import (
FineTuneConfig,
LLMConfig,
_object_getattribute,
_setattr_class,
)
from openllm_core._configuration import FineTuneConfig, LLMConfig, _object_getattribute, _setattr_class
from ._quantisation import infer_quantisation_config
from openllm_core._schema import unmarshal_vllm_outputs
from .exceptions import ForbiddenAttributeError, GpuNotAvailableError, OpenLLMException
from .models.auto import AutoConfig
from openllm_core.utils import (
DEBUG,
ENV_VARS_TRUE_VALUES,
MYPY,
EnvVarMixin,
LazyLoader,
ReprMixin,
apply,
bentoml_cattr,
codegen,
device_count,
first_not_none,
generate_hash_from_file,
is_peft_available,
is_torch_available,
non_intrusive_setattr,
normalize_attrs_to_model_tokenizer_pair,
resolve_filepath,
validate_is_path,
)
from openllm_core.utils import DEBUG, ENV_VARS_TRUE_VALUES, MYPY, EnvVarMixin, LazyLoader, ReprMixin, apply, bentoml_cattr, codegen, device_count, first_not_none, generate_hash_from_file, is_peft_available, is_torch_available, non_intrusive_setattr, normalize_attrs_to_model_tokenizer_pair, resolve_filepath, validate_is_path
from .utils import infer_auto_class
from openllm_core._typing_compat import (
AdaptersMapping,
AdaptersTuple,
AnyCallable,
AdapterType,
LiteralRuntime,
DictStrAny,
ListStr,
LLMEmbeddings,
LLMRunnable,
LLMRunner,
ModelSignatureDict as _ModelSignatureDict,
PeftAdapterOutput,
TupleAny,
NotRequired, overload, M, T, LiteralString
)
from openllm_core._typing_compat import AdaptersMapping, AdaptersTuple, AnyCallable, AdapterType, LiteralRuntime, DictStrAny, ListStr, LLMEmbeddings, LLMRunnable, LLMRunner, ModelSignatureDict as _ModelSignatureDict, PeftAdapterOutput, TupleAny, NotRequired, overload, M, T, LiteralString
if t.TYPE_CHECKING:
import auto_gptq as autogptq, peft, torch, transformers, vllm
@@ -72,13 +33,10 @@ class ModelSignatureDict(t.TypedDict, total=False):
batch_dim: t.Union[t.Tuple[int, int], int]
input_spec: NotRequired[t.Union[t.Any, t.Tuple[t.Any]]]
output_spec: NotRequired[t.Any]
def normalise_model_name(name: str) -> str:
  """Normalise *name* into a tag-safe identifier.

  Filesystem paths are reduced to their basename; other names have every run
  of non-alphanumeric characters replaced by a single dash.
  """
  if validate_is_path(name):
    return os.path.basename(resolve_filepath(name))
  return re.sub("[^a-zA-Z0-9]+", "-", name)
# the below is similar to peft.utils.other.CONFIG_NAME
PEFT_CONFIG_NAME = "adapter_config.json"
def resolve_peft_config_type(adapter_map: dict[str, str | None]) -> AdaptersMapping:
"""Resolve the type of the PeftConfig given the adapter_map.
@@ -109,9 +67,7 @@ def resolve_peft_config_type(adapter_map: dict[str, str | None]) -> AdaptersMapp
if _peft_type not in resolved: resolved[_peft_type] = ()
resolved[_peft_type] += (_AdaptersTuple((path_or_adapter_id, resolve_name, resolved_config)),)
return resolved
_reserved_namespace = {"config_class", "model", "tokenizer", "import_kwargs"}
class LLMInterface(ABC, t.Generic[M, T]):
"""This defines the loose contract for all openllm.LLM implementations."""
@property
@@ -245,23 +201,42 @@ class LLMInterface(ABC, t.Generic[M, T]):
__llm_supports_generate_iterator__: bool
"""A boolean to determine whether models does implement ``LLM.generate_iterator``."""
if t.TYPE_CHECKING and not MYPY:
def __attrs_init__(self, config: LLMConfig, quantization_config: t.Optional[t.Union[transformers.BitsAndBytesConfig, autogptq.BaseQuantizeConfig]], model_id: str, runtime: t.Literal["ggml", "transformers"], model_decls: TupleAny, model_attrs: DictStrAny, tokenizer_attrs: DictStrAny, tag: bentoml.Tag, adapters_mapping: t.Optional[AdaptersMapping], model_version: t.Optional[str], quantize_method: t.Optional[t.Literal["int8", "int4", "gptq"]], serialisation_format: t.Literal["safetensors", "legacy"], _local: bool, **attrs: t.Any) -> None:
"""Generated __attrs_init__ for openllm.LLM."""
def __attrs_init__(
self,
config: LLMConfig,
quantization_config: t.Optional[t.Union[transformers.BitsAndBytesConfig, autogptq.BaseQuantizeConfig]],
model_id: str,
runtime: t.Literal["ggml", "transformers"],
model_decls: TupleAny,
model_attrs: DictStrAny,
tokenizer_attrs: DictStrAny,
tag: bentoml.Tag,
adapters_mapping: t.Optional[AdaptersMapping],
model_version: t.Optional[str],
quantize_method: t.Optional[t.Literal["int8", "int4", "gptq"]],
serialisation_format: t.Literal["safetensors", "legacy"],
_local: bool,
**attrs: t.Any
) -> None:
"""Generated __attrs_init__ for openllm.LLM."""
_R = t.TypeVar("_R", covariant=True)

# The diff residue left each protocol with two __call__ definitions (the old
# one-liner plus the reformatted copy); each protocol below keeps exactly one.
class _import_model_wrapper(t.Generic[_R, M, T], t.Protocol):
  """Callback protocol for ``import_model`` implementations."""
  def __call__(self, llm: LLM[M, T], *decls: t.Any, trust_remote_code: bool, **attrs: t.Any) -> _R:
    ...

class _load_model_wrapper(t.Generic[M, T], t.Protocol):
  """Callback protocol for ``load_model`` implementations."""
  def __call__(self, llm: LLM[M, T], *decls: t.Any, **attrs: t.Any) -> M:
    ...

class _load_tokenizer_wrapper(t.Generic[M, T], t.Protocol):
  """Callback protocol for ``load_tokenizer`` implementations."""
  def __call__(self, llm: LLM[M, T], **attrs: t.Any) -> T:
    ...

class _llm_post_init_wrapper(t.Generic[M, T], t.Protocol):
  """Callback protocol for ``llm_post_init`` implementations."""
  def __call__(self, llm: LLM[M, T]) -> T:
    ...

class _save_pretrained_wrapper(t.Generic[M, T], t.Protocol):
  """Callback protocol for ``save_pretrained`` implementations."""
  def __call__(self, llm: LLM[M, T], save_directory: str | Path, **attrs: t.Any) -> None:
    ...
_object_setattr = object.__setattr__
# NOTE: the following wrapper are a light meta ops for wrapping default params to internal methods implementation.
def _wrapped_import_model(f: _import_model_wrapper[bentoml.Model, M, T]) -> t.Callable[[LLM[M, T]], bentoml.Model]:
@functools.wraps(f)
@@ -271,38 +246,39 @@ def _wrapped_import_model(f: _import_model_wrapper[bentoml.Model, M, T]) -> t.Ca
decls = (*model_decls, *decls)
attrs = {**model_attrs, **attrs}
return f(self, *decls, trust_remote_code=trust_remote_code, **attrs)
return wrapper
_DEFAULT_TOKENIZER = "hf-internal-testing/llama-tokenizer"

# The diff residue defined this function twice (old one-liner + reformatted copy).
def get_engine_args(llm: LLM[M, T], tokenizer: str = _DEFAULT_TOKENIZER) -> vllm.EngineArgs:
  """Build vLLM engine args for *llm*, sharding across all GPUs when more than one is available."""
  return vllm.EngineArgs(model=llm._bentomodel.path, tokenizer=tokenizer, tokenizer_mode="auto", tensor_parallel_size=1 if device_count() < 2 else device_count(), dtype="auto", worker_use_ray=False)
def _wrapped_load_model(f: _load_model_wrapper[M, T]) -> t.Callable[[LLM[M, T]], M | vllm.LLMEngine]:
  """Wrap ``load_model``: merge stored model decls/attrs into the call, or build a vLLM engine directly.

  The diff residue duplicated the ``try`` line and the ``return wrapper``
  statement; this keeps a single copy of each.
  """
  @functools.wraps(f)
  def wrapper(self: LLM[M, T], *decls: t.Any, **attrs: t.Any) -> M | vllm.LLMEngine:
    if self.__llm_implementation__ == "vllm":
      # TODO: Do some more processing with token_id once we support token streaming
      try:
        return vllm.LLMEngine.from_engine_args(get_engine_args(self, tokenizer=self._bentomodel.path if self.tokenizer_id == "local" else self.tokenizer_id))
      except Exception as err:
        traceback.print_exc()
        raise OpenLLMException(f"Failed to initialise vLLMEngine due to the following error:\n{err}") from None
    else:
      # Stored decls/attrs come first; call-site values extend/override them.
      (model_decls, model_attrs), _ = self.llm_parameters
      return f(self, *(*model_decls, *decls), **{**model_attrs, **attrs})
  return wrapper
def _wrapped_load_tokenizer(f: _load_tokenizer_wrapper[M, T]) -> t.Callable[[LLM[M, T]], T]:
@functools.wraps(f)
def wrapper(self: LLM[M, T], **tokenizer_attrs: t.Any) -> T: return f(self, **{**self.llm_parameters[-1], **tokenizer_attrs})
return wrapper
def wrapper(self: LLM[M, T], **tokenizer_attrs: t.Any) -> T:
return f(self, **{**self.llm_parameters[-1], **tokenizer_attrs})
return wrapper
def _wrapped_llm_post_init(f: _llm_post_init_wrapper[M, T]) -> t.Callable[[LLM[M, T]], None]:
@functools.wraps(f)
def wrapper(self: LLM[M, T]) -> None:
if self.__llm_implementation__ == "pt" and is_torch_available(): self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
f(self)
return wrapper
return wrapper
def _wrapped_save_pretrained(f: _save_pretrained_wrapper[M, T]) -> t.Callable[[LLM[M, T], str | Path], None]:
@functools.wraps(f)
def wrapper(self: LLM[M, T], save_directory: str | Path, **attrs: t.Any) -> None:
@@ -310,8 +286,8 @@ def _wrapped_save_pretrained(f: _save_pretrained_wrapper[M, T]) -> t.Callable[[L
if self.__llm_model__ is None: raise RuntimeError("Cannot 'save_pretrained' with unload model instance.")
if self.bettertransformer and self.__llm_implementation__ == "pt": _object_setattr(self, "__llm_model__", t.cast("transformers.PreTrainedModel", self.__llm_model__).reverse_bettertransformer())
f(self, save_directory, **attrs)
return wrapper
return wrapper
def _update_docstring(cls: LLM[M, T], fn: str) -> AnyCallable:
# update docstring for given entrypoint
original_fn = getattr(cls, fn, getattr(LLMInterface, fn))
@@ -323,7 +299,6 @@ def _update_docstring(cls: LLM[M, T], fn: str) -> AnyCallable:
"""
setattr(cls, fn, original_fn)
return original_fn
def _make_assignment_script(cls: type[LLM[M, T]]) -> t.Callable[[type[LLM[M, T]]], None]:
attributes = {"import_model": _wrapped_import_model, "load_model": _wrapped_load_model, "load_tokenizer": _wrapped_load_tokenizer, "llm_post_init": _wrapped_llm_post_init, "save_pretrained": _wrapped_save_pretrained}
args: ListStr = []
@@ -356,8 +331,8 @@ def _make_assignment_script(cls: type[LLM[M, T]]) -> t.Callable[[type[LLM[M, T]]
lines.extend([_setattr_class(key, f"cls.{fn} is not _cached_LLMInterface_get('{fn}')"), f"__gen_docstring(cls, '{fn}')",])
anns[key] = interface_anns.get(key)
return codegen.generate_function(cls, "__assign_llm_attr", lines, args=("cls", *args), globs=globs, annotations=anns)
# The diff residue defined this function twice (old one-liner + reformatted copy).
def vllm_postprocess_generate(self: "LLM[vllm.LLMEngine, T]", prompt: str, generation_result: "list[dict[str, t.Any]]", **_: t.Any) -> str:
  """Return the text of the first output of the first vLLM generation result."""
  return generation_result[0]["outputs"][0]["text"]
def vllm_generate(self: LLM["vllm.LLMEngine", T], prompt: str, **attrs: t.Any) -> list[dict[str, t.Any]]:
outputs: list[vllm.RequestOutput] = []
# TODO: support prompt_token_ids
@@ -365,9 +340,7 @@ def vllm_generate(self: LLM["vllm.LLMEngine", T], prompt: str, **attrs: t.Any) -
while self.model.has_unfinished_requests():
outputs.extend([r for r in self.model.step() if r.finished])
return [unmarshal_vllm_outputs(i) for i in outputs]
_AdaptersTuple: type[AdaptersTuple] = codegen.make_attr_tuple_class("AdaptersTuple", ["adapter_id", "name", "config"])
@attr.define(slots=True, repr=False, init=False)
class LLM(LLMInterface[M, T], ReprMixin):
if t.TYPE_CHECKING: __name__: str
@@ -405,6 +378,8 @@ class LLM(LLMInterface[M, T], ReprMixin):
elif "config_class" not in cd: raise RuntimeError("Missing required key 'config_class'. Make sure to define it within the LLM subclass.")
_make_assignment_script(cls)(cls)
if "tokenizer_id" not in cd and cls.__llm_implementation__ == "vllm": cls.tokenizer_id = _DEFAULT_TOKENIZER
# fmt: off
@overload
def __getitem__(self, item: t.Literal["trust_remote_code"]) -> bool: ...
@overload
@@ -525,10 +500,8 @@ class LLM(LLMInterface[M, T], ReprMixin):
except Exception as err:
raise OpenLLMException(f"Failed to generate a valid tag for {cfg_cls.__openllm_start_name__} with 'model_id={_model_id}' (lookup to see its traceback):\n{err}") from err
return cls(
*args, model_id=_model_id, llm_config=llm_config, quantization_config=quantization_config, _quantize_method=quantize, _model_version=_tag.version, _tag=_tag, _serialisation_format=serialisation, _local=_local, bettertransformer=str(first_not_none(bettertransformer, os.environ.get(cfg_cls.__openllm_env__["bettertransformer"]), default=None)).upper() in ENV_VARS_TRUE_VALUES,
_runtime=first_not_none(runtime, t.cast(t.Optional[t.Literal["ggml", "transformers"]], os.environ.get(cfg_cls.__openllm_env__["runtime"])), default=cfg_cls.__openllm_runtime__), _adapters_mapping=resolve_peft_config_type(adapter_map) if adapter_map is not None else None, **attrs
)
return cls(*args, model_id=_model_id, llm_config=llm_config, quantization_config=quantization_config, _quantize_method=quantize, _model_version=_tag.version, _tag=_tag, _serialisation_format=serialisation, _local=_local, bettertransformer=str(first_not_none(bettertransformer, os.environ.get(cfg_cls.__openllm_env__["bettertransformer"]), default=None)).upper() in ENV_VARS_TRUE_VALUES, _runtime=first_not_none(runtime, t.cast(t.Optional[t.Literal["ggml", "transformers"]], os.environ.get(cfg_cls.__openllm_env__["runtime"])), default=cfg_cls.__openllm_runtime__), _adapters_mapping=resolve_peft_config_type(adapter_map) if adapter_map is not None else None, **attrs)
# fmt: on
@classmethod
@apply(str.lower)
@@ -568,12 +541,10 @@ class LLM(LLMInterface[M, T], ReprMixin):
return f"{tag_name}:{model_version}"
@classmethod
def generate_tag(cls, *param_decls: t.Any, **attrs: t.Any) -> bentoml.Tag:
  """Build a ``bentoml.Tag`` from the generated tag string for this LLM class.

  The diff residue had the stale one-liner def consume the ``@classmethod``
  decorator, leaving the surviving def as a plain (unbound) method.
  """
  return bentoml.Tag.from_taglike(cls._generate_tag_str(*param_decls, **attrs))
def __init__(
self, *args: t.Any, model_id: str, llm_config: LLMConfig, bettertransformer: bool | None, quantization_config: transformers.BitsAndBytesConfig | autogptq.BaseQuantizeConfig | None, _adapters_mapping: AdaptersMapping | None, _tag: bentoml.Tag, _quantize_method: t.Literal["int8", "int4", "gptq"] | None, _runtime: t.Literal["ggml", "transformers"], _model_version: str,
_serialisation_format: t.Literal["safetensors", "legacy"], _local: bool, **attrs: t.Any,
):
def __init__(self, *args: t.Any, model_id: str, llm_config: LLMConfig, bettertransformer: bool | None, quantization_config: transformers.BitsAndBytesConfig | autogptq.BaseQuantizeConfig | None, _adapters_mapping: AdaptersMapping | None, _tag: bentoml.Tag, _quantize_method: t.Literal["int8", "int4", "gptq"] | None, _runtime: t.Literal["ggml", "transformers"], _model_version: str, _serialisation_format: t.Literal["safetensors", "legacy"], _local: bool, **attrs: t.Any,):
"""Initialize the LLM with given pretrained model.
> [!WARNING]
@@ -685,32 +656,55 @@ class LLM(LLMInterface[M, T], ReprMixin):
def __setattr__(self, attr: str, value: t.Any) -> None:
  """Reject runtime mutation of reserved attributes; delegate everything else to the normal machinery."""
  if attr in _reserved_namespace:
    raise ForbiddenAttributeError(f"{attr} should not be set during runtime as these value will be reflected during runtime. Instead, you can create a custom LLM subclass {self.__class__.__name__}.")
  super().__setattr__(attr, value)
# The diff residue duplicated both the getter and setter defs; the plain
# redefinition clobbered the property object, so ``@adapters_mapping.setter``
# would raise AttributeError at class creation. One getter + one setter only.
@property
def adapters_mapping(self) -> AdaptersMapping | None:
  """Mapping of resolved PEFT adapters attached to this LLM, if any."""
  return self._adapters_mapping

@adapters_mapping.setter
def adapters_mapping(self, value: AdaptersMapping) -> None:
  self._adapters_mapping = value
# The diff residue had a second plain def that clobbered the @property binding.
@property
def __repr_keys__(self) -> set[str]:
  """Attribute names included in ``__repr__``."""
  return {"model_id", "runner_name", "config", "adapters_mapping", "runtime", "tag"}
def __repr_args__(self) -> ReprArgs:
  """Yield ``(name, value)`` pairs for the repr; the config is flattened to a plain dict."""
  for key in self.__repr_keys__:
    value = self.config.model_dump(flatten=True) if key == "config" else getattr(self, key)
    yield key, value
# The diff residue left every property below with two defs (the old one-liner
# plus the reformatted copy); each plain redefinition clobbered the @property
# binding. Each accessor is kept exactly once.
@property
def model_id(self) -> str:
  """The model id this LLM was constructed with."""
  return self._model_id

@property
def runtime(self) -> t.Literal["ggml", "transformers"]:
  """The serialisation runtime for this LLM."""
  return self._runtime

@property
def runner_name(self) -> str:
  """Name under which this LLM's runner is registered."""
  return f"llm-{self.config['start_name']}-runner"

# NOTE: The section below defines a loose contract with langchain's LLM interface.
@property
def llm_type(self) -> str:
  return normalise_model_name(self._model_id)

@property
def identifying_params(self) -> DictStrAny:
  return {"configuration": self.config.model_dump_json().decode(), "model_ids": orjson.dumps(self.config["model_ids"]).decode()}

@property
def llm_parameters(self) -> tuple[tuple[tuple[t.Any, ...], DictStrAny], DictStrAny]:
  """``((model_decls, model_attrs), tokenizer_attrs)`` stored on this LLM."""
  return (self._model_decls, self._model_attrs), self._tokenizer_attrs

@property
def tag(self) -> bentoml.Tag:
  """The BentoML tag of the underlying stored model."""
  return self._tag

def ensure_model_id_exists(self) -> bentoml.Model:
  """Import the model into the BentoML store if missing, returning the stored model."""
  return openllm.import_model(self.config["start_name"], model_id=self.model_id, model_version=self._model_version, runtime=self.runtime, implementation=self.__llm_implementation__, quantize=self._quantize_method, serialisation_format=self._serialisation_format)
@property
def _bentomodel(self) -> bentoml.Model:
@@ -727,6 +721,7 @@ class LLM(LLMInterface[M, T], ReprMixin):
- The attributes dictionary that will be passed into `self.postprocess_generate`.
"""
return self.config.sanitize_parameters(prompt, **attrs)
def postprocess_generate(self, prompt: str, generation_result: t.Any, **attrs: t.Any) -> t.Any:
"""This handler will postprocess generation results from LLM.generate and then output nicely formatted results (if the LLM decide to do so.).
@@ -874,21 +869,22 @@ class LLM(LLMInterface[M, T], ReprMixin):
"""
models = models if models is not None else []
try: models.append(self._bentomodel)
except bentoml.exceptions.NotFound as err: raise RuntimeError(f"Failed to locate {self._bentomodel}:{err}") from None
try:
models.append(self._bentomodel)
except bentoml.exceptions.NotFound as err:
raise RuntimeError(f"Failed to locate {self._bentomodel}:{err}") from None
generate_sig = ModelSignature.from_dict(t.cast("_ModelSignatureDict", ModelSignatureDict(batchable=False)))
embeddings_sig = ModelSignature.from_dict(t.cast("_ModelSignatureDict", ModelSignatureDict(batchable=True, batch_dim=0)))
generate_iterator_sig = ModelSignature.from_dict(t.cast("_ModelSignatureDict", ModelSignatureDict(batchable=False)))
# NOTE: returning the two langchain API's to the runner
return llm_runner_class(self)(
llm_runnable_class(self, embeddings_sig, generate_sig, generate_iterator_sig), name=self.runner_name, embedded=False, models=models, max_batch_size=max_batch_size, max_latency_ms=max_latency_ms,
method_configs=bentoml_cattr.unstructure({"embeddings": embeddings_sig, "__call__": generate_sig, "generate": generate_sig, "generate_one": generate_sig, "generate_iterator": generate_iterator_sig}), scheduling_strategy=scheduling_strategy,
)
return llm_runner_class(self)(llm_runnable_class(self, embeddings_sig, generate_sig, generate_iterator_sig), name=self.runner_name, embedded=False, models=models, max_batch_size=max_batch_size, max_latency_ms=max_latency_ms, method_configs=bentoml_cattr.unstructure({"embeddings": embeddings_sig, "__call__": generate_sig, "generate": generate_sig, "generate_one": generate_sig, "generate_iterator": generate_iterator_sig}), scheduling_strategy=scheduling_strategy,)
# NOTE: Scikit API
# The diff residue defined this method twice (old one-liner + reformatted copy).
def predict(self, prompt: str, **attrs: t.Any) -> t.Any:
  """Scikit-learn-style alias for calling the LLM directly."""
  return self(prompt, **attrs)
def __call__(self, prompt: str, **attrs: t.Any) -> t.Any:
"""Returns the generation result and format the result.
@@ -908,11 +904,11 @@ class LLM(LLMInterface[M, T], ReprMixin):
def generate(self, prompt: str, **attrs: t.Any) -> t.List[t.Any]:
  """Run generation to completion and return the final streamed result in a single-element list.

  The diff residue drained the iterator twice (old one-liner loop plus the
  reformatted loop), which ran the whole generation twice. Also guards the
  empty-iterator case instead of raising NameError on an unbound loop variable.
  """
  # TODO: support different generation strategies, similar to self.model.generate
  it = None
  for it in self.generate_iterator(prompt, **attrs):
    pass
  return [it]
def generate_iterator(self, prompt: str, /,
*, context_length: int | None = None, echo: bool = True, stream_interval: int = 2, stop: str | t.Iterable[str] | None = None, stop_token_ids: list[int] | None = None, **attrs: t.Any) -> t.Iterator[t.Any]:
def generate_iterator(self, prompt: str, /, *, context_length: int | None = None, echo: bool = True, stream_interval: int = 2, stop: str | t.Iterable[str] | None = None, stop_token_ids: list[int] | None = None, **attrs: t.Any) -> t.Iterator[t.Any]:
# NOTE: encoder-decoder models will need to implement their own generate_iterator for now
# inspired from fastchat's generate_stream_func
from ._generation import prepare_logits_processor, get_context_length, is_partial_stop
@@ -937,7 +933,7 @@ class LLM(LLMInterface[M, T], ReprMixin):
if i == 0: # prefill
out = self.model(torch.as_tensor([input_ids], device=self.device), use_cache=True)
else: # decoding
out = self.model(input_ids=torch.as_tensor([[token]], device=self.device), use_cache=True, past_key_values=past_key_values) # type: ignore[has-type]
out = self.model(input_ids=torch.as_tensor([[token]], device=self.device), use_cache=True, past_key_values=past_key_values)
logits = out.logits
past_key_values = out.past_key_values
@@ -990,7 +986,7 @@ class LLM(LLMInterface[M, T], ReprMixin):
del past_key_values, out
gc.collect()
torch.cuda.empty_cache()
# fmt: off
@overload
def Runner(model_name: str, *, model_id: str | None = None, model_version: str | None = ..., init_local: t.Literal[False, True] = ..., **attrs: t.Any) -> LLMRunner[t.Any, t.Any]: ...
@overload
@@ -1039,16 +1035,16 @@ def Runner(model_name: str, ensure_available: bool | None = None, init_local: bo
runner = infer_auto_class(implementation).create_runner(model_name, llm_config=llm_config, ensure_available=ensure_available if ensure_available is not None else init_local, **attrs)
if init_local: runner.init_local(quiet=True)
return runner
# fmt: off
def method_signature(sig: ModelSignature) -> ModelSignatureDict:
  """Unstructure a ``ModelSignature`` into its plain-dict form."""
  unstructured = bentoml_cattr.unstructure(sig)
  return unstructured
class SetAdapterOutput(t.TypedDict):
  """Result payload of a runner ``set_adapter`` call."""
  # Whether switching to the requested adapter succeeded.
  success: bool
  # Human-readable status or error message.
  message: str
def llm_runnable_class(self: LLM[M, T], embeddings_sig: ModelSignature, generate_sig: ModelSignature, generate_iterator_sig: ModelSignature) -> type[LLMRunnable[M, T]]:
class _Runnable(bentoml.Runnable):
SUPPORTED_RESOURCES = ("nvidia.com/gpu", "amd.com/gpu", "cpu")
SUPPORTS_CPU_MULTI_THREADING = True
def __init__(__self: _Runnable):
# NOTE: The side effect of this line
# is that it will load the imported model during
@@ -1057,28 +1053,35 @@ def llm_runnable_class(self: LLM[M, T], embeddings_sig: ModelSignature, generate
if self.adapters_mapping is not None:
logger.info("Applying LoRA to %s...", self.runner_name)
self.apply_adapter(inference_mode=True, load_adapters="all")
def set_adapter(__self: _Runnable, adapter_name: str) -> None:
if self.__llm_adapter_map__ is None: raise ValueError("No adapters available for current running server.")
elif not isinstance(self.model, peft.PeftModel): raise RuntimeError("Model is not a PeftModel")
if adapter_name != "default": self.model.set_adapter(adapter_name)
logger.info("Successfully apply LoRA layer %s", adapter_name)
@bentoml.Runnable.method(**method_signature(embeddings_sig))
def embeddings(__self: _Runnable, prompt: str | list[str]) -> t.Sequence[LLMEmbeddings]: return [self.embeddings([prompt] if isinstance(prompt, str) else prompt)]
def embeddings(__self: _Runnable, prompt: str | list[str]) -> t.Sequence[LLMEmbeddings]:
return [self.embeddings([prompt] if isinstance(prompt, str) else prompt)]
@bentoml.Runnable.method(**method_signature(generate_sig))
def __call__(__self: _Runnable, prompt: str, **attrs: t.Any) -> list[t.Any]:
adapter_name = attrs.pop("adapter_name", None)
if adapter_name is not None: __self.set_adapter(adapter_name)
return self.generate(prompt, **attrs)
@bentoml.Runnable.method(**method_signature(generate_sig))
def generate(__self: _Runnable, prompt: str, **attrs: t.Any) -> list[t.Any]:
adapter_name = attrs.pop("adapter_name", None)
if adapter_name is not None: __self.set_adapter(adapter_name)
return self.generate(prompt, **attrs)
@bentoml.Runnable.method(**method_signature(generate_sig))
def generate_one(__self: _Runnable, prompt: str, stop: list[str], **attrs: t.Any) -> t.Sequence[dict[t.Literal["generated_text"], str]]:
adapter_name = attrs.pop("adapter_name", None)
if adapter_name is not None: __self.set_adapter(adapter_name)
return self.generate_one(prompt, stop, **attrs)
@bentoml.Runnable.method(**method_signature(generate_iterator_sig))
def generate_iterator(__self: _Runnable, prompt: str, **attrs: t.Any) -> t.Generator[str, None, str]:
adapter_name = attrs.pop("adapter_name", None)
@@ -1094,13 +1097,13 @@ def llm_runnable_class(self: LLM[M, T], embeddings_sig: ModelSignature, generate
return " ".join(output_text)
return types.new_class(self.__class__.__name__ + "Runnable", (_Runnable,), {}, lambda ns: ns.update({"SUPPORTED_RESOURCES": ("nvidia.com/gpu", "amd.com/gpu") if self.config["requires_gpu"] else ("nvidia.com/gpu", "amd.com/gpu", "cpu"), "__module__": self.__module__, "__doc__": self.config["env"].start_docstring}))
def llm_runner_class(self: LLM[M, T]) -> type[LLMRunner[M, T]]:
def available_adapters(_: LLMRunner[M, T]) -> PeftAdapterOutput:
if not is_peft_available(): return PeftAdapterOutput(success=False, result={}, error_msg="peft is not available. Make sure to install: 'pip install \"openllm[fine-tune]\"'")
if self.__llm_adapter_map__ is None: return PeftAdapterOutput(success=False, result={}, error_msg="No adapters available for current running server.")
if not isinstance(self.model, peft.PeftModel): return PeftAdapterOutput(success=False, result={}, error_msg="Model is not a PeftModel")
return PeftAdapterOutput(success=True, result=self.model.peft_config, error_msg="")
def _wrapped_generate_run(__self: LLMRunner[M, T], prompt: str, **kwargs: t.Any) -> t.Any:
"""Wrapper for runner.generate.run() to handle the prompt and postprocessing.
@@ -1128,7 +1131,9 @@ def llm_runner_class(self: LLM[M, T]) -> type[LLMRunner[M, T]]:
"""
return __self.embeddings.run([prompt] if isinstance(prompt, str) else prompt)
def _wrapped_repr_keys(_: LLMRunner[M, T]) -> set[str]: return {"config", "llm_type", "runner_methods", "runtime", "llm_tag"}
def _wrapped_repr_keys(_: LLMRunner[M, T]) -> set[str]:
  """Attribute names surfaced by ``ReprMixin.__repr__`` on generated runner classes."""
  return set(("config", "llm_type", "runner_methods", "runtime", "llm_tag"))
def _wrapped_repr_args(__self: LLMRunner[M, T]) -> ReprArgs:
yield "runner_methods", {method.name: {"batchable": method.config.batchable, "batch_dim": method.config.batch_dim if method.config.batchable else None} for method in __self.runner_methods}
yield "config", self.config.model_dump(flatten=True)
@@ -1136,8 +1141,10 @@ def llm_runner_class(self: LLM[M, T]) -> type[LLMRunner[M, T]]:
yield "runtime", self.runtime
yield "llm_tag", self.tag
return types.new_class(self.__class__.__name__ + "Runner", (bentoml.Runner,), exec_body=lambda ns: ns.update({"llm_type": self.llm_type, "identifying_params": self.identifying_params, "llm_tag": self.tag, "llm": self, "config": self.config, "implementation": self.__llm_implementation__, "peft_adapters": property(fget=available_adapters),
"download_model": self.ensure_model_id_exists, "__call__": _wrapped_generate_run, "embed": _wrapped_embeddings_run, "__module__": self.__module__, "__doc__": self.config["env"].start_docstring, "__repr__": ReprMixin.__repr__,
"__repr_keys__": property( _wrapped_repr_keys), "__repr_args__": _wrapped_repr_args, "supports_embeddings": self["supports_embeddings"], "supports_hf_agent": self["supports_generate_one"], "has_adapters": self._adapters_mapping is not None}))
return types.new_class(
self.__class__.__name__ + "Runner", (bentoml.Runner,),
exec_body=lambda ns: ns.update({
"llm_type": self.llm_type, "identifying_params": self.identifying_params, "llm_tag": self.tag, "llm": self, "config": self.config, "implementation": self.__llm_implementation__, "peft_adapters": property(fget=available_adapters), "download_model": self.ensure_model_id_exists, "__call__": _wrapped_generate_run, "embed": _wrapped_embeddings_run, "__module__": self.__module__, "__doc__": self.config["env"].start_docstring, "__repr__": ReprMixin.__repr__, "__repr_keys__": property(_wrapped_repr_keys), "__repr_args__": _wrapped_repr_args, "supports_embeddings": self["supports_embeddings"], "supports_hf_agent": self["supports_generate_one"], "has_adapters": self._adapters_mapping is not None
})
)
__all__ = ["LLMRunner", "LLMRunnable", "Runner", "LLM", "llm_runner_class", "llm_runnable_class", "LLMEmbeddings"]

View File

@@ -6,17 +6,17 @@ from openllm_core._typing_compat import overload
if t.TYPE_CHECKING:
from ._llm import LLM
from openllm_core._typing_compat import DictStrAny
autogptq, torch, transformers = LazyLoader("autogptq", globals(), "auto_gptq"), LazyLoader("torch", globals(), "torch"), LazyLoader("transformers", globals(), "transformers")
logger = logging.getLogger(__name__)
QuantiseMode = t.Literal["int8", "int4", "gptq"]
@overload
def infer_quantisation_config(cls: type[LLM[t.Any, t.Any]], quantise: t.Literal["int8", "int4"], **attrs: t.Any) -> tuple[transformers.BitsAndBytesConfig, DictStrAny]: ...
def infer_quantisation_config(cls: type[LLM[t.Any, t.Any]], quantise: t.Literal["int8", "int4"], **attrs: t.Any) -> tuple[transformers.BitsAndBytesConfig, DictStrAny]:
...
@overload
def infer_quantisation_config(cls: type[LLM[t.Any, t.Any]], quantise: t.Literal["gptq"], **attrs: t.Any) -> tuple[autogptq.BaseQuantizeConfig, DictStrAny]: ...
def infer_quantisation_config(cls: type[LLM[t.Any, t.Any]], quantise: t.Literal["gptq"], **attrs: t.Any) -> tuple[autogptq.BaseQuantizeConfig, DictStrAny]:
...
def infer_quantisation_config(cls: type[LLM[t.Any, t.Any]], quantise: QuantiseMode, **attrs: t.Any) -> tuple[transformers.BitsAndBytesConfig | autogptq.BaseQuantizeConfig, DictStrAny]:
# 8 bit configuration
int8_threshold = attrs.pop("llm_int8_threshhold", 6.0)
@@ -52,6 +52,8 @@ def infer_quantisation_config(cls: type[LLM[t.Any, t.Any]], quantise: QuantiseMo
if not is_autogptq_available():
logger.warning("'quantize=\"gptq\"' requires 'auto-gptq' to be installed (not available with local environment). Make sure to have 'auto-gptq' available locally: 'pip install \"openllm[gptq]\"'. OpenLLM will fallback to int8 with bitsandbytes.")
quantisation_config = create_int8_config(int8_skip_modules)
else: quantisation_config = autogptq.BaseQuantizeConfig(**autogptq_attrs)
else: raise ValueError(f"'quantize' must be one of ['int8', 'int4', 'gptq'], got {quantise} instead.")
else:
quantisation_config = autogptq.BaseQuantizeConfig(**autogptq_attrs)
else:
raise ValueError(f"'quantize' must be one of ['int8', 'int4', 'gptq'], got {quantise} instead.")
return quantisation_config, attrs

View File

@@ -1,12 +1,13 @@
# mypy: disable-error-code="call-arg,misc,attr-defined,type-abstract"
from __future__ import annotations
import os, warnings, orjson, bentoml, openllm, typing as t
import os, warnings, orjson, bentoml, openllm, openllm_core, typing as t
from starlette.applications import Starlette
from starlette.responses import JSONResponse
from starlette.routing import Route
if t.TYPE_CHECKING:
from starlette.requests import Request
from starlette.responses import Response
from bentoml._internal.runner.runner import RunnerMethod
from bentoml._internal.runner.runner import RunnerMethod, AbstractRunner
# The following warnings are from bitsandbytes and are probably not important for users to see
warnings.filterwarnings("ignore", message="MatMul8bitLt: inputs will be cast from torch.float32 to float16 during quantization")
warnings.filterwarnings("ignore", message="MatMul8bitLt: inputs will be cast from torch.bfloat16 to float16 during quantization")
@@ -15,51 +16,83 @@ model = os.environ.get("OPENLLM_MODEL", "{__model_name__}") # openllm: model na
adapter_map = os.environ.get("OPENLLM_ADAPTER_MAP", """{__model_adapter_map__}""") # openllm: model adapter map
llm_config = openllm.AutoConfig.for_model(model)
runner = openllm.Runner(model, llm_config=llm_config, ensure_available=False, adapter_map=orjson.loads(adapter_map))
generic_embedding_runner = bentoml.Runner(openllm.GenericEmbeddingRunnable, name="llm-generic-embedding", scheduling_strategy=openllm.CascadingResourceStrategy, max_batch_size=32, max_latency_ms=300)
runners: t.Sequence[bentoml.Runner] = [runner]
generic_embedding_runner = bentoml.Runner(openllm.GenericEmbeddingRunnable, name="llm-generic-embedding", scheduling_strategy=openllm_core.CascadingResourceStrategy, max_batch_size=32, max_latency_ms=300) # type: ignore[arg-type] # XXX: remove once bentoml.Runner is correct set with type.
runners: list[AbstractRunner] = [runner]
if not runner.supports_embeddings: runners.append(generic_embedding_runner)
svc = bentoml.Service(name=f"llm-{llm_config['start_name']}-service", runners=runners)
_JsonInput=bentoml.io.JSON.from_sample({"prompt": "", "llm_config": llm_config.model_dump(flatten=True), "adapter_name": ""})
_JsonInput = bentoml.io.JSON.from_sample({"prompt": "", "llm_config": llm_config.model_dump(flatten=True), "adapter_name": ""})
@svc.api(route="/v1/generate", input=_JsonInput, output=bentoml.io.JSON.from_sample({"responses": [], "configuration": llm_config.model_dump(flatten=True)}))
async def generate_v1(input_dict: dict[str, t.Any]) -> openllm.GenerationOutput:
qa_inputs = openllm.GenerationInput.from_llm_config(llm_config)(**input_dict)
config = qa_inputs.llm_config.model_dump()
responses = await runner.generate.async_run(qa_inputs.prompt, **{"adapter_name": qa_inputs.adapter_name, **config})
return openllm.GenerationOutput(responses=responses, configuration=config)
@svc.api(route="/v1/generate_stream", input=_JsonInput,output=bentoml.io.Text(content_type="text/event_stream"))
@svc.api(route="/v1/generate_stream", input=_JsonInput, output=bentoml.io.Text(content_type="text/event_stream"))
async def generate_stream_v1(input_dict: dict[str, t.Any]) -> t.AsyncGenerator[str, None]:
qa_inputs = openllm.GenerationInput.from_llm_config(llm_config)(**input_dict)
return runner.generate_iterator.async_stream(qa_inputs.prompt, adapter_name=qa_inputs.adapter_name, **qa_inputs.llm_config.model_dump())
@svc.api(route="/v1/metadata", input=bentoml.io.Text(), output=bentoml.io.JSON.from_sample({"model_id": runner.llm.model_id, "timeout": 3600, "model_name": llm_config["model_name"], "framework": "pt", "configuration": "", "supports_embeddings": runner.supports_embeddings, "supports_hf_agent": runner.supports_hf_agent}))
def metadata_v1(_: str) -> openllm.MetadataOutput:
return openllm.MetadataOutput(timeout=llm_config["timeout"], model_name=llm_config["model_name"], framework=llm_config["env"]["framework_value"], model_id=runner.llm.model_id, configuration=llm_config.model_dump_json().decode(), supports_embeddings=runner.supports_embeddings, supports_hf_agent=runner.supports_hf_agent)
@svc.api(route="/v1/embeddings", input=bentoml.io.JSON.from_sample(["Hey Jude, welcome to the jungle!", "What is the meaning of life?"]), output=bentoml.io.JSON.from_sample({"embeddings": [0.007917795330286026, -0.014421648345887661, 0.00481307040899992, 0.007331526838243008, -0.0066398633643984795, 0.00945580005645752, 0.0087016262114048, -0.010709521360695362, 0.012635177001357079, 0.010541186667978764, -0.00730888033285737, -0.001783102168701589, 0.02339819073677063, -0.010825827717781067, -0.015888236463069916, 0.01876218430697918, 0.0076906150206923485, 0.0009032754460349679, -0.010024012066423893, 0.01090280432254076, -0.008668390102684498, 0.02070549875497818, 0.0014594447566196322, -0.018775740638375282, -0.014814382418990135, 0.01796768605709076], "num_tokens": 20}))
@svc.api(
route="/v1/embeddings",
input=bentoml.io.JSON.from_sample(["Hey Jude, welcome to the jungle!", "What is the meaning of life?"]),
output=bentoml.io.JSON.from_sample({
"embeddings": [
0.007917795330286026,
-0.014421648345887661,
0.00481307040899992,
0.007331526838243008,
-0.0066398633643984795,
0.00945580005645752,
0.0087016262114048,
-0.010709521360695362,
0.012635177001357079,
0.010541186667978764,
-0.00730888033285737,
-0.001783102168701589,
0.02339819073677063,
-0.010825827717781067,
-0.015888236463069916,
0.01876218430697918,
0.0076906150206923485,
0.0009032754460349679,
-0.010024012066423893,
0.01090280432254076,
-0.008668390102684498,
0.02070549875497818,
0.0014594447566196322,
-0.018775740638375282,
-0.014814382418990135,
0.01796768605709076
],
"num_tokens": 20
})
)
async def embeddings_v1(phrases: list[str]) -> openllm.EmbeddingsOutput:
embed_call: RunnerMethod[bentoml.Runnable | openllm.LLMRunnable[t.Any, t.Any], [list[str]], t.Sequence[openllm.LLMEmbeddings]] = runner.embeddings if runner.supports_embeddings else generic_embedding_runner.encode
embed_call: RunnerMethod[bentoml.Runnable | openllm.LLMRunnable[t.Any, t.Any], [list[str]], t.Sequence[openllm.LLMEmbeddings]] = runner.embeddings if runner.supports_embeddings else generic_embedding_runner.encode # type: ignore[type-arg,assignment,valid-type]
responses = (await embed_call.async_run(phrases))[0]
return openllm.EmbeddingsOutput(embeddings=responses["embeddings"], num_tokens=responses["num_tokens"])
if runner.supports_hf_agent and openllm.utils.is_transformers_supports_agent():
async def hf_agent(request: Request) -> Response:
json_str = await request.body()
try: input_data = openllm.utils.bentoml_cattr.structure(orjson.loads(json_str), openllm.HfAgentInput)
except orjson.JSONDecodeError as err: raise openllm.exceptions.OpenLLMException(f"Invalid JSON input received: {err}") from None
try:
input_data = openllm.utils.bentoml_cattr.structure(orjson.loads(json_str), openllm.HfAgentInput)
except orjson.JSONDecodeError as err:
raise openllm.exceptions.OpenLLMException(f"Invalid JSON input received: {err}") from None
stop = input_data.parameters.pop("stop", ["\n"])
try: return JSONResponse(await runner.generate_one.async_run(input_data.inputs, stop, **input_data.parameters), status_code=200)
except NotImplementedError: return JSONResponse(f"'{model}' is currently not supported with HuggingFace agents.", status_code=500)
try:
return JSONResponse(await runner.generate_one.async_run(input_data.inputs, stop, **input_data.parameters), status_code=200)
except NotImplementedError:
return JSONResponse(f"'{model}' is currently not supported with HuggingFace agents.", status_code=500)
hf_app = Starlette(debug=True, routes=[Route("/agent", hf_agent, methods=["POST"])])
svc.mount_asgi_app(hf_app, path="/hf")
async def list_adapter_v1(_: Request) -> Response:
  """Report the PEFT adapters currently known to the runner as JSON."""
  res: dict[str, t.Any] = {}
  # Only include the adapter map when the runner successfully resolved one.
  if runner.peft_adapters["success"] is True: res["result"] = {k: v.to_dict() for k, v in runner.peft_adapters["result"].items()}
  res.update({"success": runner.peft_adapters["success"], "error_msg": runner.peft_adapters["error_msg"]})
  return JSONResponse(res, status_code=200)
adapters_app_v1 = Starlette(debug=True, routes=[Route("/adapters", list_adapter_v1, methods=["GET"])])
svc.mount_asgi_app(adapters_app_v1, path="/v1")

View File

@@ -5,26 +5,12 @@ These utilities will stay internal, and its API can be changed or updated withou
from __future__ import annotations
import os, typing as t
from openllm_core.utils import LazyModule
_import_structure: dict[str, list[str]] = {"_package": ["create_bento", "build_editable", "construct_python_options", "construct_docker_options"], "oci": ["CONTAINER_NAMES", "get_base_container_tag", "build_container", "get_base_container_name", "supported_registries", "RefResolver"]}
if t.TYPE_CHECKING:
from . import _package as _package, oci as oci
from ._package import (
build_editable as build_editable,
construct_docker_options as construct_docker_options,
construct_python_options as construct_python_options,
create_bento as create_bento,
)
from .oci import (
CONTAINER_NAMES as CONTAINER_NAMES,
RefResolver as RefResolver,
build_container as build_container,
get_base_container_name as get_base_container_name,
get_base_container_tag as get_base_container_tag,
supported_registries as supported_registries,
)
from ._package import build_editable as build_editable, construct_docker_options as construct_docker_options, construct_python_options as construct_python_options, create_bento as create_bento
from .oci import CONTAINER_NAMES as CONTAINER_NAMES, RefResolver as RefResolver, build_container as build_container, get_base_container_name as get_base_container_name, get_base_container_tag as get_base_container_tag, supported_registries as supported_registries
__lazy = LazyModule(__name__, os.path.abspath("__file__"), _import_structure)
__all__ = __lazy.__all__
__dir__ = __lazy.__dir__

View File

@@ -13,11 +13,9 @@ if t.TYPE_CHECKING:
from openllm_core._typing_compat import LiteralString, LiteralContainerRegistry, LiteralContainerVersionStrategy
from bentoml._internal.bento import BentoStore
from bentoml._internal.models.model import ModelStore
logger = logging.getLogger(__name__)
OPENLLM_DEV_BUILD = "OPENLLM_DEV_BUILD"
def build_editable(path: str, package: t.Literal["openllm", "openllm_core", "openllm_client"] = "openllm") -> str | None:
"""Build OpenLLM if the OPENLLM_DEV_BUILD environment variable is set."""
if str(os.environ.get(OPENLLM_DEV_BUILD, False)).lower() != "true": return None
@@ -26,7 +24,7 @@ def build_editable(path: str, package: t.Literal["openllm", "openllm_core", "ope
from build.env import IsolatedEnvBuilder
module_location = openllm_core.utils.pkg.source_locations(package)
if not module_location: raise RuntimeError("Could not find the source location of OpenLLM. Make sure to unset OPENLLM_DEV_BUILD if you are developing OpenLLM.")
pyproject_path = Path(module_location).parent.parent/"pyproject.toml"
pyproject_path = Path(module_location).parent.parent / "pyproject.toml"
if os.path.isfile(pyproject_path.__fspath__()):
logger.info("Generating built wheels for package %s...", package)
with IsolatedEnvBuilder() as env:
@@ -36,7 +34,6 @@ def build_editable(path: str, package: t.Literal["openllm", "openllm_core", "ope
env.install(builder.build_system_requires)
return builder.build("wheel", path, config_settings={"--global-option": "--quiet"})
raise RuntimeError("Custom OpenLLM build is currently not supported. Please install OpenLLM from PyPI or built it from Git source.")
def construct_python_options(llm: openllm.LLM[t.Any, t.Any], llm_fs: FS, extra_dependencies: tuple[str, ...] | None = None, adapter_map: dict[str, str | None] | None = None,) -> PythonOptions:
packages = ["openllm", "scipy"] # apparently bnb misses this one
if adapter_map is not None: packages += ["openllm[fine-tune]"]
@@ -67,7 +64,8 @@ def construct_python_options(llm: openllm.LLM[t.Any, t.Any], llm_fs: FS, extra_d
_tf_version = importlib.metadata.version(candidate)
packages.extend([f"tensorflow>={_tf_version}"])
break
except importlib.metadata.PackageNotFoundError: pass # Ok to ignore here since we actually need to check for all possible tensorflow distribution.
except importlib.metadata.PackageNotFoundError:
pass # Ok to ignore here since we actually need to check for all possible tensorflow distribution.
else:
if not openllm_core.utils.is_torch_available(): raise ValueError("PyTorch is not available. Make sure to have it locally installed.")
packages.extend([f'torch>={importlib.metadata.version("torch")}'])
@@ -75,18 +73,12 @@ def construct_python_options(llm: openllm.LLM[t.Any, t.Any], llm_fs: FS, extra_d
built_wheels: list[str | None] = [build_editable(llm_fs.getsyspath("/"), t.cast(t.Literal["openllm", "openllm_core", "openllm_client"], p)) for p in ("openllm_core", "openllm_client", "openllm")]
if all(i for i in built_wheels): wheels.extend([llm_fs.getsyspath(f"/{i.split('/')[-1]}") for i in t.cast(t.List[str], built_wheels)])
return PythonOptions(packages=packages, wheels=wheels, lock_packages=False, extra_index_url=["https://download.pytorch.org/whl/cu118"])
def construct_docker_options(llm: openllm.LLM[t.Any, t.Any], _: FS, workers_per_resource: float, quantize: LiteralString | None, bettertransformer: bool | None, adapter_map: dict[str, str | None] | None, dockerfile_template: str | None, runtime: t.Literal["ggml", "transformers"], serialisation_format: t.Literal["safetensors", "legacy"], container_registry: LiteralContainerRegistry, container_version_strategy: LiteralContainerVersionStrategy) -> DockerOptions:
from openllm.cli._factory import parse_config_options
environ = parse_config_options(llm.config, llm.config["timeout"], workers_per_resource, None, True, os.environ.copy())
env: openllm_core.utils.EnvVarMixin = llm.config["env"]
if env["framework_value"] == "vllm": serialisation_format = "legacy"
env_dict = {
env.framework: env["framework_value"], env.config: f"'{llm.config.model_dump_json().decode()}'",
env.model_id: f"/home/bentoml/bento/models/{llm.tag.path()}",
"OPENLLM_MODEL": llm.config["model_name"], "OPENLLM_SERIALIZATION": serialisation_format,
"OPENLLM_ADAPTER_MAP": f"'{orjson.dumps(adapter_map).decode()}'", "BENTOML_DEBUG": str(True), "BENTOML_QUIET": str(False), "BENTOML_CONFIG_OPTIONS": f"'{environ['BENTOML_CONFIG_OPTIONS']}'",
}
env_dict = {env.framework: env["framework_value"], env.config: f"'{llm.config.model_dump_json().decode()}'", env.model_id: f"/home/bentoml/bento/models/{llm.tag.path()}", "OPENLLM_MODEL": llm.config["model_name"], "OPENLLM_SERIALIZATION": serialisation_format, "OPENLLM_ADAPTER_MAP": f"'{orjson.dumps(adapter_map).decode()}'", "BENTOML_DEBUG": str(True), "BENTOML_QUIET": str(False), "BENTOML_CONFIG_OPTIONS": f"'{environ['BENTOML_CONFIG_OPTIONS']}'",}
if adapter_map: env_dict["BITSANDBYTES_NOWELCOME"] = os.environ.get("BITSANDBYTES_NOWELCOME", "1")
# We need to handle None separately here, as env from subprocess doesn't accept None value.
@@ -96,43 +88,60 @@ def construct_docker_options(llm: openllm.LLM[t.Any, t.Any], _: FS, workers_per_
if _env["quantize_value"] is not None: env_dict[_env.quantize] = t.cast(str, _env["quantize_value"])
env_dict[_env.runtime] = _env["runtime_value"]
return DockerOptions(base_image=f"{oci.CONTAINER_NAMES[container_registry]}:{oci.get_base_container_tag(container_version_strategy)}", env=env_dict, dockerfile_template=dockerfile_template)
OPENLLM_MODEL_NAME = "# openllm: model name"
OPENLLM_MODEL_ADAPTER_MAP = "# openllm: model adapter map"
class ModelNameFormatter(string.Formatter):
model_keyword: LiteralString = "__model_name__"
def __init__(self, model_name: str):
"""The formatter that extends model_name to be formatted the 'service.py'."""
super().__init__()
self.model_name = model_name
def vformat(self, format_string: str, *args: t.Any, **attrs: t.Any) -> t.Any: return super().vformat(format_string, (), {self.model_keyword: self.model_name})
def vformat(self, format_string: str, *args: t.Any, **attrs: t.Any) -> t.Any:
return super().vformat(format_string, (), {self.model_keyword: self.model_name})
def can_format(self, value: str) -> bool:
try:
self.parse(value)
return True
except ValueError: return False
except ValueError:
return False
class ModelIdFormatter(ModelNameFormatter):
  # Substitutes the '__model_id__' placeholder when templating the generated service file.
  model_keyword: LiteralString = "__model_id__"
class ModelAdapterMapFormatter(ModelNameFormatter):
  # Substitutes the '__model_adapter_map__' placeholder when templating the generated service file.
  model_keyword: LiteralString = "__model_adapter_map__"
_service_file = Path(os.path.abspath(__file__)).parent.parent/"_service.py"
_service_file = Path(os.path.abspath(__file__)).parent.parent / "_service.py"
def write_service(llm: openllm.LLM[t.Any, t.Any], adapter_map: dict[str, str | None] | None, llm_fs: FS) -> None:
from openllm_core.utils import DEBUG
model_name = llm.config["model_name"]
logger.debug("Generating service file for %s at %s (dir=%s)", model_name, llm.config["service_name"], llm_fs.getsyspath("/"))
with open(_service_file.__fspath__(), "r") as f: src_contents = f.readlines()
with open(_service_file.__fspath__(), "r") as f:
src_contents = f.readlines()
for it in src_contents:
if OPENLLM_MODEL_NAME in it: src_contents[src_contents.index(it)] = (ModelNameFormatter(model_name).vformat(it)[:-(len(OPENLLM_MODEL_NAME) + 3)] + "\n")
elif OPENLLM_MODEL_ADAPTER_MAP in it: src_contents[src_contents.index(it)] = (ModelAdapterMapFormatter(orjson.dumps(adapter_map).decode()).vformat(it)[:-(len(OPENLLM_MODEL_ADAPTER_MAP) + 3)] + "\n")
script = f"# GENERATED BY 'openllm build {model_name}'. DO NOT EDIT\n\n" + "".join(src_contents)
if DEBUG: logger.info("Generated script:\n%s", script)
llm_fs.writetext(llm.config["service_name"], script)
@inject
def create_bento(bento_tag: bentoml.Tag, llm_fs: FS, llm: openllm.LLM[t.Any, t.Any], workers_per_resource: str | float, quantize: LiteralString | None, bettertransformer: bool | None, dockerfile_template: str | None, adapter_map: dict[str, str | None] | None = None, extra_dependencies: tuple[str, ...] | None = None,
runtime: t.Literal[ "ggml", "transformers"] = "transformers", serialisation_format: t.Literal["safetensors", "legacy"] = "safetensors", container_registry: LiteralContainerRegistry = "ecr", container_version_strategy: LiteralContainerVersionStrategy = "release",
_bento_store: BentoStore = Provide[BentoMLContainer.bento_store], _model_store: ModelStore = Provide[BentoMLContainer.model_store]) -> bentoml.Bento:
def create_bento(
bento_tag: bentoml.Tag,
llm_fs: FS,
llm: openllm.LLM[t.Any, t.Any],
workers_per_resource: str | float,
quantize: LiteralString | None,
bettertransformer: bool | None,
dockerfile_template: str | None,
adapter_map: dict[str, str | None] | None = None,
extra_dependencies: tuple[str, ...] | None = None,
runtime: t.Literal["ggml", "transformers"] = "transformers",
serialisation_format: t.Literal["safetensors", "legacy"] = "safetensors",
container_registry: LiteralContainerRegistry = "ecr",
container_version_strategy: LiteralContainerVersionStrategy = "release",
_bento_store: BentoStore = Provide[BentoMLContainer.bento_store],
_model_store: ModelStore = Provide[BentoMLContainer.model_store]
) -> bentoml.Bento:
framework_envvar = llm.config["env"]["framework_value"]
labels = dict(llm.identifying_params)
labels.update({"_type": llm.llm_type, "_framework": framework_envvar, "start_name": llm.config["start_name"], "base_name_or_path": llm.model_id, "bundler": "openllm.bundle"})
@@ -141,16 +150,26 @@ def create_bento(bento_tag: bentoml.Tag, llm_fs: FS, llm: openllm.LLM[t.Any, t.A
if workers_per_resource == "round_robin": workers_per_resource = 1.0
elif workers_per_resource == "conserved": workers_per_resource = 1.0 if openllm_core.utils.device_count() == 0 else float(1 / openllm_core.utils.device_count())
else:
try: workers_per_resource = float(workers_per_resource)
except ValueError: raise ValueError("'workers_per_resource' only accept ['round_robin', 'conserved'] as possible strategies.") from None
elif isinstance(workers_per_resource, int): workers_per_resource = float(workers_per_resource)
try:
workers_per_resource = float(workers_per_resource)
except ValueError:
raise ValueError("'workers_per_resource' only accept ['round_robin', 'conserved'] as possible strategies.") from None
elif isinstance(workers_per_resource, int):
workers_per_resource = float(workers_per_resource)
logger.info("Building Bento for '%s'", llm.config["start_name"])
# add service.py definition to this temporary folder
write_service(llm, adapter_map, llm_fs)
llm_spec = ModelSpec.from_item({"tag": str(llm.tag), "alias": llm.tag.name})
build_config = BentoBuildConfig(
service=f"{llm.config['service_name']}:svc", name=bento_tag.name, labels=labels, description=f"OpenLLM service for {llm.config['start_name']}", include=list(llm_fs.walk.files()), exclude=["/venv", "/.venv", "__pycache__/", "*.py[cod]", "*$py.class"], python=construct_python_options(llm, llm_fs, extra_dependencies, adapter_map), models=[llm_spec],
service=f"{llm.config['service_name']}:svc",
name=bento_tag.name,
labels=labels,
description=f"OpenLLM service for {llm.config['start_name']}",
include=list(llm_fs.walk.files()),
exclude=["/venv", "/.venv", "__pycache__/", "*.py[cod]", "*$py.class"],
python=construct_python_options(llm, llm_fs, extra_dependencies, adapter_map),
models=[llm_spec],
docker=construct_docker_options(llm, llm_fs, workers_per_resource, quantize, bettertransformer, adapter_map, dockerfile_template, runtime, serialisation_format, container_registry, container_version_strategy)
)

View File

@@ -10,7 +10,6 @@ if t.TYPE_CHECKING:
from openllm_core._typing_compat import LiteralContainerRegistry, LiteralContainerVersionStrategy
from ghapi import all
from openllm_core._typing_compat import RefTuple, LiteralString
all = openllm_core.utils.LazyLoader("all", globals(), "ghapi.all") # noqa: F811
logger = logging.getLogger(__name__)
@@ -29,19 +28,17 @@ _OWNER = "bentoml"
_REPO = "openllm"
_module_location = openllm_core.utils.pkg.source_locations("openllm")
@functools.lru_cache
@openllm_core.utils.apply(str.lower)
def get_base_container_name(reg: LiteralContainerRegistry) -> str: return _CONTAINER_REGISTRY[reg]
def _convert_version_from_string(s: str) -> VersionInfo: return VersionInfo.from_version_string(s)
def _commit_time_range(r: int = 5) -> str: return (datetime.now(timezone.utc) - timedelta(days=r)).strftime("%Y-%m-%dT%H:%M:%SZ")
def get_base_container_name(reg: LiteralContainerRegistry) -> str:
  """Look up the base container name registered for *reg* in the module registry table."""
  container_name = _CONTAINER_REGISTRY[reg]
  return container_name
def _convert_version_from_string(s: str) -> VersionInfo:
  """attrs field converter: parse a version string such as '0.2.12' into a ``VersionInfo``."""
  return VersionInfo.from_version_string(s)
def _commit_time_range(r: int = 5) -> str:
return (datetime.now(timezone.utc) - timedelta(days=r)).strftime("%Y-%m-%dT%H:%M:%SZ")
class VersionNotSupported(openllm.exceptions.OpenLLMException):
  """Raised when the requested release predates OpenLLM base-container support (earlier than 0.2.12)."""
_RefTuple: type[RefTuple] = openllm_core.utils.codegen.make_attr_tuple_class("_RefTuple", ["git_hash", "version", "strategy"])
def nightly_resolver(cls: type[RefResolver]) -> str:
# NOTE: all openllm container will have sha-<git_hash[:7]>
# This will use docker to run skopeo to determine the correct latest tag that is available
@@ -53,15 +50,17 @@ def nightly_resolver(cls: type[RefResolver]) -> str:
return next(f'sha-{it["sha"][:7]}' for it in commits if "[skip ci]" not in it["commit"]["message"])
# now is the correct behaviour
return orjson.loads(subprocess.check_output([docker_bin, "run", "--rm", "-it", "quay.io/skopeo/stable:latest", "list-tags", "docker://ghcr.io/bentoml/openllm"]).decode().strip())["Tags"][-2]
@attr.attrs(eq=False, order=False, slots=True, frozen=True)
class RefResolver:
git_hash: str = attr.field()
version: openllm_core.utils.VersionInfo = attr.field(converter=_convert_version_from_string)
strategy: LiteralContainerVersionStrategy = attr.field()
_ghapi: t.ClassVar[all.GhApi] = all.GhApi(owner=_OWNER, repo=_REPO)
@classmethod
def _nightly_ref(cls) -> RefTuple: return _RefTuple((nightly_resolver(cls), "refs/heads/main", "nightly"))
def _nightly_ref(cls) -> RefTuple:
return _RefTuple((nightly_resolver(cls), "refs/heads/main", "nightly"))
@classmethod
def _release_ref(cls, version_str: str | None = None) -> RefTuple:
_use_base_strategy = version_str is None
@@ -70,9 +69,11 @@ class RefResolver:
meta: dict[str, t.Any] = cls._ghapi.repos.get_latest_release()
version_str = meta["name"].lstrip("v")
version: tuple[str, str | None] = (cls._ghapi.git.get_ref(ref=f"tags/{meta['name']}")["object"]["sha"], version_str)
else: version = ("", version_str)
else:
version = ("", version_str)
if openllm_core.utils.VersionInfo.from_version_string(t.cast(str, version_str)) < (0, 2, 12): raise VersionNotSupported(f"Version {version_str} doesn't support OpenLLM base container. Consider using 'nightly' or upgrade 'openllm>=0.2.12'")
return _RefTuple((*version, "release" if _use_base_strategy else "custom"))
@classmethod
@functools.lru_cache(maxsize=64)
def from_strategy(cls, strategy_or_version: t.Literal["release", "nightly"] | LiteralString | None = None) -> RefResolver:
@@ -85,19 +86,21 @@ class RefResolver:
else:
logger.warning("Using custom %s. Make sure that it is at lease 0.2.12 for base container support.", strategy_or_version)
return cls(*cls._release_ref(version_str=strategy_or_version))
@property
def tag(self) -> str:
# NOTE: latest tag can also be nightly, but discouraged to use it. For nightly refer to use sha-<git_hash_short>
if self.strategy == "latest": return "latest"
elif self.strategy == "nightly": return self.git_hash
else: return repr(self.version)
@functools.lru_cache(maxsize=256)
def get_base_container_tag(strategy: LiteralContainerVersionStrategy | None = None) -> str: return RefResolver.from_strategy(strategy).tag
def get_base_container_tag(strategy: LiteralContainerVersionStrategy | None = None) -> str:
  """Return the base-container tag resolved from *strategy* (cached via ``lru_cache``)."""
  resolver = RefResolver.from_strategy(strategy)
  return resolver.tag
def build_container(registries: LiteralContainerRegistry | t.Sequence[LiteralContainerRegistry] | None = None, version_strategy: LiteralContainerVersionStrategy = "release", push: bool = False, machine: bool = False) -> dict[str | LiteralContainerRegistry, str]:
try:
if not _BUILDER.health(): raise openllm.exceptions.Error
except (openllm.exceptions.Error, subprocess.CalledProcessError): raise RuntimeError("Building base container requires BuildKit (via Buildx) to be installed. See https://docs.docker.com/build/buildx/install/ for instalation instruction.") from None
except (openllm.exceptions.Error, subprocess.CalledProcessError):
raise RuntimeError("Building base container requires BuildKit (via Buildx) to be installed. See https://docs.docker.com/build/buildx/install/ for instalation instruction.") from None
if openllm_core.utils.device_count() == 0: raise RuntimeError("Building base container requires GPUs (None available)")
if not shutil.which("nvidia-container-runtime"): raise RuntimeError("NVIDIA Container Toolkit is required to compile CUDA kernel in container.")
if not _module_location: raise RuntimeError("Failed to determine source location of 'openllm'. (Possible broken installation)")
@@ -110,15 +113,16 @@ def build_container(registries: LiteralContainerRegistry | t.Sequence[LiteralCon
try:
outputs = _BUILDER.build(file=pathlib.Path(__file__).parent.joinpath("Dockerfile").resolve().__fspath__(), context_path=pyproject_path.parent.__fspath__(), tag=tuple(tags.values()), push=push, progress="plain" if openllm_core.utils.get_debug_mode() else "auto", quiet=machine)
if machine and outputs is not None: tags["image_sha"] = outputs.decode("utf-8").strip()
except Exception as err: raise openllm.exceptions.OpenLLMException(f"Failed to containerize base container images (Scroll up to see error above, or set OPENLLMDEVDEBUG=True for more traceback):\n{err}") from err
except Exception as err:
raise openllm.exceptions.OpenLLMException(f"Failed to containerize base container images (Scroll up to see error above, or set OPENLLMDEVDEBUG=True for more traceback):\n{err}") from err
return tags
if t.TYPE_CHECKING:
CONTAINER_NAMES: dict[LiteralContainerRegistry, str]
supported_registries: list[str]
__all__ = ["CONTAINER_NAMES", "get_base_container_tag", "build_container", "get_base_container_name", "supported_registries", "RefResolver"]
def __dir__() -> list[str]: return sorted(__all__)
def __dir__() -> list[str]:
  """Module ``dir()`` hook: list the public API in sorted order."""
  public = list(__all__)
  public.sort()
  return public
def __getattr__(name: str) -> t.Any:
if name == "supported_registries": return functools.lru_cache(1)(lambda: list(_CONTAINER_REGISTRY))()
elif name == "CONTAINER_NAMES": return _CONTAINER_REGISTRY

View File

@@ -11,7 +11,6 @@ from . import termui
if t.TYPE_CHECKING:
import subprocess
from openllm_core._configuration import LLMConfig
logger = logging.getLogger(__name__)
P = ParamSpec("P")
@@ -19,13 +18,10 @@ LiteralOutput = t.Literal["json", "pretty", "porcelain"]
_AnyCallable = t.Callable[..., t.Any]
FC = t.TypeVar("FC", bound=t.Union[_AnyCallable, click.Command])
def bento_complete_envvar(ctx: click.Context, param: click.Parameter, incomplete: str) -> list[sc.CompletionItem]:
  """Shell-completion helper: suggest OpenLLM-built Bento tags whose name starts with *incomplete*."""
  required_labels = {"start_name", "bundler"}
  suggestions: list[sc.CompletionItem] = []
  for bento in bentoml.list():
    tag = str(bento.tag)
    if tag.startswith(incomplete) and all(label in bento.info.labels for label in required_labels):
      suggestions.append(sc.CompletionItem(tag, help="Bento"))
  return suggestions
def model_complete_envvar(ctx: click.Context, param: click.Parameter, incomplete: str) -> list[sc.CompletionItem]:
  """Shell-completion helper: suggest supported model names (dasherized) matching *incomplete*."""
  return [sc.CompletionItem(inflection.dasherize(name), help="Model") for name in openllm.CONFIG_MAPPING if name.startswith(incomplete)]
def parse_config_options(config: LLMConfig, server_timeout: int, workers_per_resource: float, device: t.Tuple[str, ...] | None, cors: bool, environ: DictStrAny) -> DictStrAny:
# TODO: Support amd.com/gpu on k8s
_bentoml_config_options_env = environ.pop("BENTOML_CONFIG_OPTIONS", "")
@@ -41,9 +37,7 @@ def parse_config_options(config: LLMConfig, server_timeout: int, workers_per_res
environ["BENTOML_CONFIG_OPTIONS"] = _bentoml_config_options_env
if DEBUG: logger.debug("Setting BENTOML_CONFIG_OPTIONS=%s", _bentoml_config_options_env)
return environ
_adapter_mapping_key = "adapter_map"
def _id_callback(ctx: click.Context, _: click.Parameter, value: t.Tuple[str, ...] | None) -> None:
if not value: return None
if _adapter_mapping_key not in ctx.params: ctx.params[_adapter_mapping_key] = {}
@@ -51,28 +45,20 @@ def _id_callback(ctx: click.Context, _: click.Parameter, value: t.Tuple[str, ...
adapter_id, *adapter_name = v.rsplit(":", maxsplit=1)
# try to resolve the full path if users pass in relative,
# currently only support one level of resolve path with current directory
try: adapter_id = openllm.utils.resolve_user_filepath(adapter_id, os.getcwd())
except FileNotFoundError: pass
try:
adapter_id = openllm.utils.resolve_user_filepath(adapter_id, os.getcwd())
except FileNotFoundError:
pass
ctx.params[_adapter_mapping_key][adapter_id] = adapter_name[0] if len(adapter_name) > 0 else None
return None
def start_command_factory(group: click.Group, model: str, _context_settings: DictStrAny | None = None, _serve_grpc: bool = False) -> click.Command:
"""Generate a 'click.Command' for any given LLM.
Args:
group: the target ``click.Group`` to save this LLM cli under
model: The name of the model or the ``bentoml.Bento`` instance.
Returns:
The click.Command for starting the model server
Note that the internal commands will return the llm_config and a boolean determine
whether the server is run with GPU or not.
"""
llm_config = openllm.AutoConfig.for_model(model)
command_attrs: DictStrAny = dict(
name=llm_config["model_name"], context_settings=_context_settings or termui.CONTEXT_SETTINGS, short_help=f"Start a LLMServer for '{model}'", aliases=[llm_config["start_name"]] if llm_config["name_type"] == "dasherize" else None, help=f"""\
name=llm_config["model_name"],
context_settings=_context_settings or termui.CONTEXT_SETTINGS,
short_help=f"Start a LLMServer for '{model}'",
aliases=[llm_config["start_name"]] if llm_config["name_type"] == "dasherize" else None,
help=f"""\
{llm_config['env'].start_docstring}
\b
@@ -95,16 +81,14 @@ Available official model_id(s): [default: {llm_config['default_id']}]
if llm_config["requires_gpu"] and openllm.utils.device_count() < 1:
# NOTE: The model requires GPU, therefore we will return a dummy command
command_attrs.update({"short_help": "(Disabled because there is no GPU available)", "help": f"""{model} is currently not available to run on your local machine because it requires GPU for inference."""})
command_attrs.update({"short_help": "(Disabled because there is no GPU available)", "help": f"{model} is currently not available to run on your local machine because it requires GPU for inference."})
return noop_command(group, llm_config, _serve_grpc, **command_attrs)
@group.command(**command_attrs)
@start_decorator(llm_config, serve_grpc=_serve_grpc)
@click.pass_context
def start_cmd(
ctx: click.Context, /, server_timeout: int, model_id: str | None, model_version: str | None, workers_per_resource: t.Literal["conserved", "round_robin"] | LiteralString, device: t.Tuple[str, ...], quantize: t.Literal["int8", "int4", "gptq"] | None, bettertransformer: bool | None, runtime: t.Literal["ggml", "transformers"], fast: bool,
serialisation_format: t.Literal["safetensors", "legacy"], cors: bool, adapter_id: str | None, return_process: bool, **attrs: t.Any,
) -> LLMConfig | subprocess.Popen[bytes]:
def start_cmd(ctx: click.Context, /, server_timeout: int, model_id: str | None, model_version: str | None, workers_per_resource: t.Literal["conserved", "round_robin"] | LiteralString, device: t.Tuple[str, ...], quantize: t.Literal["int8", "int4", "gptq"] | None, bettertransformer: bool | None, runtime: t.Literal["ggml", "transformers"], fast: bool, serialisation_format: t.Literal["safetensors", "legacy"], cors: bool, adapter_id: str | None, return_process: bool, **attrs: t.Any,
) -> LLMConfig | subprocess.Popen[bytes]:
fast = str(fast).upper() in openllm.utils.ENV_VARS_TRUE_VALUES
if serialisation_format == "safetensors" and quantize is not None and os.environ.get("OPENLLM_SERIALIZATION_WARNING", str(True)).upper() in openllm.utils.ENV_VARS_TRUE_VALUES:
termui.echo(f"'--quantize={quantize}' might not work with 'safetensors' serialisation format. Use with caution!. To silence this warning, set \"OPENLLM_SERIALIZATION_WARNING=False\"\nNote: You can always fallback to '--serialisation legacy' when running quantisation.", fg="yellow")
@@ -176,7 +160,6 @@ Available official model_id(s): [default: {llm_config['default_id']}]
return config
return start_cmd
def noop_command(group: click.Group, llm_config: LLMConfig, _serve_grpc: bool, **command_attrs: t.Any) -> click.Command:
context_settings = command_attrs.pop("context_settings", {})
context_settings.update({"ignore_unknown_options": True, "allow_extra_args": True})
@@ -189,7 +172,6 @@ def noop_command(group: click.Group, llm_config: LLMConfig, _serve_grpc: bool, *
return llm_config
return noop
def prerequisite_check(ctx: click.Context, llm_config: LLMConfig, quantize: LiteralString | None, adapter_map: dict[str, str | None] | None, num_workers: int) -> None:
if adapter_map and not openllm.utils.is_peft_available(): ctx.fail("Using adapter requires 'peft' to be available. Make sure to install with 'pip install \"openllm[fine-tune]\"'")
if quantize and llm_config.default_implementation() == "vllm": ctx.fail(f"Quantization is not yet supported with vLLM. Set '{llm_config['env']['framework']}=\"pt\"' to run with quantization.")
@@ -197,20 +179,21 @@ def prerequisite_check(ctx: click.Context, llm_config: LLMConfig, quantize: Lite
if requirements is not None and len(requirements) > 0:
missing_requirements = [i for i in requirements if importlib.util.find_spec(inflection.underscore(i)) is None]
if len(missing_requirements) > 0: termui.echo(f"Make sure to have the following dependencies available: {missing_requirements}", fg="yellow")
def start_decorator(llm_config: LLMConfig, serve_grpc: bool = False) -> t.Callable[[FC], t.Callable[[FC], FC]]:
def wrapper(fn: FC) -> t.Callable[[FC], FC]:
composed = openllm.utils.compose(
llm_config.to_click_options, _http_server_args if not serve_grpc else _grpc_server_args,
cog.optgroup.group("General LLM Options", help=f"The following options are related to running '{llm_config['start_name']}' LLM Server."),
model_id_option(factory=cog.optgroup, model_env=llm_config["env"]),
model_version_option(factory=cog.optgroup),
cog.optgroup.option("--server-timeout", type=int, default=None, help="Server timeout in seconds"),
workers_per_resource_option(factory=cog.optgroup),
cors_option(factory=cog.optgroup),
fast_option(factory=cog.optgroup),
cog.optgroup.group(
"LLM Optimization Options", help="""Optimization related options.
llm_config.to_click_options,
_http_server_args if not serve_grpc else _grpc_server_args,
cog.optgroup.group("General LLM Options", help=f"The following options are related to running '{llm_config['start_name']}' LLM Server."),
model_id_option(factory=cog.optgroup, model_env=llm_config["env"]),
model_version_option(factory=cog.optgroup),
cog.optgroup.option("--server-timeout", type=int, default=None, help="Server timeout in seconds"),
workers_per_resource_option(factory=cog.optgroup),
cors_option(factory=cog.optgroup),
fast_option(factory=cog.optgroup),
cog.optgroup.group(
"LLM Optimization Options",
help="""Optimization related options.
OpenLLM supports running model with [BetterTransformer](https://pytorch.org/blog/a-better-transformer-for-fast-transformer-encoder-inference/),
k-bit quantization (8-bit, 4-bit), GPTQ quantization, PagedAttention via vLLM.
@@ -220,14 +203,13 @@ def start_decorator(llm_config: LLMConfig, serve_grpc: bool = False) -> t.Callab
- DeepSpeed Inference: [link](https://www.deepspeed.ai/inference/)
- GGML: Fast inference on [bare metal](https://github.com/ggerganov/ggml)
""",
),
cog.optgroup.option("--device", type=openllm.utils.dantic.CUDA, multiple=True, envvar="CUDA_VISIBLE_DEVICES", callback=parse_device_callback, help=f"Assign GPU devices (if available) for {llm_config['model_name']}.", show_envvar=True),
cog.optgroup.option("--runtime", type=click.Choice(["ggml", "transformers"]), default="transformers", help="The runtime to use for the given model. Default is transformers."),
quantize_option(factory=cog.optgroup, model_env=llm_config["env"]),
bettertransformer_option(factory=cog.optgroup, model_env=llm_config["env"]),
serialisation_option(factory=cog.optgroup),
cog.optgroup.group(
"Fine-tuning related options", help="""\
),
cog.optgroup.option("--device", type=openllm.utils.dantic.CUDA, multiple=True, envvar="CUDA_VISIBLE_DEVICES", callback=parse_device_callback, help=f"Assign GPU devices (if available) for {llm_config['model_name']}.", show_envvar=True),
cog.optgroup.option("--runtime", type=click.Choice(["ggml", "transformers"]), default="transformers", help="The runtime to use for the given model. Default is transformers."),
quantize_option(factory=cog.optgroup, model_env=llm_config["env"]),
bettertransformer_option(factory=cog.optgroup, model_env=llm_config["env"]),
serialisation_option(factory=cog.optgroup),
cog.optgroup.group("Fine-tuning related options", help="""\
Note that the argument `--adapter-id` can accept the following format:
- `--adapter-id /path/to/adapter` (local adapter)
@@ -241,14 +223,13 @@ def start_decorator(llm_config: LLMConfig, serve_grpc: bool = False) -> t.Callab
$ openllm start opt --adapter-id /path/to/adapter_dir --adapter-id remote/adapter:eng_lora
```
""",
),
cog.optgroup.option("--adapter-id", default=None, help="Optional name or path for given LoRA adapter" + f" to wrap '{llm_config['model_name']}'", multiple=True, callback=_id_callback, metavar="[PATH | [remote/][adapter_name:]adapter_id][, ...]"),
click.option("--return-process", is_flag=True, default=False, help="Internal use only.", hidden=True),
"""),
cog.optgroup.option("--adapter-id", default=None, help="Optional name or path for given LoRA adapter" + f" to wrap '{llm_config['model_name']}'", multiple=True, callback=_id_callback, metavar="[PATH | [remote/][adapter_name:]adapter_id][, ...]"),
click.option("--return-process", is_flag=True, default=False, help="Internal use only.", hidden=True),
)
return composed(fn)
return wrapper
return wrapper
def parse_device_callback(ctx: click.Context, param: click.Parameter, value: tuple[tuple[str], ...] | None) -> t.Tuple[str, ...] | None:
if value is None: return value
if not isinstance(value, tuple): ctx.fail(f"{param} only accept multiple values, not {type(value)} (value: {value})")
@@ -256,12 +237,10 @@ def parse_device_callback(ctx: click.Context, param: click.Parameter, value: tup
# NOTE: --device all is a special case
if len(el) == 1 and el[0] == "all": return tuple(map(str, openllm.utils.available_devices()))
return el
# NOTE: A list of bentoml option that is not needed for parsing.
# NOTE: User shouldn't set '--working-dir', as OpenLLM will setup this.
# NOTE: production is also deprecated
_IGNORED_OPTIONS = {"working_dir", "production", "protocol_version"}
def parse_serve_args(serve_grpc: bool) -> t.Callable[[t.Callable[..., LLMConfig]], t.Callable[[FC], FC]]:
"""Parsing `bentoml serve|serve-grpc` click.Option to be parsed via `openllm start`."""
from bentoml_cli.cli import cli
@@ -285,10 +264,9 @@ def parse_serve_args(serve_grpc: bool) -> t.Callable[[t.Callable[..., LLMConfig]
param_decls = (*attrs.pop("opts"), *attrs.pop("secondary_opts"))
f = cog.optgroup.option(*param_decls, **attrs)(f)
return group(f)
return decorator
_http_server_args, _grpc_server_args = parse_serve_args(False), parse_serve_args(True)
def _click_factory_type(*param_decls: t.Any, **attrs: t.Any) -> t.Callable[[FC | None], FC]:
"""General ``@click`` decorator with some sauce.
@@ -298,117 +276,147 @@ def _click_factory_type(*param_decls: t.Any, **attrs: t.Any) -> t.Callable[[FC |
factory = attrs.pop("factory", click)
factory_attr = attrs.pop("attr", "option")
if factory_attr != "argument": attrs.setdefault("help", "General option for OpenLLM CLI.")
def decorator(f: FC | None) -> FC:
callback = getattr(factory, factory_attr, None)
if callback is None: raise ValueError(f"Factory {factory} has no attribute {factory_attr}.")
return t.cast(FC, callback(*param_decls, **attrs)(f) if f is not None else callback(*param_decls, **attrs))
return decorator
return decorator
cli_option = functools.partial(_click_factory_type, attr="option")
cli_argument = functools.partial(_click_factory_type, attr="argument")
def output_option(f: _AnyCallable | None = None, *, default_value: LiteralOutput = "pretty", **attrs: t.Any) -> t.Callable[[FC], FC]:
output = ["json", "pretty", "porcelain"]
def complete_output_var(ctx: click.Context, param: click.Parameter, incomplete: str) -> list[CompletionItem]: return [CompletionItem(it) for it in output]
def complete_output_var(ctx: click.Context, param: click.Parameter, incomplete: str) -> list[CompletionItem]:
return [CompletionItem(it) for it in output]
return cli_option("-o", "--output", "output", type=click.Choice(output), default=default_value, help="Showing output type.", show_default=True, envvar="OPENLLM_OUTPUT", show_envvar=True, shell_complete=complete_output_var, **attrs)(f)
def fast_option(f: _AnyCallable | None = None, **attrs: t.Any) -> t.Callable[[FC], FC]:
return cli_option(
"--fast/--no-fast", show_default=True, default=False, envvar="OPENLLM_USE_LOCAL_LATEST", show_envvar=True, help="""Whether to skip checking if models is already in store.
return cli_option("--fast/--no-fast", show_default=True, default=False, envvar="OPENLLM_USE_LOCAL_LATEST", show_envvar=True, help="""Whether to skip checking if models is already in store.
This is useful if you already downloaded or setup the model beforehand.
""", **attrs
)(f)
def cors_option(f: _AnyCallable | None = None, **attrs: t.Any) -> t.Callable[[FC], FC]: return cli_option("--cors/--no-cors", show_default=True, default=False, envvar="OPENLLM_CORS", show_envvar=True, help="Enable CORS for the server.", **attrs)(f)
def machine_option(f: _AnyCallable | None = None, **attrs: t.Any) -> t.Callable[[FC], FC]: return cli_option("--machine", is_flag=True, default=False, hidden=True, **attrs)(f)
def model_id_option(f: _AnyCallable | None = None, *, model_env: openllm.utils.EnvVarMixin | None = None, **attrs: t.Any) -> t.Callable[[FC], FC]: return cli_option("--model-id", type=click.STRING, default=None, envvar=model_env.model_id if model_env is not None else None, show_envvar=model_env is not None, help="Optional model_id name or path for (fine-tune) weight.", **attrs)(f)
def model_version_option(f: _AnyCallable | None = None, **attrs: t.Any) -> t.Callable[[FC], FC]: return cli_option("--model-version", type=click.STRING, default=None, help="Optional model version to save for this model. It will be inferred automatically from model-id.", **attrs)(f)
def model_name_argument(f: _AnyCallable | None = None, required: bool = True, **attrs: t.Any) -> t.Callable[[FC], FC]: return cli_argument("model_name", type=click.Choice([inflection.dasherize(name) for name in openllm.CONFIG_MAPPING]), required=required, **attrs)(f)
""", **attrs)(f)
def cors_option(f: _AnyCallable | None = None, **attrs: t.Any) -> t.Callable[[FC], FC]:
  """``--cors/--no-cors`` flag (env: ``OPENLLM_CORS``), disabled by default."""
  option = cli_option("--cors/--no-cors", show_default=True, default=False, envvar="OPENLLM_CORS", show_envvar=True, help="Enable CORS for the server.", **attrs)
  return option(f)
def machine_option(f: _AnyCallable | None = None, **attrs: t.Any) -> t.Callable[[FC], FC]:
  """Hidden internal ``--machine`` boolean flag (defaults to off)."""
  option = cli_option("--machine", is_flag=True, default=False, hidden=True, **attrs)
  return option(f)
def model_id_option(f: _AnyCallable | None = None, *, model_env: openllm.utils.EnvVarMixin | None = None, **attrs: t.Any) -> t.Callable[[FC], FC]:
  """``--model-id`` option; environment-variable wiring is taken from *model_env* when given."""
  envvar = model_env.model_id if model_env is not None else None
  option = cli_option("--model-id", type=click.STRING, default=None, envvar=envvar, show_envvar=model_env is not None, help="Optional model_id name or path for (fine-tune) weight.", **attrs)
  return option(f)
def model_version_option(f: _AnyCallable | None = None, **attrs: t.Any) -> t.Callable[[FC], FC]:
  """``--model-version`` option; when omitted the version is inferred from the model-id."""
  option = cli_option("--model-version", type=click.STRING, default=None, help="Optional model version to save for this model. It will be inferred automatically from model-id.", **attrs)
  return option(f)
def model_name_argument(f: _AnyCallable | None = None, required: bool = True, **attrs: t.Any) -> t.Callable[[FC], FC]:
  """Positional ``model_name`` argument restricted to the supported (dasherized) model names."""
  choices = click.Choice([inflection.dasherize(name) for name in openllm.CONFIG_MAPPING])
  argument = cli_argument("model_name", type=choices, required=required, **attrs)
  return argument(f)
def quantize_option(f: _AnyCallable | None = None, *, build: bool = False, model_env: openllm.utils.EnvVarMixin | None = None, **attrs: t.Any) -> t.Callable[[FC], FC]:
return cli_option(
"--quantise", "--quantize", "quantize", type=click.Choice(["int8", "int4", "gptq"]), default=None, envvar=model_env.quantize if model_env is not None else None, show_envvar=model_env is not None, help="""Dynamic quantization for running this LLM.
"--quantise",
"--quantize",
"quantize",
type=click.Choice(["int8", "int4", "gptq"]),
default=None,
envvar=model_env.quantize if model_env is not None else None,
show_envvar=model_env is not None,
help="""Dynamic quantization for running this LLM.
The following quantization strategies are supported:
The following quantization strategies are supported:
- ``int8``: ``LLM.int8`` for [8-bit](https://arxiv.org/abs/2208.07339) quantization.
- ``int8``: ``LLM.int8`` for [8-bit](https://arxiv.org/abs/2208.07339) quantization.
- ``int4``: ``SpQR`` for [4-bit](https://arxiv.org/abs/2306.03078) quantization.
- ``int4``: ``SpQR`` for [4-bit](https://arxiv.org/abs/2306.03078) quantization.
- ``gptq``: ``GPTQ`` [quantization](https://arxiv.org/abs/2210.17323)
- ``gptq``: ``GPTQ`` [quantization](https://arxiv.org/abs/2210.17323)
> [!NOTE] that the model can also be served with quantized weights.
""" + (
"""
> [!NOTE] that this will set the mode for serving within deployment.""" if build else ""
) + """
> [!NOTE] that quantization are currently only available in *PyTorch* models.""", **attrs
> [!NOTE] that the model can also be served with quantized weights.
""" + ("""
> [!NOTE] that this will set the mode for serving within deployment.""" if build else "") + """
> [!NOTE] that quantization are currently only available in *PyTorch* models.""",
**attrs
)(f)
def workers_per_resource_option(f: _AnyCallable | None = None, *, build: bool = False, **attrs: t.Any) -> t.Callable[[FC], FC]:
return cli_option(
"--workers-per-resource", default=None, callback=workers_per_resource_callback, type=str, required=False, help="""Number of workers per resource assigned.
"--workers-per-resource",
default=None,
callback=workers_per_resource_callback,
type=str,
required=False,
help="""Number of workers per resource assigned.
See https://docs.bentoml.org/en/latest/guides/scheduling.html#resource-scheduling-strategy
for more information. By default, this is set to 1.
See https://docs.bentoml.org/en/latest/guides/scheduling.html#resource-scheduling-strategy
for more information. By default, this is set to 1.
> [!NOTE] ``--workers-per-resource`` will also accept the following strategies:
> [!NOTE] ``--workers-per-resource`` will also accept the following strategies:
- ``round_robin``: Similar behaviour when setting ``--workers-per-resource 1``. This is useful for smaller models.
- ``round_robin``: Similar behaviour when setting ``--workers-per-resource 1``. This is useful for smaller models.
- ``conserved``: This will determine the number of available GPU resources, and only assign one worker for the LLMRunner. For example, if ther are 4 GPUs available, then ``conserved`` is equivalent to ``--workers-per-resource 0.25``.
""" + (
"""\n
> [!NOTE] The workers value passed into 'build' will determine how the LLM can
> be provisioned in Kubernetes as well as in standalone container. This will
> ensure it has the same effect with 'openllm start --workers ...'""" if build else ""
), **attrs
- ``conserved``: This will determine the number of available GPU resources, and only assign one worker for the LLMRunner. For example, if ther are 4 GPUs available, then ``conserved`` is equivalent to ``--workers-per-resource 0.25``.
""" + ("""\n
> [!NOTE] The workers value passed into 'build' will determine how the LLM can
> be provisioned in Kubernetes as well as in standalone container. This will
> ensure it has the same effect with 'openllm start --api-workers ...'""" if build else ""),
**attrs
)(f)
def bettertransformer_option(f: _AnyCallable | None = None, *, build: bool = False, model_env: openllm.utils.EnvVarMixin | None = None, **attrs: t.Any) -> t.Callable[[FC], FC]:
return cli_option(
"--bettertransformer", is_flag=True, default=None, envvar=model_env.bettertransformer if model_env is not None else None, show_envvar=model_env is not None, help="Apply FasterTransformer wrapper to serve model. This will applies during serving time." if not build else "Set default environment variable whether to serve this model with FasterTransformer in build time.", **attrs
)(f)
return cli_option("--bettertransformer", is_flag=True, default=None, envvar=model_env.bettertransformer if model_env is not None else None, show_envvar=model_env is not None, help="Apply FasterTransformer wrapper to serve model. This will applies during serving time." if not build else "Set default environment variable whether to serve this model with FasterTransformer in build time.", **attrs)(f)
def serialisation_option(f: _AnyCallable | None = None, **attrs: t.Any) -> t.Callable[[FC], FC]:
return cli_option(
"--serialisation", "--serialization", "serialisation_format", type=click.Choice(["safetensors", "legacy"]), default="safetensors", show_default=True, show_envvar=True, envvar="OPENLLM_SERIALIZATION", help="""Serialisation format for save/load LLM.
"--serialisation",
"--serialization",
"serialisation_format",
type=click.Choice(["safetensors", "legacy"]),
default="safetensors",
show_default=True,
show_envvar=True,
envvar="OPENLLM_SERIALIZATION",
help="""Serialisation format for save/load LLM.
Currently the following strategies are supported:
Currently the following strategies are supported:
- ``safetensors``: This will use safetensors format, which is synonymous to
- ``safetensors``: This will use safetensors format, which is synonymous to
\b
``safe_serialization=True``.
\b
``safe_serialization=True``.
\b
> [!NOTE] that this format might not work for every cases, and
you can always fallback to ``legacy`` if needed.
\b
> [!NOTE] that this format might not work for every cases, and
you can always fallback to ``legacy`` if needed.
- ``legacy``: This will use PyTorch serialisation format, often as ``.bin`` files.
This should be used if the model doesn't yet support safetensors.
- ``legacy``: This will use PyTorch serialisation format, often as ``.bin`` files. This should be used if the model doesn't yet support safetensors.
> [!NOTE] that GGML format is working in progress.
""", **attrs
> [!NOTE] that GGML format is working in progress.
""",
**attrs
)(f)
def container_registry_option(f: _AnyCallable | None = None, **attrs: t.Any) -> t.Callable[[FC], FC]:
return cli_option(
"--container-registry", "container_registry", type=click.Choice(list(openllm.bundle.CONTAINER_NAMES)), default="ecr", show_default=True, show_envvar=True, envvar="OPENLLM_CONTAINER_REGISTRY", callback=container_registry_callback, help="""The default container registry to get the base image for building BentoLLM.
"--container-registry",
"container_registry",
type=click.Choice(list(openllm.bundle.CONTAINER_NAMES)),
default="ecr",
show_default=True,
show_envvar=True,
envvar="OPENLLM_CONTAINER_REGISTRY",
callback=container_registry_callback,
help="""The default container registry to get the base image for building BentoLLM.
Currently, it supports 'ecr', 'ghcr.io', 'docker.io'
Currently, it supports 'ecr', 'ghcr.io', 'docker.io'
\b
> [!NOTE] that in order to build the base image, you will need a GPUs to compile custom kernel. See ``openllm ext build-base-container`` for more information.
""", **attrs
\b
> [!NOTE] that in order to build the base image, you will need a GPUs to compile custom kernel. See ``openllm ext build-base-container`` for more information.
""",
**attrs
)(f)
_wpr_strategies = {"round_robin", "conserved"}
def workers_per_resource_callback(ctx: click.Context, param: click.Parameter, value: str | None) -> str | None:
if value is None: return value
value = inflection.underscore(value)
if value in _wpr_strategies: return value
else:
try: float(value) # type: ignore[arg-type]
except ValueError: raise click.BadParameter(f"'workers_per_resource' only accept '{_wpr_strategies}' as possible strategies, otherwise pass in float.", ctx, param) from None
try:
float(value) # type: ignore[arg-type]
except ValueError:
raise click.BadParameter(f"'workers_per_resource' only accept '{_wpr_strategies}' as possible strategies, otherwise pass in float.", ctx, param) from None
else:
return value
def container_registry_callback(ctx: click.Context, param: click.Parameter, value: str | None) -> str | None:
if value is None: return value
if value not in openllm.bundle.supported_registries: raise click.BadParameter(f"Value must be one of {openllm.bundle.supported_registries}", ctx, param)

View File

@@ -10,11 +10,26 @@ if t.TYPE_CHECKING:
from openllm_core._configuration import LLMConfig
from openllm_core._typing_compat import LiteralString, LiteralRuntime, LiteralContainerRegistry, LiteralContainerVersionStrategy
from bentoml._internal.bento import BentoStore
logger = logging.getLogger(__name__)
def _start(model_name: str, /, *, model_id: str | None = None, timeout: int = 30, workers_per_resource: t.Literal["conserved", "round_robin"] | float | None = None, device: tuple[str, ...] | t.Literal["all"] | None = None, quantize: t.Literal["int8", "int4", "gptq"] | None = None, bettertransformer: bool | None = None, runtime: t.Literal["ggml", "transformers"] = "transformers",
adapter_map: dict[LiteralString, str | None] | None = None, framework: LiteralRuntime | None = None, additional_args: list[str] | None = None, cors: bool = False, _serve_grpc: bool = False, __test__: bool = False, **_: t.Any) -> LLMConfig | subprocess.Popen[bytes]:
def _start(
model_name: str,
/,
*,
model_id: str | None = None,
timeout: int = 30,
workers_per_resource: t.Literal["conserved", "round_robin"] | float | None = None,
device: tuple[str, ...] | t.Literal["all"] | None = None,
quantize: t.Literal["int8", "int4", "gptq"] | None = None,
bettertransformer: bool | None = None,
runtime: t.Literal["ggml", "transformers"] = "transformers",
adapter_map: dict[LiteralString, str | None] | None = None,
framework: LiteralRuntime | None = None,
additional_args: list[str] | None = None,
cors: bool = False,
_serve_grpc: bool = False,
__test__: bool = False,
**_: t.Any
) -> LLMConfig | subprocess.Popen[bytes]:
"""Python API to start a LLM server. These provides one-to-one mapping to CLI arguments.
For all additional arguments, pass it as string to ``additional_args``. For example, if you want to
@@ -73,9 +88,31 @@ def _start(model_name: str, /, *, model_id: str | None = None, timeout: int = 30
if __test__: args.append("--return-process")
return start_command_factory(start_command if not _serve_grpc else start_grpc_command, model_name, _context_settings=termui.CONTEXT_SETTINGS, _serve_grpc=_serve_grpc).main(args=args if len(args) > 0 else None, standalone_mode=False)
@inject
def _build(model_name: str, /, *, model_id: str | None = None, model_version: str | None = None, bento_version: str | None = None, quantize: t.Literal["int8", "int4", "gptq"] | None = None, bettertransformer: bool | None = None, adapter_map: dict[str, str | None] | None = None, build_ctx: str | None = None, enable_features: tuple[str, ...] | None = None, workers_per_resource: float | None = None, runtime: t.Literal["ggml", "transformers"] = "transformers", dockerfile_template: str | None = None, overwrite: bool = False, container_registry: LiteralContainerRegistry | None = None, container_version_strategy: LiteralContainerVersionStrategy | None = None, push: bool = False, containerize: bool = False, serialisation_format: t.Literal["safetensors", "legacy"] = "safetensors", additional_args: list[str] | None = None, bento_store: BentoStore = Provide[BentoMLContainer.bento_store]) -> bentoml.Bento:
def _build(
model_name: str,
/,
*,
model_id: str | None = None,
model_version: str | None = None,
bento_version: str | None = None,
quantize: t.Literal["int8", "int4", "gptq"] | None = None,
bettertransformer: bool | None = None,
adapter_map: dict[str, str | None] | None = None,
build_ctx: str | None = None,
enable_features: tuple[str, ...] | None = None,
workers_per_resource: float | None = None,
runtime: t.Literal["ggml", "transformers"] = "transformers",
dockerfile_template: str | None = None,
overwrite: bool = False,
container_registry: LiteralContainerRegistry | None = None,
container_version_strategy: LiteralContainerVersionStrategy | None = None,
push: bool = False,
containerize: bool = False,
serialisation_format: t.Literal["safetensors", "legacy"] = "safetensors",
additional_args: list[str] | None = None,
bento_store: BentoStore = Provide[BentoMLContainer.bento_store]
) -> bentoml.Bento:
"""Package a LLM into a Bento.
The LLM will be built into a BentoService with the following structure:
@@ -155,7 +192,6 @@ def _build(model_name: str, /, *, model_id: str | None = None, model_version: st
matched = re.match(r"__tag__:([^:\n]+:[^:\n]+)$", output.decode("utf-8").strip())
if matched is None: raise ValueError(f"Failed to find tag from output: {output.decode('utf-8').strip()}\nNote: Output from 'openllm build' might not be correct. Please open an issue on GitHub.")
return bentoml.get(matched.group(1), _bento_store=bento_store)
def _import_model(model_name: str, /, *, model_id: str | None = None, model_version: str | None = None, runtime: t.Literal["ggml", "transformers"] = "transformers", implementation: LiteralRuntime = "pt", quantize: t.Literal["int8", "int4", "gptq"] | None = None, serialisation_format: t.Literal["legacy", "safetensors"] = "safetensors", additional_args: t.Sequence[str] | None = None) -> bentoml.Model:
"""Import a LLM into local store.
@@ -194,12 +230,9 @@ def _import_model(model_name: str, /, *, model_id: str | None = None, model_vers
if additional_args is not None: args.extend(additional_args)
if quantize is not None: args.extend(["--quantize", quantize])
return import_command.main(args=args, standalone_mode=False)
def _list_models() -> dict[str, t.Any]:
"""List all available models within the local store."""
from .entrypoint import models_command
return models_command.main(args=["-o", "json", "--show-available", "--machine"], standalone_mode=False)
start, start_grpc, build, import_model, list_models = openllm_core.utils.codegen.gen_sdk(_start, _serve_grpc=False), openllm_core.utils.codegen.gen_sdk(_start, _serve_grpc=True), openllm_core.utils.codegen.gen_sdk(_build), openllm_core.utils.codegen.gen_sdk(_import_model), openllm_core.utils.codegen.gen_sdk(_list_models)
__all__ = ["start", "start_grpc", "build", "import_model", "list_models"]

View File

@@ -26,57 +26,12 @@ from bentoml_cli.utils import BentoMLCommandGroup, opt_callback
from bentoml._internal.configuration.containers import BentoMLContainer
from bentoml._internal.models.model import ModelStore
from . import termui
from ._factory import (
FC,
LiteralOutput,
_AnyCallable,
bettertransformer_option,
container_registry_option,
fast_option,
machine_option,
model_id_option,
model_name_argument,
model_version_option,
output_option,
parse_device_callback,
quantize_option,
serialisation_option,
start_command_factory,
workers_per_resource_option,
)
from ._factory import FC, LiteralOutput, _AnyCallable, bettertransformer_option, container_registry_option, fast_option, machine_option, model_id_option, model_name_argument, model_version_option, output_option, parse_device_callback, quantize_option, serialisation_option, start_command_factory, workers_per_resource_option
from openllm import bundle, serialisation
from openllm.exceptions import OpenLLMException
from openllm.models.auto import (
CONFIG_MAPPING,
MODEL_FLAX_MAPPING_NAMES,
MODEL_MAPPING_NAMES,
MODEL_TF_MAPPING_NAMES,
MODEL_VLLM_MAPPING_NAMES,
AutoConfig,
AutoLLM,
)
from openllm.models.auto import CONFIG_MAPPING, MODEL_FLAX_MAPPING_NAMES, MODEL_MAPPING_NAMES, MODEL_TF_MAPPING_NAMES, MODEL_VLLM_MAPPING_NAMES, AutoConfig, AutoLLM
from openllm_core._typing_compat import DictStrAny, ParamSpec, Concatenate, LiteralString, Self, LiteralRuntime
from openllm_core.utils import (
DEBUG,
DEBUG_ENV_VAR,
OPTIONAL_DEPENDENCIES,
QUIET_ENV_VAR,
EnvVarMixin,
LazyLoader,
analytics,
bentoml_cattr,
compose,
configure_logging,
dantic,
first_not_none,
get_debug_mode,
get_quiet_mode,
is_torch_available,
is_transformers_supports_agent,
resolve_user_filepath,
set_debug_mode,
set_quiet_mode,
)
from openllm_core.utils import DEBUG, DEBUG_ENV_VAR, OPTIONAL_DEPENDENCIES, QUIET_ENV_VAR, EnvVarMixin, LazyLoader, analytics, bentoml_cattr, compose, configure_logging, dantic, first_not_none, get_debug_mode, get_quiet_mode, is_torch_available, is_transformers_supports_agent, resolve_user_filepath, set_debug_mode, set_quiet_mode
from openllm.utils import infer_auto_class
if t.TYPE_CHECKING:
@@ -85,7 +40,8 @@ if t.TYPE_CHECKING:
from bentoml._internal.container import DefaultBuilder
from openllm_core._schema import EmbeddingsOutput
from openllm_core._typing_compat import LiteralContainerRegistry, LiteralContainerVersionStrategy
else: torch = LazyLoader("torch", globals(), "torch")
else:
torch = LazyLoader("torch", globals(), "torch")
P = ParamSpec("P")
logger = logging.getLogger(__name__)
@@ -99,25 +55,27 @@ OPENLLM_FIGLET = """\
"""
ServeCommand = t.Literal["serve", "serve-grpc"]
@attr.define
class GlobalOptions:
cloud_context: str | None = attr.field(default=None)
def with_options(self, **attrs: t.Any) -> Self: return attr.evolve(self, **attrs)
def with_options(self, **attrs: t.Any) -> Self:
return attr.evolve(self, **attrs)
GrpType = t.TypeVar("GrpType", bound=click.Group)
_object_setattr = object.__setattr__
_EXT_FOLDER = os.path.abspath(os.path.join(os.path.dirname(__file__), "extension"))
class Extensions(click.MultiCommand):
def list_commands(self, ctx: click.Context) -> list[str]: return sorted([filename[:-3] for filename in os.listdir(_EXT_FOLDER) if filename.endswith(".py") and not filename.startswith("__")])
def get_command(self, ctx: click.Context, cmd_name: str) -> click.Command | None:
try: mod = __import__(f"openllm.cli.extension.{cmd_name}", None, None, ["cli"])
except ImportError: return None
return mod.cli
def list_commands(self, ctx: click.Context) -> list[str]:
return sorted([filename[:-3] for filename in os.listdir(_EXT_FOLDER) if filename.endswith(".py") and not filename.startswith("__")])
def get_command(self, ctx: click.Context, cmd_name: str) -> click.Command | None:
try:
mod = __import__(f"openllm.cli.extension.{cmd_name}", None, None, ["cli"])
except ImportError:
return None
return mod.cli
class OpenLLMCommandGroup(BentoMLCommandGroup):
NUMBER_OF_COMMON_PARAMS = 5 # parameters in common_params + 1 faked group option header
@@ -139,6 +97,7 @@ class OpenLLMCommandGroup(BentoMLCommandGroup):
elif debug: set_debug_mode(True)
configure_logging()
return f(*args, **attrs)
return wrapper
@staticmethod
@@ -148,7 +107,8 @@ class OpenLLMCommandGroup(BentoMLCommandGroup):
@functools.wraps(func)
def wrapper(do_not_track: bool, *args: P.args, **attrs: P.kwargs) -> t.Any:
if do_not_track:
with analytics.set_bentoml_tracking(): return func(*args, **attrs)
with analytics.set_bentoml_tracking():
return func(*args, **attrs)
start_time = time.time_ns()
with analytics.set_bentoml_tracking():
if group.name is None: raise ValueError("group.name should not be None")
@@ -166,16 +126,22 @@ class OpenLLMCommandGroup(BentoMLCommandGroup):
event.return_code = 2 if isinstance(e, KeyboardInterrupt) else 1
analytics.track(event)
raise
return t.cast(t.Callable[Concatenate[bool, P], t.Any], wrapper)
@staticmethod
def exception_handling(func: t.Callable[P, t.Any], group: click.Group, **attrs: t.Any) -> t.Callable[P, t.Any]:
command_name = attrs.get("name", func.__name__)
@functools.wraps(func)
def wrapper(*args: P.args, **attrs: P.kwargs) -> t.Any:
try: return func(*args, **attrs)
except OpenLLMException as err: raise click.ClickException(click.style(f"[{group.name}] '{command_name}' failed: " + err.message, fg="red")) from err
except KeyboardInterrupt: pass
try:
return func(*args, **attrs)
except OpenLLMException as err:
raise click.ClickException(click.style(f"[{group.name}] '{command_name}' failed: " + err.message, fg="red")) from err
except KeyboardInterrupt:
pass
return wrapper
def get_command(self, ctx: click.Context, cmd_name: str) -> click.Command | None:
@@ -183,13 +149,15 @@ class OpenLLMCommandGroup(BentoMLCommandGroup):
return t.cast("Extensions", extension_command).get_command(ctx, cmd_name)
cmd_name = self.resolve_alias(cmd_name)
if ctx.command.name in _start_mapping:
try: return _start_mapping[ctx.command.name][cmd_name]
try:
return _start_mapping[ctx.command.name][cmd_name]
except KeyError:
# TODO: support start from a bento
try:
bentoml.get(cmd_name)
raise click.ClickException(f"'openllm start {cmd_name}' is currently disabled for the time being. Please let us know if you need this feature by opening an issue on GitHub.")
except bentoml.exceptions.NotFound: pass
except bentoml.exceptions.NotFound:
pass
raise click.BadArgumentUsage(f"{cmd_name} is not a valid model identifier supported by OpenLLM.") from None
return super().get_command(ctx, cmd_name)
@@ -240,12 +208,13 @@ class OpenLLMCommandGroup(BentoMLCommandGroup):
# allow for 3 times the default spacing
if len(commands):
limit = formatter.width - 6 - max(len(cmd[0]) for cmd in commands)
rows: list[tuple[str, str]]= []
rows: list[tuple[str, str]] = []
for subcommand, cmd in commands:
help = cmd.get_short_help_str(limit)
rows.append((subcommand, help))
if rows:
with formatter.section(_("Commands")): formatter.write_dl(rows)
with formatter.section(_("Commands")):
formatter.write_dl(rows)
if len(extensions):
limit = formatter.width - 6 - max(len(cmd[0]) for cmd in extensions)
rows = []
@@ -253,8 +222,8 @@ class OpenLLMCommandGroup(BentoMLCommandGroup):
help = cmd.get_short_help_str(limit)
rows.append((inflection.dasherize(subcommand), help))
if rows:
with formatter.section(_("Extensions")): formatter.write_dl(rows)
with formatter.section(_("Extensions")):
formatter.write_dl(rows)
@click.group(cls=OpenLLMCommandGroup, context_settings=termui.CONTEXT_SETTINGS, name="openllm")
@click.version_option(None, "--version", "-v", message=f"%(prog)s, %(version)s (compiled: {'yes' if openllm.COMPILED else 'no'})\nPython ({platform.python_implementation()}) {platform.python_version()}")
def cli() -> None:
@@ -270,7 +239,6 @@ def cli() -> None:
An open platform for operating large language models in production.
Fine-tune, serve, deploy, and monitor any LLMs with ease.
"""
@cli.group(cls=OpenLLMCommandGroup, context_settings=termui.CONTEXT_SETTINGS, name="start", aliases=["start-http"])
def start_command() -> None:
"""Start any LLM as a REST server.
@@ -280,7 +248,6 @@ def start_command() -> None:
$ openllm <start|start-http> <model_name> --<options> ...
```
"""
@cli.group(cls=OpenLLMCommandGroup, context_settings=termui.CONTEXT_SETTINGS, name="start-grpc")
def start_grpc_command() -> None:
"""Start any LLM as a gRPC server.
@@ -290,9 +257,7 @@ def start_grpc_command() -> None:
$ openllm start-grpc <model_name> --<options> ...
```
"""
_start_mapping = {"start": {key: start_command_factory(start_command, key, _context_settings=termui.CONTEXT_SETTINGS) for key in CONFIG_MAPPING}, "start-grpc": {key: start_command_factory(start_grpc_command, key, _context_settings=termui.CONTEXT_SETTINGS, _serve_grpc=True) for key in CONFIG_MAPPING}}
@cli.command(name="import", aliases=["download"])
@model_name_argument
@click.argument("model_id", type=click.STRING, default=None, metavar="Optional[REMOTE_REPO/MODEL_ID | /path/to/local/model]", required=False)
@@ -378,7 +343,6 @@ def import_command(model_name: str, model_id: str | None, converter: str | None,
elif output == "json": termui.echo(orjson.dumps({"previously_setup": _previously_saved, "framework": impl, "tag": str(_ref.tag)}, option=orjson.OPT_INDENT_2).decode())
else: termui.echo(_ref.tag)
return _ref
@cli.command(context_settings={"token_normalize_func": inflection.underscore})
@model_name_argument
@model_id_option
@@ -407,8 +371,32 @@ def import_command(model_name: str, model_id: str | None, converter: str | None,
@click.option("--force-push", default=False, is_flag=True, type=click.BOOL, help="Whether to force push.")
@click.pass_context
def build_command(
ctx: click.Context, /, model_name: str, model_id: str | None, bento_version: str | None, overwrite: bool, output: LiteralOutput, runtime: t.Literal["ggml", "transformers"], quantize: t.Literal["int8", "int4", "gptq"] | None, enable_features: tuple[str, ...] | None, bettertransformer: bool | None, workers_per_resource: float | None, adapter_id: tuple[str, ...],
build_ctx: str | None, machine: bool, device: tuple[str, ...], model_version: str | None, dockerfile_template: t.TextIO | None, containerize: bool, push: bool, serialisation_format: t.Literal["safetensors", "legacy"], fast: bool, container_registry: LiteralContainerRegistry, container_version_strategy: LiteralContainerVersionStrategy, force_push: bool, **attrs: t.Any,
ctx: click.Context,
/,
model_name: str,
model_id: str | None,
bento_version: str | None,
overwrite: bool,
output: LiteralOutput,
runtime: t.Literal["ggml", "transformers"],
quantize: t.Literal["int8", "int4", "gptq"] | None,
enable_features: tuple[str, ...] | None,
bettertransformer: bool | None,
workers_per_resource: float | None,
adapter_id: tuple[str, ...],
build_ctx: str | None,
machine: bool,
device: tuple[str, ...],
model_version: str | None,
dockerfile_template: t.TextIO | None,
containerize: bool,
push: bool,
serialisation_format: t.Literal["safetensors", "legacy"],
fast: bool,
container_registry: LiteralContainerRegistry,
container_version_strategy: LiteralContainerVersionStrategy,
force_push: bool,
**attrs: t.Any,
) -> bentoml.Bento:
"""Package a given models into a Bento.
@@ -488,12 +476,9 @@ def build_command(
raise bentoml.exceptions.NotFound(f"Rebuilding existing Bento {bento_tag}") from None
_previously_built = True
except bentoml.exceptions.NotFound:
bento = bundle.create_bento(
bento_tag, llm_fs, llm, workers_per_resource=workers_per_resource, adapter_map=adapter_map,
quantize=quantize, bettertransformer=bettertransformer, extra_dependencies=enable_features, dockerfile_template=dockerfile_template_path, runtime=runtime,
container_registry=container_registry, container_version_strategy=container_version_strategy
)
except Exception as err: raise err from None
bento = bundle.create_bento(bento_tag, llm_fs, llm, workers_per_resource=workers_per_resource, adapter_map=adapter_map, quantize=quantize, bettertransformer=bettertransformer, extra_dependencies=enable_features, dockerfile_template=dockerfile_template_path, runtime=runtime, container_registry=container_registry, container_version_strategy=container_version_strategy)
except Exception as err:
raise err from None
if machine: termui.echo(f"__tag__:{bento.tag}", fg="white")
elif output == "pretty":
@@ -502,18 +487,23 @@ def build_command(
if not _previously_built: termui.echo(f"Successfully built {bento}.", fg="green")
elif not overwrite: termui.echo(f"'{model_name}' already has a Bento built [{bento}]. To overwrite it pass '--overwrite'.", fg="yellow")
termui.echo("📖 Next steps:\n\n" + f"* Push to BentoCloud with 'bentoml push':\n\t$ bentoml push {bento.tag}\n\n" + f"* Containerize your Bento with 'bentoml containerize':\n\t$ bentoml containerize {bento.tag} --opt progress=plain\n\n" + "\tTip: To enable additional BentoML features for 'containerize', use '--enable-features=FEATURE[,FEATURE]' [see 'bentoml containerize -h' for more advanced usage]\n", fg="blue",)
elif output == "json": termui.echo(orjson.dumps(bento.info.to_dict(), option=orjson.OPT_INDENT_2).decode())
else: termui.echo(bento.tag)
elif output == "json":
termui.echo(orjson.dumps(bento.info.to_dict(), option=orjson.OPT_INDENT_2).decode())
else:
termui.echo(bento.tag)
if push: BentoMLContainer.bentocloud_client.get().push_bento(bento, context=t.cast(GlobalOptions, ctx.obj).cloud_context, force=force_push)
elif containerize:
backend = t.cast("DefaultBuilder", os.environ.get("BENTOML_CONTAINERIZE_BACKEND", "docker"))
try: bentoml.container.health(backend)
except subprocess.CalledProcessError: raise OpenLLMException(f"Failed to use backend {backend}") from None
try: bentoml.container.build(bento.tag, backend=backend, features=("grpc", "io"))
except Exception as err: raise OpenLLMException(f"Exception caught while containerizing '{bento.tag!s}':\n{err}") from err
try:
bentoml.container.health(backend)
except subprocess.CalledProcessError:
raise OpenLLMException(f"Failed to use backend {backend}") from None
try:
bentoml.container.build(bento.tag, backend=backend, features=("grpc", "io"))
except Exception as err:
raise OpenLLMException(f"Exception caught while containerizing '{bento.tag!s}':\n{err}") from err
return bento
@cli.command()
@output_option
@click.option("--show-available", is_flag=True, default=False, help="Show available models in local store (mutually exclusive with '-o porcelain').")
@@ -601,7 +591,6 @@ def models_command(ctx: click.Context, output: LiteralOutput, show_available: bo
if show_available: json_data["local"] = local_models
termui.echo(orjson.dumps(json_data, option=orjson.OPT_INDENT_2,).decode(), fg="white")
ctx.exit(0)
@cli.command()
@model_name_argument(required=False)
@click.option("-y", "--yes", "--assume-yes", is_flag=True, help="Skip confirmation when deleting a specific model")
@@ -625,7 +614,6 @@ def prune_command(model_name: str | None, yes: bool, include_bentos: bool, model
if delete_confirmed:
store.delete(store_item.tag)
termui.echo(f"{store_item} deleted from {'model' if isinstance(store, ModelStore) else 'bento'} store.", fg="yellow")
def parsing_instruction_callback(ctx: click.Context, param: click.Parameter, value: list[str] | str | None) -> tuple[str, bool | str] | list[str] | str | None:
if value is None:
return value
@@ -644,11 +632,9 @@ def parsing_instruction_callback(ctx: click.Context, param: click.Parameter, val
return key, values[0]
else:
raise click.BadParameter(f"Invalid option format: {value}")
def shared_client_options(f: _AnyCallable | None = None, output_value: t.Literal["json", "porcelain", "pretty"] = "pretty") -> t.Callable[[FC], FC]:
options = [click.option("--endpoint", type=click.STRING, help="OpenLLM Server endpoint, i.e: http://localhost:3000", envvar="OPENLLM_ENDPOINT", default="http://localhost:3000",), click.option("--timeout", type=click.INT, default=30, help="Default server timeout", show_default=True), output_option(default_value=output_value),]
return compose(*options)(f) if f is not None else compose(*options)
@cli.command()
@click.argument("task", type=click.STRING, metavar="TASK")
@shared_client_options
@@ -668,8 +654,10 @@ def instruct_command(endpoint: str, timeout: int, agent: LiteralString, output:
"""
client = openllm.client.HTTPClient(endpoint, timeout=timeout)
try: client.call("metadata")
except http.client.BadStatusLine: raise click.ClickException(f"{endpoint} is neither a HTTP server nor reachable.") from None
try:
client.call("metadata")
except http.client.BadStatusLine:
raise click.ClickException(f"{endpoint} is neither a HTTP server nor reachable.") from None
if agent == "hf":
if not is_transformers_supports_agent(): raise click.UsageError("Transformers version should be at least 4.29 to support HfAgent. Upgrade with 'pip install -U transformers'")
_memoized = {k: v[0] for k, v in _memoized.items() if v}
@@ -681,7 +669,6 @@ def instruct_command(endpoint: str, timeout: int, agent: LiteralString, output:
return result
else:
raise click.BadOptionUsage("agent", f"Unknown agent type {agent}")
@cli.command()
@shared_client_options(output_value="json")
@click.option("--server-type", type=click.Choice(["grpc", "http"]), help="Server type", default="http", show_default=True)
@@ -712,7 +699,6 @@ def embed_command(ctx: click.Context, text: tuple[str, ...], endpoint: str, time
else:
termui.echo(gen_embed.embeddings, fg="white")
ctx.exit(0)
@cli.command()
@shared_client_options
@click.option("--server-type", type=click.Choice(["grpc", "http"]), help="Server type", default="http", show_default=True)
@@ -744,9 +730,7 @@ def query_command(ctx: click.Context, /, prompt: str, endpoint: str, timeout: in
else:
termui.echo(res["responses"], fg="white")
ctx.exit(0)
@cli.group(cls=Extensions, hidden=True, name="extension")
def extension_command() -> None:
"""Extension for OpenLLM CLI."""
if __name__ == "__main__": cli()

View File

@@ -4,7 +4,9 @@ from openllm.cli import termui
from openllm.cli._factory import machine_option, container_registry_option
if t.TYPE_CHECKING: from openllm_core._typing_compat import LiteralContainerRegistry, LiteralContainerVersionStrategy
@click.command(
"build_base_container", context_settings=termui.CONTEXT_SETTINGS, help="""Base image builder for BentoLLM.
"build_base_container",
context_settings=termui.CONTEXT_SETTINGS,
help="""Base image builder for BentoLLM.
By default, the base image will include custom kernels (PagedAttention via vllm, FlashAttention-v2, etc.) built with CUDA 11.8, Python 3.9 on Ubuntu22.04.
Optionally, this can also be pushed directly to remote registry. Currently support ``docker.io``, ``ghcr.io`` and ``quay.io``.

View File

@@ -7,7 +7,6 @@ from openllm.cli import termui
from openllm.cli._factory import bento_complete_envvar, machine_option
if t.TYPE_CHECKING: from bentoml._internal.bento import BentoStore
@click.command("dive_bentos", context_settings=termui.CONTEXT_SETTINGS)
@click.argument("bento", type=str, shell_complete=bento_complete_envvar)
@machine_option

View File

@@ -10,7 +10,6 @@ from openllm.cli._factory import bento_complete_envvar
from openllm_core.utils import bentoml_cattr
if t.TYPE_CHECKING: from bentoml._internal.bento import BentoStore
@click.command("get_containerfile", context_settings=termui.CONTEXT_SETTINGS, help="Return Containerfile of any given Bento.")
@click.argument("bento", type=str, shell_complete=bento_complete_envvar)
@click.pass_context

View File

@@ -4,9 +4,7 @@ from bentoml_cli.utils import opt_callback
from openllm.cli import termui
from openllm.cli._factory import model_complete_envvar, output_option, machine_option
from openllm_core._prompt import process_prompt
LiteralOutput = t.Literal["json", "pretty", "porcelain"]
@click.command("get_prompt", context_settings=termui.CONTEXT_SETTINGS)
@click.argument("model_name", type=click.Choice([inflection.dasherize(name) for name in openllm.CONFIG_MAPPING.keys()]), shell_complete=model_complete_envvar)
@click.argument("prompt", type=click.STRING)

View File

@@ -3,16 +3,12 @@ import click, inflection, orjson, bentoml, openllm
from bentoml._internal.utils import human_readable_size
from openllm.cli import termui
from openllm.cli._factory import LiteralOutput, output_option
@click.command("list_bentos", context_settings=termui.CONTEXT_SETTINGS)
@output_option(default_value="json")
@click.pass_context
def cli(ctx: click.Context, output: LiteralOutput) -> None:
"""List available bentos built by OpenLLM."""
mapping = {
k: [{"tag": str(b.tag), "size": human_readable_size(openllm.utils.calc_dir_size(b.path)), "models": [{"tag": str(m.tag), "size": human_readable_size(openllm.utils.calc_dir_size(m.path))} for m in (bentoml.models.get(_.tag) for _ in b.info.models)]}
for b in tuple(i for i in bentoml.list() if all(k in i.info.labels for k in {"start_name", "bundler"})) if b.info.labels["start_name"] == k] for k in tuple(inflection.dasherize(key) for key in openllm.CONFIG_MAPPING.keys())
}
mapping = {k: [{"tag": str(b.tag), "size": human_readable_size(openllm.utils.calc_dir_size(b.path)), "models": [{"tag": str(m.tag), "size": human_readable_size(openllm.utils.calc_dir_size(m.path))} for m in (bentoml.models.get(_.tag) for _ in b.info.models)]} for b in tuple(i for i in bentoml.list() if all(k in i.info.labels for k in {"start_name", "bundler"})) if b.info.labels["start_name"] == k] for k in tuple(inflection.dasherize(key) for key in openllm.CONFIG_MAPPING.keys())}
mapping = {k: v for k, v in mapping.items() if v}
if output == "pretty":
import tabulate

View File

@@ -1,11 +1,10 @@
from __future__ import annotations
import typing as t, bentoml, openllm, orjson, inflection ,click
import typing as t, bentoml, openllm, orjson, inflection, click
from openllm.cli import termui
from bentoml._internal.utils import human_readable_size
from openllm.cli._factory import LiteralOutput, model_name_argument, output_option, model_complete_envvar
if t.TYPE_CHECKING: from openllm_core._typing_compat import DictStrAny
@click.command("list_models", context_settings=termui.CONTEXT_SETTINGS)
@model_name_argument(required=False, shell_complete=model_complete_envvar)
@output_option(default_value="json")

View File

@@ -7,15 +7,12 @@ from openllm_core.utils import is_jupyter_available, is_jupytext_available, is_n
if t.TYPE_CHECKING:
import jupytext, nbformat
from openllm_core._typing_compat import DictStrAny
logger = logging.getLogger(__name__)
def load_notebook_metadata() -> DictStrAny:
with open(os.path.join(os.path.dirname(playground.__file__), "_meta.yml"), "r") as f:
content = yaml.safe_load(f)
if not all("description" in k for k in content.values()): raise ValueError("Invalid metadata file. All entries must have a 'description' key.")
return content
@click.command("playground", context_settings=termui.CONTEXT_SETTINGS)
@click.argument("output-dir", default=None, required=False)
@click.option("--port", envvar="JUPYTER_PORT", show_envvar=True, show_default=True, default=8888, help="Default port for Jupyter server")

View File

@@ -1,11 +1,9 @@
from __future__ import annotations
import os, typing as t, click, inflection, openllm
if t.TYPE_CHECKING: from openllm_core._typing_compat import DictStrAny
def echo(text: t.Any, fg: str = "green", _with_style: bool = True, **attrs: t.Any) -> None:
attrs["fg"] = fg if not openllm.utils.get_debug_mode() else None
if not openllm.utils.get_quiet_mode(): t.cast(t.Callable[..., None], click.echo if not _with_style else click.secho)(text, **attrs)
COLUMNS: int = int(os.environ.get("COLUMNS", str(120)))
CONTEXT_SETTINGS: DictStrAny = {"help_option_names": ["-h", "--help"], "max_content_width": COLUMNS, "token_normalize_func": inflection.underscore}
__all__ = ["echo", "COLUMNS", "CONTEXT_SETTINGS"]

View File

@@ -13,5 +13,7 @@ client.embed("What is the difference between gather and scatter?")
from __future__ import annotations
import openllm_client, typing as t
if t.TYPE_CHECKING: from openllm_client import AsyncHTTPClient as AsyncHTTPClient, BaseAsyncClient as BaseAsyncClient, BaseClient as BaseClient, HTTPClient as HTTPClient, GrpcClient as GrpcClient, AsyncGrpcClient as AsyncGrpcClient
def __dir__() -> t.Sequence[str]: return sorted(dir(openllm_client))
def __getattr__(it: str) -> t.Any: return getattr(openllm_client, it)
def __dir__() -> t.Sequence[str]:
return sorted(dir(openllm_client))
def __getattr__(it: str) -> t.Any:
return getattr(openllm_client, it)

View File

@@ -3,7 +3,6 @@ import typing as t, os
import openllm
from openllm_core.utils import LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vllm_available
from openllm_core.config import AutoConfig as AutoConfig, CONFIG_MAPPING as CONFIG_MAPPING, CONFIG_MAPPING_NAMES as CONFIG_MAPPING_NAMES
_import_structure: dict[str, list[str]] = {"modeling_auto": ["MODEL_MAPPING_NAMES"], "modeling_flax_auto": ["MODEL_FLAX_MAPPING_NAMES"], "modeling_tf_auto": ["MODEL_TF_MAPPING_NAMES"], "modeling_vllm_auto": ["MODEL_VLLM_MAPPING_NAMES"]}
if t.TYPE_CHECKING:
from .modeling_auto import MODEL_MAPPING_NAMES as MODEL_MAPPING_NAMES
@@ -12,30 +11,34 @@ if t.TYPE_CHECKING:
from .modeling_vllm_auto import MODEL_VLLM_MAPPING_NAMES as MODEL_VLLM_MAPPING_NAMES
try:
if not is_torch_available(): raise openllm.exceptions.MissingDependencyError
except openllm.exceptions.MissingDependencyError: pass
except openllm.exceptions.MissingDependencyError:
pass
else:
_import_structure["modeling_auto"].extend(["AutoLLM", "MODEL_MAPPING"])
if t.TYPE_CHECKING: from .modeling_auto import MODEL_MAPPING as MODEL_MAPPING, AutoLLM as AutoLLM
try:
if not is_vllm_available(): raise openllm.exceptions.MissingDependencyError
except openllm.exceptions.MissingDependencyError: pass
except openllm.exceptions.MissingDependencyError:
pass
else:
_import_structure["modeling_vllm_auto"].extend(["AutoVLLM", "MODEL_VLLM_MAPPING"])
if t.TYPE_CHECKING: from .modeling_vllm_auto import MODEL_VLLM_MAPPING as MODEL_VLLM_MAPPING, AutoVLLM as AutoVLLM
try:
if not is_flax_available(): raise openllm.exceptions.MissingDependencyError
except openllm.exceptions.MissingDependencyError: pass
except openllm.exceptions.MissingDependencyError:
pass
else:
_import_structure["modeling_flax_auto"].extend(["AutoFlaxLLM", "MODEL_FLAX_MAPPING"])
if t.TYPE_CHECKING: from .modeling_flax_auto import MODEL_FLAX_MAPPING as MODEL_FLAX_MAPPING, AutoFlaxLLM as AutoFlaxLLM
try:
if not is_tf_available(): raise openllm.exceptions.MissingDependencyError
except openllm.exceptions.MissingDependencyError: pass
except openllm.exceptions.MissingDependencyError:
pass
else:
_import_structure["modeling_tf_auto"].extend(["AutoTFLLM", "MODEL_TF_MAPPING"])
if t.TYPE_CHECKING: from .modeling_tf_auto import MODEL_TF_MAPPING as MODEL_TF_MAPPING, AutoTFLLM as AutoTFLLM
__lazy=LazyModule(__name__, os.path.abspath("__file__"), _import_structure)
__all__=__lazy.__all__
__dir__=__lazy.__dir__
__getattr__=__lazy.__getattr__
__lazy = LazyModule(__name__, os.path.abspath("__file__"), _import_structure)
__all__ = __lazy.__all__
__dir__ = __lazy.__dir__
__getattr__ = __lazy.__getattr__

View File

@@ -16,10 +16,12 @@ if t.TYPE_CHECKING:
ConfigModelItemsView = _odict_items[type[openllm.LLMConfig], type[openllm.LLM[t.Any, t.Any]]]
logger = logging.getLogger(__name__)
class BaseAutoLLMClass:
_model_mapping: t.ClassVar[_LazyAutoMapping]
def __init__(self, *args: t.Any, **attrs: t.Any): raise EnvironmentError(f"Cannot instantiate {self.__class__.__name__} directly. Please use '{self.__class__.__name__}.Runner(model_name)' instead.")
def __init__(self, *args: t.Any, **attrs: t.Any):
raise EnvironmentError(f"Cannot instantiate {self.__class__.__name__} directly. Please use '{self.__class__.__name__}.Runner(model_name)' instead.")
@classmethod
def for_model(cls, model: str, /, model_id: str | None = None, model_version: str | None = None, llm_config: openllm.LLMConfig | None = None, ensure_available: bool = False, **attrs: t.Any) -> openllm.LLM[t.Any, t.Any]:
"""The lower level API for creating a LLM instance.
@@ -32,6 +34,7 @@ class BaseAutoLLMClass:
llm = cls.infer_class_from_name(model).from_pretrained(model_id=model_id, model_version=model_version, llm_config=llm_config, **attrs)
if ensure_available: llm.ensure_model_id_exists()
return llm
@classmethod
def create_runner(cls, model: str, model_id: str | None = None, **attrs: t.Any) -> LLMRunner[t.Any, t.Any]:
"""Create a LLM Runner for the given model name.
@@ -46,8 +49,10 @@ class BaseAutoLLMClass:
"""
runner_kwargs_name = set(inspect.signature(openllm.LLM[t.Any, t.Any].to_runner).parameters)
runner_attrs = {k: v for k, v in attrs.items() if k in runner_kwargs_name}
for k in runner_attrs: del attrs[k]
for k in runner_attrs:
del attrs[k]
return cls.for_model(model, model_id=model_id, **attrs).to_runner(**runner_attrs)
@classmethod
def register(cls, config_class: type[openllm.LLMConfig], llm_class: type[openllm.LLM[t.Any, t.Any]]) -> None:
"""Register a new model for this class.
@@ -59,12 +64,12 @@ class BaseAutoLLMClass:
if hasattr(llm_class, "config_class") and llm_class.config_class is not config_class:
raise ValueError(f"The model class you are passing has a `config_class` attribute that is not consistent with the config class you passed (model has {llm_class.config_class} and you passed {config_class}. Fix one of those so they match!")
cls._model_mapping.register(config_class, llm_class)
@classmethod
def infer_class_from_name(cls, name: str) -> type[openllm.LLM[t.Any, t.Any]]:
config_class = openllm.AutoConfig.infer_class_from_name(name)
if config_class in cls._model_mapping: return cls._model_mapping[config_class]
raise ValueError(f"Unrecognized configuration class ({config_class}) for {name}. Model name should be one of {', '.join(openllm.CONFIG_MAPPING.keys())} (Registered configuration class: {', '.join([i.__name__ for i in cls._model_mapping.keys()])}).")
def getattribute_from_module(module: types.ModuleType, attr: t.Any) -> t.Any:
if attr is None: return
if isinstance(attr, tuple): return tuple(getattribute_from_module(module, a) for a in attr)
@@ -72,10 +77,11 @@ def getattribute_from_module(module: types.ModuleType, attr: t.Any) -> t.Any:
# Some of the mappings have entries model_type -> object of another model type. In that case we try to grab the object at the top level.
openllm_module = importlib.import_module("openllm")
if module != openllm_module:
try: return getattribute_from_module(openllm_module, attr)
except ValueError: raise ValueError(f"Could not find {attr} neither in {module} nor in {openllm_module}!") from None
try:
return getattribute_from_module(openllm_module, attr)
except ValueError:
raise ValueError(f"Could not find {attr} neither in {module} nor in {openllm_module}!") from None
raise ValueError(f"Could not find {attr} in {openllm_module}!")
class _LazyAutoMapping(OrderedDict, ReprMixin):
"""Based on transformers.models.auto.configuration_auto._LazyAutoMapping.
@@ -88,6 +94,7 @@ class _LazyAutoMapping(OrderedDict, ReprMixin):
self._model_mapping = model_mapping
self._extra_content: dict[t.Any, t.Any] = {}
self._modules: dict[str, types.ModuleType] = {}
def __getitem__(self, key: type[openllm.LLMConfig]) -> type[openllm.LLM[t.Any, t.Any]]:
if key in self._extra_content: return self._extra_content[key]
model_type = self._reverse_config_mapping[key.__name__]
@@ -97,24 +104,45 @@ class _LazyAutoMapping(OrderedDict, ReprMixin):
for mtype in model_types:
if mtype in self._model_mapping: return self._load_attr_from_module(mtype, self._model_mapping[mtype])
raise KeyError(key)
def _load_attr_from_module(self, model_type: str, attr: str) -> t.Any:
module_name = inflection.underscore(model_type)
if module_name not in self._modules: self._modules[module_name] = importlib.import_module(f".{module_name}", "openllm.models")
return getattribute_from_module(self._modules[module_name], attr)
def __len__(self) -> int: return len(set(self._config_mapping.keys()).intersection(self._model_mapping.keys())) + len(self._extra_content)
def __len__(self) -> int:
return len(set(self._config_mapping.keys()).intersection(self._model_mapping.keys())) + len(self._extra_content)
@property
def __repr_keys__(self) -> set[str]: return set(self._config_mapping.keys())
def __repr__(self) -> str: return ReprMixin.__repr__(self)
def __repr_args__(self) -> t.Generator[tuple[str, tuple[str, str]], t.Any, t.Any]: yield from ((key, (value, self._model_mapping[key])) for key, value in self._config_mapping.items() if key in self._model_mapping)
def __bool__(self) -> bool: return bool(self.keys())
def keys(self) -> ConfigModelKeysView: return t.cast("ConfigModelKeysView", [self._load_attr_from_module(key, name) for key, name in self._config_mapping.items() if key in self._model_mapping.keys()] + list(self._extra_content.keys()))
def values(self) -> ConfigModelValuesView: return t.cast("ConfigModelValuesView", [self._load_attr_from_module(key, name) for key, name in self._model_mapping.items() if key in self._config_mapping.keys()] + list(self._extra_content.values()))
def items(self) -> ConfigModelItemsView: return t.cast("ConfigModelItemsView", [(self._load_attr_from_module(key, self._config_mapping[key]), self._load_attr_from_module(key, self._model_mapping[key])) for key in self._model_mapping.keys() if key in self._config_mapping.keys()] + list(self._extra_content.items()))
def __iter__(self) -> t.Iterator[type[openllm.LLMConfig]]: return iter(t.cast("SupportsIter[t.Iterator[type[openllm.LLMConfig]]]", self.keys()))
def __repr_keys__(self) -> set[str]:
return set(self._config_mapping.keys())
def __repr__(self) -> str:
return ReprMixin.__repr__(self)
def __repr_args__(self) -> t.Generator[tuple[str, tuple[str, str]], t.Any, t.Any]:
yield from ((key, (value, self._model_mapping[key])) for key, value in self._config_mapping.items() if key in self._model_mapping)
def __bool__(self) -> bool:
return bool(self.keys())
def keys(self) -> ConfigModelKeysView:
return t.cast("ConfigModelKeysView", [self._load_attr_from_module(key, name) for key, name in self._config_mapping.items() if key in self._model_mapping.keys()] + list(self._extra_content.keys()))
def values(self) -> ConfigModelValuesView:
return t.cast("ConfigModelValuesView", [self._load_attr_from_module(key, name) for key, name in self._model_mapping.items() if key in self._config_mapping.keys()] + list(self._extra_content.values()))
def items(self) -> ConfigModelItemsView:
return t.cast("ConfigModelItemsView", [(self._load_attr_from_module(key, self._config_mapping[key]), self._load_attr_from_module(key, self._model_mapping[key])) for key in self._model_mapping.keys() if key in self._config_mapping.keys()] + list(self._extra_content.items()))
def __iter__(self) -> t.Iterator[type[openllm.LLMConfig]]:
return iter(t.cast("SupportsIter[t.Iterator[type[openllm.LLMConfig]]]", self.keys()))
def __contains__(self, item: t.Any) -> bool:
if item in self._extra_content: return True
if not hasattr(item, "__name__") or item.__name__ not in self._reverse_config_mapping: return False
return self._reverse_config_mapping[item.__name__] in self._model_mapping
def register(self, key: t.Any, value: t.Any) -> None:
if hasattr(key, "__name__") and key.__name__ in self._reverse_config_mapping:
if self._reverse_config_mapping[key.__name__] in self._model_mapping.keys(): raise ValueError(f"'{key}' is already used by a OpenLLM model.")

View File

@@ -3,7 +3,6 @@ import typing as t
from collections import OrderedDict
from .factory import BaseAutoLLMClass, _LazyAutoMapping
from openllm_core.config import CONFIG_MAPPING_NAMES
MODEL_MAPPING_NAMES = OrderedDict([("chatglm", "ChatGLM"), ("dolly_v2", "DollyV2"), ("falcon", "Falcon"), ("flan_t5", "FlanT5"), ("gpt_neox", "GPTNeoX"), ("llama", "Llama"), ("mpt", "MPT"), ("opt", "OPT"), ("stablelm", "StableLM"), ("starcoder", "StarCoder"), ("baichuan", "Baichuan")])
MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_MAPPING_NAMES)
class AutoLLM(BaseAutoLLMClass):

View File

@@ -3,7 +3,6 @@ import typing as t
from collections import OrderedDict
from .factory import BaseAutoLLMClass, _LazyAutoMapping
from openllm_core.config import CONFIG_MAPPING_NAMES
MODEL_FLAX_MAPPING_NAMES = OrderedDict([("flan_t5", "FlaxFlanT5"), ("opt", "FlaxOPT")])
MODEL_FLAX_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FLAX_MAPPING_NAMES)
class AutoFlaxLLM(BaseAutoLLMClass):

View File

@@ -3,7 +3,6 @@ import typing as t
from collections import OrderedDict
from .factory import BaseAutoLLMClass, _LazyAutoMapping
from openllm_core.config import CONFIG_MAPPING_NAMES
MODEL_TF_MAPPING_NAMES = OrderedDict([("flan_t5", "TFFlanT5"), ("opt", "TFOPT")])
MODEL_TF_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_TF_MAPPING_NAMES)
class AutoTFLLM(BaseAutoLLMClass):

View File

@@ -3,7 +3,6 @@ import typing as t
from collections import OrderedDict
from .factory import BaseAutoLLMClass, _LazyAutoMapping
from openllm_core.config import CONFIG_MAPPING_NAMES
MODEL_VLLM_MAPPING_NAMES = OrderedDict([("baichuan", "VLLMBaichuan"), ("dolly_v2", "VLLMDollyV2"), ("falcon", "VLLMFalcon"), ("gpt_neox", "VLLMGPTNeoX"), ("mpt", "VLLMMPT"), ("opt", "VLLMOPT"), ("stablelm", "VLLMStableLM"), ("starcoder", "VLLMStarCoder"), ("llama", "VLLMLlama")])
MODEL_VLLM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_VLLM_MAPPING_NAMES)
class AutoVLLM(BaseAutoLLMClass):

View File

@@ -2,22 +2,19 @@ from __future__ import annotations
import sys, typing as t
from openllm.exceptions import MissingDependencyError
from openllm.utils import LazyModule, is_cpm_kernels_available, is_torch_available, is_vllm_available
from openllm_core.config.configuration_baichuan import (
DEFAULT_PROMPT_TEMPLATE as DEFAULT_PROMPT_TEMPLATE,
START_BAICHUAN_COMMAND_DOCSTRING as START_BAICHUAN_COMMAND_DOCSTRING,
BaichuanConfig as BaichuanConfig,
)
from openllm_core.config.configuration_baichuan import DEFAULT_PROMPT_TEMPLATE as DEFAULT_PROMPT_TEMPLATE, START_BAICHUAN_COMMAND_DOCSTRING as START_BAICHUAN_COMMAND_DOCSTRING, BaichuanConfig as BaichuanConfig
_import_structure: dict[str, list[str]] = {}
try:
if not is_torch_available() or not is_cpm_kernels_available(): raise MissingDependencyError
except MissingDependencyError: pass
except MissingDependencyError:
pass
else:
_import_structure["modeling_baichuan"] = ["Baichuan"]
if t.TYPE_CHECKING: from .modeling_baichuan import Baichuan as Baichuan
try:
if not is_vllm_available(): raise MissingDependencyError
except MissingDependencyError: pass
except MissingDependencyError:
pass
else:
_import_structure["modeling_vllm_baichuan"] = ["VLLMBaichuan"]
if t.TYPE_CHECKING: from .modeling_vllm_baichuan import VLLMBaichuan as VLLMBaichuan

View File

@@ -1,9 +1,9 @@
from __future__ import annotations
import typing as t, openllm
if t.TYPE_CHECKING: import transformers
class Baichuan(openllm.LLM["transformers.PreTrainedModel", "transformers.PreTrainedTokenizerBase"]):
__openllm_internal__ = True
def generate(self, prompt: str, **attrs: t.Any) -> list[str]:
import torch
inputs = self.tokenizer(prompt, return_tensors="pt").to(self.device)

View File

@@ -1,7 +1,6 @@
from __future__ import annotations
import typing as t, openllm
if t.TYPE_CHECKING: import vllm, transformers
class VLLMBaichuan(openllm.LLM["vllm.LLMEngine", "transformers.PreTrainedTokenizerBase"]):
__openllm_internal__ = True
tokenizer_id = "local"

View File

@@ -2,16 +2,12 @@ from __future__ import annotations
import sys, typing as t
from openllm.exceptions import MissingDependencyError
from openllm.utils import LazyModule, is_cpm_kernels_available, is_torch_available
from openllm_core.config.configuration_chatglm import (
DEFAULT_PROMPT_TEMPLATE as DEFAULT_PROMPT_TEMPLATE,
START_CHATGLM_COMMAND_DOCSTRING as START_CHATGLM_COMMAND_DOCSTRING,
ChatGLMConfig as ChatGLMConfig,
)
from openllm_core.config.configuration_chatglm import DEFAULT_PROMPT_TEMPLATE as DEFAULT_PROMPT_TEMPLATE, START_CHATGLM_COMMAND_DOCSTRING as START_CHATGLM_COMMAND_DOCSTRING, ChatGLMConfig as ChatGLMConfig
_import_structure: dict[str, list[str]] = {}
try:
if not is_torch_available() or not is_cpm_kernels_available(): raise MissingDependencyError
except MissingDependencyError: pass
except MissingDependencyError:
pass
else:
_import_structure["modeling_chatglm"] = ["ChatGLM"]
if t.TYPE_CHECKING: from .modeling_chatglm import ChatGLM as ChatGLM

View File

@@ -3,6 +3,7 @@ import typing as t, openllm
if t.TYPE_CHECKING: import transformers
class ChatGLM(openllm.LLM["transformers.PreTrainedModel", "transformers.PreTrainedTokenizerFast"]):
__openllm_internal__ = True
def generate(self, prompt: str, **attrs: t.Any) -> tuple[str, list[tuple[str, str]]]:
import torch
with torch.inference_mode():
@@ -10,6 +11,7 @@ class ChatGLM(openllm.LLM["transformers.PreTrainedModel", "transformers.PreTrain
# Only use half precision if the model is not yet quantized
if self.config.use_half_precision: self.model.half()
return self.model.chat(self.tokenizer, prompt, generation_config=self.config.model_construct_env(**attrs).to_generation_config())
def embeddings(self, prompts: list[str]) -> openllm.LLMEmbeddings:
import torch, torch.nn.functional as F
embeddings: list[list[float]] = []

View File

@@ -2,22 +2,19 @@ from __future__ import annotations
import sys, typing as t
from openllm.exceptions import MissingDependencyError
from openllm.utils import LazyModule, is_torch_available, is_vllm_available
from openllm_core.config.configuration_dolly_v2 import (
DEFAULT_PROMPT_TEMPLATE as DEFAULT_PROMPT_TEMPLATE,
START_DOLLY_V2_COMMAND_DOCSTRING as START_DOLLY_V2_COMMAND_DOCSTRING,
DollyV2Config as DollyV2Config,
)
from openllm_core.config.configuration_dolly_v2 import DEFAULT_PROMPT_TEMPLATE as DEFAULT_PROMPT_TEMPLATE, START_DOLLY_V2_COMMAND_DOCSTRING as START_DOLLY_V2_COMMAND_DOCSTRING, DollyV2Config as DollyV2Config
_import_structure: dict[str, list[str]] = {}
try:
if not is_torch_available(): raise MissingDependencyError
except MissingDependencyError: pass
except MissingDependencyError:
pass
else:
_import_structure["modeling_dolly_v2"] = ["DollyV2"]
if t.TYPE_CHECKING: from .modeling_dolly_v2 import DollyV2 as DollyV2
try:
if not is_vllm_available(): raise MissingDependencyError
except MissingDependencyError: pass
except MissingDependencyError:
pass
else:
_import_structure["modeling_vllm_dolly_v2"] = ["VLLMDollyV2"]
if t.TYPE_CHECKING: from .modeling_vllm_dolly_v2 import VLLMDollyV2 as VLLMDollyV2

View File

@@ -6,15 +6,18 @@ from openllm_core.config.configuration_dolly_v2 import DEFAULT_PROMPT_TEMPLATE,
if t.TYPE_CHECKING: import torch, transformers, tensorflow as tf
else: torch, transformers, tf = openllm.utils.LazyLoader("torch", globals(), "torch"), openllm.utils.LazyLoader("transformers", globals(), "transformers"), openllm.utils.LazyLoader("tf", globals(), "tensorflow")
logger = logging.getLogger(__name__)
@overload
def get_pipeline(model: transformers.PreTrainedModel, tokenizer: transformers.PreTrainedTokenizer, _init: t.Literal[True] = True, **attrs: t.Any) -> transformers.Pipeline: ...
def get_pipeline(model: transformers.PreTrainedModel, tokenizer: transformers.PreTrainedTokenizer, _init: t.Literal[True] = True, **attrs: t.Any) -> transformers.Pipeline:
...
@overload
def get_pipeline(model: transformers.PreTrainedModel, tokenizer: transformers.PreTrainedTokenizer, _init: t.Literal[False] = ..., **attrs: t.Any) -> type[transformers.Pipeline]: ...
def get_pipeline(model: transformers.PreTrainedModel, tokenizer: transformers.PreTrainedTokenizer, _init: t.Literal[False] = ..., **attrs: t.Any) -> type[transformers.Pipeline]:
...
def get_pipeline(model: transformers.PreTrainedModel, tokenizer: transformers.PreTrainedTokenizer, _init: bool = False, **attrs: t.Any) -> type[transformers.Pipeline] | transformers.Pipeline:
# Lazy loading the pipeline. See databricks' implementation on HuggingFace for more information.
class InstructionTextGenerationPipeline(transformers.Pipeline):
def __init__(self, *args: t.Any, do_sample: bool = True, max_new_tokens: int = 256, top_p: float = 0.92, top_k: int = 0, **kwargs: t.Any): super().__init__(*args, model=model, tokenizer=tokenizer, do_sample=do_sample, max_new_tokens=max_new_tokens, top_p=top_p, top_k=top_k, **kwargs)
def __init__(self, *args: t.Any, do_sample: bool = True, max_new_tokens: int = 256, top_p: float = 0.92, top_k: int = 0, **kwargs: t.Any):
super().__init__(*args, model=model, tokenizer=tokenizer, do_sample=do_sample, max_new_tokens=max_new_tokens, top_p=top_p, top_k=top_k, **kwargs)
def _sanitize_parameters(self, return_full_text: bool | None = None, **generate_kwargs: t.Any) -> tuple[dict[str, t.Any], dict[str, t.Any], dict[str, t.Any]]:
if t.TYPE_CHECKING: assert self.tokenizer is not None
preprocess_params: dict[str, t.Any] = {}
@@ -29,11 +32,13 @@ def get_pipeline(model: transformers.PreTrainedModel, tokenizer: transformers.Pr
end_key_token_id = get_special_token_id(self.tokenizer, END_KEY)
# Ensure generation stops once it generates "### End"
generate_kwargs["eos_token_id"] = end_key_token_id
except ValueError: pass
except ValueError:
pass
forward_params = generate_kwargs
postprocess_params = {"response_key_token_id": response_key_token_id, "end_key_token_id": end_key_token_id}
if return_full_text is not None: postprocess_params["return_full_text"] = return_full_text
return preprocess_params, forward_params, postprocess_params
def preprocess(self, input_: str, **generate_kwargs: t.Any) -> t.Dict[str, t.Any]:
if t.TYPE_CHECKING: assert self.tokenizer is not None
prompt_text = DEFAULT_PROMPT_TEMPLATE.format(instruction=input_)
@@ -41,6 +46,7 @@ def get_pipeline(model: transformers.PreTrainedModel, tokenizer: transformers.Pr
inputs["prompt_text"] = prompt_text
inputs["instruction_text"] = input_
return t.cast(t.Dict[str, t.Any], inputs)
def _forward(self, input_tensors: dict[str, t.Any], **generate_kwargs: t.Any) -> transformers.utils.generic.ModelOutput:
if t.TYPE_CHECKING: assert self.tokenizer is not None
input_ids, attention_mask = input_tensors["input_ids"], input_tensors.get("attention_mask", None)
@@ -52,6 +58,7 @@ def get_pipeline(model: transformers.PreTrainedModel, tokenizer: transformers.Pr
elif self.framework == "tf": generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
instruction_text = input_tensors.pop("instruction_text")
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "instruction_text": instruction_text}
def postprocess(self, model_outputs: dict[str, t.Any], response_key_token_id: int, end_key_token_id: int, return_full_text: bool = False) -> list[dict[t.Literal["generated_text"], str]]:
if t.TYPE_CHECKING: assert self.tokenizer is not None
_generated_sequence, instruction_text = model_outputs["generated_sequence"][0], model_outputs["instruction_text"]
@@ -64,16 +71,20 @@ def get_pipeline(model: transformers.PreTrainedModel, tokenizer: transformers.Pr
if response_key_token_id and end_key_token_id:
# Find where "### Response:" is first found in the generated tokens. Considering this is part of the
# prompt, we should definitely find it. We will return the tokens found after this token.
try: response_pos = sequence.index(response_key_token_id)
except ValueError: response_pos = None
try:
response_pos = sequence.index(response_key_token_id)
except ValueError:
response_pos = None
if response_pos is None: logger.warning("Could not find response key %s in: %s", response_key_token_id, sequence)
if response_pos:
# Next find where "### End" is located. The model has been trained to end its responses with this
# sequence (or actually, the token ID it maps to, since it is a special token). We may not find
# this token, as the response could be truncated. If we don't find it then just return everything
# to the end. Note that even though we set eos_token_id, we still see the this token at the end.
try: end_pos = sequence.index(end_key_token_id)
except ValueError: end_pos = None
try:
end_pos = sequence.index(end_key_token_id)
except ValueError:
end_pos = None
decoded = self.tokenizer.decode(sequence[response_pos + 1:end_pos]).strip()
if not decoded:
# Otherwise we'll decode everything and use a regex to find the response and end.
@@ -94,13 +105,19 @@ def get_pipeline(model: transformers.PreTrainedModel, tokenizer: transformers.Pr
if return_full_text: decoded = f"{instruction_text}\n{decoded}"
records.append({"generated_text": t.cast(str, decoded)})
return records
return InstructionTextGenerationPipeline() if _init else InstructionTextGenerationPipeline
return InstructionTextGenerationPipeline() if _init else InstructionTextGenerationPipeline
class DollyV2(openllm.LLM["transformers.Pipeline", "transformers.PreTrainedTokenizer"]):
__openllm_internal__ = True
@property
def import_kwargs(self) -> tuple[dict[str, t.Any], dict[str, t.Any]]: return {"device_map": "auto" if torch.cuda.is_available() and torch.cuda.device_count() > 1 else None, "torch_dtype": torch.bfloat16}, {}
def load_model(self, *args: t.Any, **attrs: t.Any) -> transformers.Pipeline: return get_pipeline(transformers.AutoModelForCausalLM.from_pretrained(self._bentomodel.path, *args, **attrs), self.tokenizer, _init=True, return_full_text=self.config.return_full_text)
def import_kwargs(self) -> tuple[dict[str, t.Any], dict[str, t.Any]]:
return {"device_map": "auto" if torch.cuda.is_available() and torch.cuda.device_count() > 1 else None, "torch_dtype": torch.bfloat16}, {}
def load_model(self, *args: t.Any, **attrs: t.Any) -> transformers.Pipeline:
return get_pipeline(transformers.AutoModelForCausalLM.from_pretrained(self._bentomodel.path, *args, **attrs), self.tokenizer, _init=True, return_full_text=self.config.return_full_text)
def generate(self, prompt: str, **attrs: t.Any) -> list[dict[t.Literal["generated_text"], str]]:
llm_config = self.config.model_construct_env(**attrs)
with torch.inference_mode(): return self.model(prompt, return_full_text=llm_config.return_full_text, generation_config=llm_config.to_generation_config())
with torch.inference_mode():
return self.model(prompt, return_full_text=llm_config.return_full_text, generation_config=llm_config.to_generation_config())

View File

@@ -2,22 +2,19 @@ from __future__ import annotations
import sys, typing as t
from openllm.exceptions import MissingDependencyError
from openllm.utils import LazyModule, is_torch_available, is_vllm_available
from openllm_core.config.configuration_falcon import (
DEFAULT_PROMPT_TEMPLATE as DEFAULT_PROMPT_TEMPLATE,
START_FALCON_COMMAND_DOCSTRING as START_FALCON_COMMAND_DOCSTRING,
FalconConfig as FalconConfig,
)
from openllm_core.config.configuration_falcon import DEFAULT_PROMPT_TEMPLATE as DEFAULT_PROMPT_TEMPLATE, START_FALCON_COMMAND_DOCSTRING as START_FALCON_COMMAND_DOCSTRING, FalconConfig as FalconConfig
_import_structure: dict[str, list[str]] = {}
try:
if not is_torch_available(): raise MissingDependencyError
except MissingDependencyError: pass
except MissingDependencyError:
pass
else:
_import_structure["modeling_falcon"] = ["Falcon"]
if t.TYPE_CHECKING: from .modeling_falcon import Falcon as Falcon
try:
if not is_vllm_available(): raise MissingDependencyError
except MissingDependencyError: pass
except MissingDependencyError:
pass
else:
_import_structure["modeling_vllm_falcon"] = ["VLLMFalcon"]
if t.TYPE_CHECKING: from .modeling_vllm_falcon import VLLMFalcon as VLLMFalcon

View File

@@ -2,15 +2,18 @@ from __future__ import annotations
import typing as t, openllm
if t.TYPE_CHECKING: import torch, transformers
else: torch, transformers = openllm.utils.LazyLoader("torch", globals(), "torch"), openllm.utils.LazyLoader("transformers", globals(), "transformers")
class Falcon(openllm.LLM["transformers.PreTrainedModel", "transformers.PreTrainedTokenizerBase"]):
__openllm_internal__ = True
@property
def import_kwargs(self) -> tuple[dict[str, t.Any], dict[str, t.Any]]: return {"torch_dtype": torch.bfloat16, "device_map": "auto" if torch.cuda.is_available() and torch.cuda.device_count() > 1 else None}, {}
def import_kwargs(self) -> tuple[dict[str, t.Any], dict[str, t.Any]]:
return {"torch_dtype": torch.bfloat16, "device_map": "auto" if torch.cuda.is_available() and torch.cuda.device_count() > 1 else None}, {}
def generate(self, prompt: str, **attrs: t.Any) -> list[str]:
eos_token_id, inputs = attrs.pop("eos_token_id", self.tokenizer.eos_token_id), self.tokenizer(prompt, return_tensors="pt").to(self.device)
with torch.inference_mode(), torch.autocast("cuda", dtype=torch.float16): # type: ignore[attr-defined]
return self.tokenizer.batch_decode(self.model.generate(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], generation_config=self.config.model_construct_env(eos_token_id=eos_token_id, **attrs).to_generation_config()), skip_special_tokens=True)
def generate_one(self, prompt: str, stop: list[str], **preprocess_generate_kwds: t.Any) -> list[dict[t.Literal["generated_text"], str]]:
max_new_tokens, encoded_inputs = preprocess_generate_kwds.pop("max_new_tokens", 200), self.tokenizer(prompt, return_tensors="pt").to(self.device)
src_len, stopping_criteria = encoded_inputs["input_ids"].shape[1], preprocess_generate_kwds.pop("stopping_criteria", openllm.StoppingCriteriaList([]))

View File

@@ -2,28 +2,26 @@ from __future__ import annotations
import sys, typing as t
from openllm.exceptions import MissingDependencyError
from openllm.utils import LazyModule, is_flax_available, is_tf_available, is_torch_available
from openllm_core.config.configuration_flan_t5 import (
DEFAULT_PROMPT_TEMPLATE as DEFAULT_PROMPT_TEMPLATE,
START_FLAN_T5_COMMAND_DOCSTRING as START_FLAN_T5_COMMAND_DOCSTRING,
FlanT5Config as FlanT5Config,
)
from openllm_core.config.configuration_flan_t5 import DEFAULT_PROMPT_TEMPLATE as DEFAULT_PROMPT_TEMPLATE, START_FLAN_T5_COMMAND_DOCSTRING as START_FLAN_T5_COMMAND_DOCSTRING, FlanT5Config as FlanT5Config
_import_structure: dict[str, list[str]] = {}
try:
if not is_torch_available(): raise MissingDependencyError
except MissingDependencyError: pass
except MissingDependencyError:
pass
else:
_import_structure["modeling_flan_t5"] = ["FlanT5"]
if t.TYPE_CHECKING: from .modeling_flan_t5 import FlanT5 as FlanT5
try:
if not is_flax_available(): raise MissingDependencyError
except MissingDependencyError: pass
except MissingDependencyError:
pass
else:
_import_structure["modeling_flax_flan_t5"] = ["FlaxFlanT5"]
if t.TYPE_CHECKING: from .modeling_flax_flan_t5 import FlaxFlanT5 as FlaxFlanT5
try:
if not is_tf_available(): raise MissingDependencyError
except MissingDependencyError: pass
except MissingDependencyError:
pass
else:
_import_structure["modeling_tf_flan_t5"] = ["TFFlanT5"]
if t.TYPE_CHECKING: from .modeling_tf_flan_t5 import TFFlanT5 as TFFlanT5

View File

@@ -1,12 +1,14 @@
from __future__ import annotations
import typing as t, openllm
if t.TYPE_CHECKING: import transformers
class FlanT5(openllm.LLM["transformers.T5ForConditionalGeneration", "transformers.T5TokenizerFast"]):
__openllm_internal__ = True
def generate(self, prompt: str, **attrs: t.Any) -> list[str]:
import torch
with torch.inference_mode(): return self.tokenizer.batch_decode(self.model.generate(**self.tokenizer(prompt, return_tensors="pt").to(self.device), do_sample=True, generation_config=self.config.model_construct_env(**attrs).to_generation_config()), skip_special_tokens=True)
with torch.inference_mode():
return self.tokenizer.batch_decode(self.model.generate(**self.tokenizer(prompt, return_tensors="pt").to(self.device), do_sample=True, generation_config=self.config.model_construct_env(**attrs).to_generation_config()), skip_special_tokens=True)
def embeddings(self, prompts: list[str]) -> openllm.LLMEmbeddings:
import torch, torch.nn.functional as F
embeddings: list[list[float]] = []

View File

@@ -3,12 +3,13 @@ import typing as t, openllm
from openllm_core._prompt import process_prompt
from openllm_core.config.configuration_flan_t5 import DEFAULT_PROMPT_TEMPLATE
if t.TYPE_CHECKING: import transformers
class FlaxFlanT5(openllm.LLM["transformers.FlaxT5ForConditionalGeneration", "transformers.T5TokenizerFast"]):
__openllm_internal__ = True
def sanitize_parameters(self, prompt: str, max_new_tokens: int | None = None, temperature: float | None = None, top_k: int | None = None, top_p: float | None = None, repetition_penalty: float | None = None, decoder_start_token_id: int | None = None, use_default_prompt_template: bool = True, **attrs: t.Any) -> tuple[str, dict[str, t.Any], dict[str, t.Any]]:
if decoder_start_token_id is None: decoder_start_token_id = 0
return process_prompt(prompt, DEFAULT_PROMPT_TEMPLATE, use_default_prompt_template, **attrs), {"max_new_tokens": max_new_tokens, "temperature": temperature, "top_k": top_k, "top_p": top_p, "repetition_penalty": repetition_penalty, "decoder_start_token_id": decoder_start_token_id}, {}
def generate(self, prompt: str, **attrs: t.Any) -> list[str]:
# NOTE: decoder_start_token_id is extracted from https://huggingface.co/google/flan-t5-small/tree/main as it is required for encoder-decoder generation.
decoder_start_token_id = attrs.pop("decoder_start_token_id", 0)

View File

@@ -1,7 +1,8 @@
from __future__ import annotations
import typing as t, openllm
if t.TYPE_CHECKING: import transformers
class TFFlanT5(openllm.LLM["transformers.TFT5ForConditionalGeneration", "transformers.T5TokenizerFast"]):
__openllm_internal__ = True
def generate(self, prompt: str, **attrs: t.Any) -> list[str]: return self.tokenizer.batch_decode(self.model.generate(self.tokenizer(prompt, return_tensors="tf").input_ids, do_sample=True, generation_config=self.config.model_construct_env(**attrs).to_generation_config()), skip_special_tokens=True)
def generate(self, prompt: str, **attrs: t.Any) -> list[str]:
return self.tokenizer.batch_decode(self.model.generate(self.tokenizer(prompt, return_tensors="tf").input_ids, do_sample=True, generation_config=self.config.model_construct_env(**attrs).to_generation_config()), skip_special_tokens=True)

View File

@@ -2,22 +2,19 @@ from __future__ import annotations
import sys, typing as t
from openllm.exceptions import MissingDependencyError
from openllm.utils import LazyModule, is_torch_available, is_vllm_available
from openllm_core.config.configuration_gpt_neox import (
DEFAULT_PROMPT_TEMPLATE as DEFAULT_PROMPT_TEMPLATE,
START_GPT_NEOX_COMMAND_DOCSTRING as START_GPT_NEOX_COMMAND_DOCSTRING,
GPTNeoXConfig as GPTNeoXConfig,
)
from openllm_core.config.configuration_gpt_neox import DEFAULT_PROMPT_TEMPLATE as DEFAULT_PROMPT_TEMPLATE, START_GPT_NEOX_COMMAND_DOCSTRING as START_GPT_NEOX_COMMAND_DOCSTRING, GPTNeoXConfig as GPTNeoXConfig
_import_structure: dict[str, list[str]] = {}
try:
if not is_torch_available(): raise MissingDependencyError
except MissingDependencyError: pass
except MissingDependencyError:
pass
else:
_import_structure["modeling_gpt_neox"] = ["GPTNeoX"]
if t.TYPE_CHECKING: from .modeling_gpt_neox import GPTNeoX as GPTNeoX
try:
if not is_vllm_available(): raise MissingDependencyError
except MissingDependencyError: pass
except MissingDependencyError:
pass
else:
_import_structure["modeling_vllm_gpt_neox"] = ["VLLMGPTNeoX"]
if t.TYPE_CHECKING: from .modeling_vllm_gpt_neox import VLLMGPTNeoX as VLLMGPTNeoX

View File

@@ -5,15 +5,19 @@ if t.TYPE_CHECKING: import transformers
logger = logging.getLogger(__name__)
class GPTNeoX(openllm.LLM["transformers.GPTNeoXForCausalLM", "transformers.GPTNeoXTokenizerFast"]):
__openllm_internal__ = True
@property
def import_kwargs(self) -> tuple[dict[str, t.Any], dict[str, t.Any]]:
import torch
return {"device_map": "auto" if torch.cuda.is_available() and torch.cuda.device_count() > 1 else None}, {}
def load_model(self, *args: t.Any, **attrs: t.Any) -> transformers.GPTNeoXForCausalLM:
import transformers
model = transformers.AutoModelForCausalLM.from_pretrained(self._bentomodel.path, *args, **attrs)
if self.config.use_half_precision: model.half()
return model
def generate(self, prompt: str, **attrs: t.Any) -> list[str]:
import torch
with torch.inference_mode(): return self.tokenizer.batch_decode(self.model.generate(self.tokenizer(prompt, return_tensors="pt").to(self.device).input_ids, do_sample=True, generation_config=self.config.model_construct_env(**attrs).to_generation_config(), pad_token_id=self.tokenizer.eos_token_id, stopping_criteria=openllm.StoppingCriteriaList([openllm.StopOnTokens()])))
with torch.inference_mode():
return self.tokenizer.batch_decode(self.model.generate(self.tokenizer(prompt, return_tensors="pt").to(self.device).input_ids, do_sample=True, generation_config=self.config.model_construct_env(**attrs).to_generation_config(), pad_token_id=self.tokenizer.eos_token_id, stopping_criteria=openllm.StoppingCriteriaList([openllm.StopOnTokens()])))

View File

@@ -1,7 +1,6 @@
from __future__ import annotations
import typing as t, openllm
if t.TYPE_CHECKING: import vllm, transformers
class VLLMGPTNeoX(openllm.LLM["vllm.LLMEngine", "transformers.GPTNeoXTokenizerFast"]):
__openllm_internal__ = True
tokenizer_id = "local"

View File

@@ -2,23 +2,19 @@ from __future__ import annotations
import sys, typing as t
from openllm.exceptions import MissingDependencyError
from openllm.utils import LazyModule, is_torch_available, is_vllm_available
from openllm_core.config.configuration_llama import (
DEFAULT_PROMPT_TEMPLATE as DEFAULT_PROMPT_TEMPLATE,
PROMPT_MAPPING as PROMPT_MAPPING,
START_LLAMA_COMMAND_DOCSTRING as START_LLAMA_COMMAND_DOCSTRING,
LlamaConfig as LlamaConfig,
)
from openllm_core.config.configuration_llama import DEFAULT_PROMPT_TEMPLATE as DEFAULT_PROMPT_TEMPLATE, PROMPT_MAPPING as PROMPT_MAPPING, START_LLAMA_COMMAND_DOCSTRING as START_LLAMA_COMMAND_DOCSTRING, LlamaConfig as LlamaConfig
_import_structure: dict[str, list[str]] = {}
try:
if not is_vllm_available(): raise MissingDependencyError
except MissingDependencyError: pass
except MissingDependencyError:
pass
else:
_import_structure["modeling_vllm_llama"] = ["VLLMLlama"]
if t.TYPE_CHECKING: from .modeling_vllm_llama import VLLMLlama as VLLMLlama
try:
if not is_torch_available(): raise MissingDependencyError
except MissingDependencyError: pass
except MissingDependencyError:
pass
else:
_import_structure["modeling_llama"] = ["Llama"]
if t.TYPE_CHECKING: from .modeling_llama import Llama as Llama

View File

@@ -3,10 +3,12 @@ import typing as t, openllm
if t.TYPE_CHECKING: import transformers
class Llama(openllm.LLM["transformers.LlamaForCausalLM", "transformers.LlamaTokenizerFast"]):
__openllm_internal__ = True
@property
def import_kwargs(self) -> tuple[dict[str, t.Any], dict[str, t.Any]]:
import torch
return {"torch_dtype": torch.float16 if torch.cuda.is_available() else torch.float32}, {}
def embeddings(self, prompts: list[str]) -> openllm.LLMEmbeddings:
import torch, torch.nn.functional as F
encoding = self.tokenizer(prompts, padding=True, return_tensors="pt").to(self.device)

View File

@@ -2,23 +2,19 @@ from __future__ import annotations
import sys, typing as t
from openllm.exceptions import MissingDependencyError
from openllm.utils import LazyModule, is_torch_available, is_vllm_available
from openllm_core.config.configuration_mpt import (
DEFAULT_PROMPT_TEMPLATE as DEFAULT_PROMPT_TEMPLATE,
PROMPT_MAPPING as PROMPT_MAPPING,
START_MPT_COMMAND_DOCSTRING as START_MPT_COMMAND_DOCSTRING,
MPTConfig as MPTConfig,
)
from openllm_core.config.configuration_mpt import DEFAULT_PROMPT_TEMPLATE as DEFAULT_PROMPT_TEMPLATE, PROMPT_MAPPING as PROMPT_MAPPING, START_MPT_COMMAND_DOCSTRING as START_MPT_COMMAND_DOCSTRING, MPTConfig as MPTConfig
_import_structure: dict[str, list[str]] = {}
try:
if not is_torch_available(): raise MissingDependencyError
except MissingDependencyError: pass
except MissingDependencyError:
pass
else:
_import_structure["modeling_mpt"] = ["MPT"]
if t.TYPE_CHECKING: from .modeling_mpt import MPT as MPT
try:
if not is_vllm_available(): raise MissingDependencyError
except MissingDependencyError: pass
except MissingDependencyError:
pass
else:
_import_structure["modeling_vllm_mpt"] = ["VLLMMPT"]
if t.TYPE_CHECKING: from .modeling_vllm_mpt import VLLMMPT as VLLMMPT

View File

@@ -15,13 +15,16 @@ def get_mpt_config(model_id_or_path: str, max_sequence_length: int, device: torc
return config
class MPT(openllm.LLM["transformers.PreTrainedModel", "transformers.GPTNeoXTokenizerFast"]):
__openllm_internal__ = True
def llm_post_init(self) -> None:
import torch
self.dtype = torch.bfloat16 if torch.cuda.is_available() else torch.float32
@property
def import_kwargs(self) -> tuple[dict[str, t.Any], dict[str, t.Any]]:
import torch
return {"device_map": "auto" if torch.cuda.is_available() and torch.cuda.device_count() > 1 else None, "torch_dtype": torch.bfloat16 if torch.cuda.is_available() else torch.float32}, {}
def import_model(self, *args: t.Any, trust_remote_code: bool = True, **attrs: t.Any) -> bentoml.Model:
import torch, transformers
_, tokenizer_attrs = self.llm_parameters
@@ -32,8 +35,11 @@ class MPT(openllm.LLM["transformers.PreTrainedModel", "transformers.GPTNeoXToken
tokenizer = transformers.AutoTokenizer.from_pretrained(self.model_id, **tokenizer_attrs)
if tokenizer.pad_token_id is None: tokenizer.pad_token = tokenizer.eos_token
model = transformers.AutoModelForCausalLM.from_pretrained(self.model_id, config=config, torch_dtype=torch_dtype, trust_remote_code=trust_remote_code, device_map=device_map, **attrs)
try: return bentoml.transformers.save_model(self.tag, model, custom_objects={"tokenizer": tokenizer}, labels=generate_labels(self))
finally: torch.cuda.empty_cache()
try:
return bentoml.transformers.save_model(self.tag, model, custom_objects={"tokenizer": tokenizer}, labels=generate_labels(self))
finally:
torch.cuda.empty_cache()
def load_model(self, *args: t.Any, **attrs: t.Any) -> transformers.PreTrainedModel:
import transformers
torch_dtype = attrs.pop("torch_dtype", self.dtype)
@@ -43,6 +49,7 @@ class MPT(openllm.LLM["transformers.PreTrainedModel", "transformers.GPTNeoXToken
model = transformers.AutoModelForCausalLM.from_pretrained(self._bentomodel.path, config=config, trust_remote_code=trust_remote_code, torch_dtype=torch_dtype, device_map=device_map, **attrs)
model.tie_weights()
return model
def generate(self, prompt: str, **attrs: t.Any) -> list[str]:
import torch
llm_config = self.config.model_construct_env(**attrs)
@@ -52,5 +59,6 @@ class MPT(openllm.LLM["transformers.PreTrainedModel", "transformers.GPTNeoXToken
if torch.cuda.is_available():
with torch.autocast("cuda", torch.float16): # type: ignore[attr-defined]
generated_tensors = self.model.generate(**inputs, **attrs)
else: generated_tensors = self.model.generate(**inputs, **attrs)
else:
generated_tensors = self.model.generate(**inputs, **attrs)
return self.tokenizer.batch_decode(generated_tensors, skip_special_tokens=True)

View File

@@ -2,34 +2,33 @@ from __future__ import annotations
import sys, typing as t
from openllm.exceptions import MissingDependencyError
from openllm.utils import LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vllm_available
from openllm_core.config.configuration_opt import (
DEFAULT_PROMPT_TEMPLATE as DEFAULT_PROMPT_TEMPLATE,
START_OPT_COMMAND_DOCSTRING as START_OPT_COMMAND_DOCSTRING,
OPTConfig as OPTConfig,
)
from openllm_core.config.configuration_opt import DEFAULT_PROMPT_TEMPLATE as DEFAULT_PROMPT_TEMPLATE, START_OPT_COMMAND_DOCSTRING as START_OPT_COMMAND_DOCSTRING, OPTConfig as OPTConfig
_import_structure: dict[str, list[str]] = {}
try:
if not is_torch_available(): raise MissingDependencyError
except MissingDependencyError: pass
except MissingDependencyError:
pass
else:
_import_structure["modeling_opt"] = ["OPT"]
if t.TYPE_CHECKING: from .modeling_opt import OPT as OPT
try:
if not is_flax_available(): raise MissingDependencyError
except MissingDependencyError: pass
except MissingDependencyError:
pass
else:
_import_structure["modeling_flax_opt"] = ["FlaxOPT"]
if t.TYPE_CHECKING: from .modeling_flax_opt import FlaxOPT as FlaxOPT
try:
if not is_vllm_available(): raise MissingDependencyError
except MissingDependencyError: pass
except MissingDependencyError:
pass
else:
_import_structure["modeling_vllm_opt"] = ["VLLMOPT"]
if t.TYPE_CHECKING: from .modeling_vllm_opt import VLLMOPT as VLLMOPT
try:
if not is_tf_available(): raise MissingDependencyError
except MissingDependencyError: pass
except MissingDependencyError:
pass
else:
_import_structure["modeling_tf_opt"] = ["TFOPT"]
if t.TYPE_CHECKING: from .modeling_tf_opt import TFOPT as TFOPT

View File

@@ -9,9 +9,14 @@ else: transformers = openllm.utils.LazyLoader("transformers", globals(), "transf
logger = logging.getLogger(__name__)
class FlaxOPT(openllm.LLM["transformers.TFOPTForCausalLM", "transformers.GPT2Tokenizer"]):
__openllm_internal__ = True
def import_model(self, *args: t.Any, trust_remote_code: bool = False, **attrs: t.Any) -> bentoml.Model:
config, tokenizer = transformers.AutoConfig.from_pretrained(self.model_id), transformers.AutoTokenizer.from_pretrained(self.model_id, **self.llm_parameters[-1])
tokenizer.pad_token_id = config.pad_token_id
return bentoml.transformers.save_model(self.tag, transformers.FlaxAutoModelForCausalLM.from_pretrained(self.model_id, **attrs), custom_objects={"tokenizer": tokenizer}, labels=generate_labels(self))
def sanitize_parameters(self, prompt: str, max_new_tokens: int | None = None, temperature: float | None = None, top_k: int | None = None, num_return_sequences: int | None = None, repetition_penalty: float | None = None, use_default_prompt_template: bool = False, **attrs: t.Any) -> tuple[str, dict[str, t.Any], dict[str, t.Any]]: return process_prompt(prompt, DEFAULT_PROMPT_TEMPLATE, use_default_prompt_template, **attrs), {"max_new_tokens": max_new_tokens, "temperature": temperature, "top_k": top_k, "num_return_sequences": num_return_sequences, "repetition_penalty": repetition_penalty}, {}
def generate(self, prompt: str, **attrs: t.Any) -> list[str]: return self.tokenizer.batch_decode(self.model.generate(**self.tokenizer(prompt, return_tensors="np"), do_sample=True, generation_config=self.config.model_construct_env(**attrs).to_generation_config()).sequences, skip_special_tokens=True)
def sanitize_parameters(self, prompt: str, max_new_tokens: int | None = None, temperature: float | None = None, top_k: int | None = None, num_return_sequences: int | None = None, repetition_penalty: float | None = None, use_default_prompt_template: bool = False, **attrs: t.Any) -> tuple[str, dict[str, t.Any], dict[str, t.Any]]:
return process_prompt(prompt, DEFAULT_PROMPT_TEMPLATE, use_default_prompt_template, **attrs), {"max_new_tokens": max_new_tokens, "temperature": temperature, "top_k": top_k, "num_return_sequences": num_return_sequences, "repetition_penalty": repetition_penalty}, {}
def generate(self, prompt: str, **attrs: t.Any) -> list[str]:
return self.tokenizer.batch_decode(self.model.generate(**self.tokenizer(prompt, return_tensors="np"), do_sample=True, generation_config=self.config.model_construct_env(**attrs).to_generation_config()).sequences, skip_special_tokens=True)

View File

@@ -5,10 +5,13 @@ if t.TYPE_CHECKING: import transformers
logger = logging.getLogger(__name__)
class OPT(openllm.LLM["transformers.OPTForCausalLM", "transformers.GPT2Tokenizer"]):
__openllm_internal__ = True
@property
def import_kwargs(self) -> tuple[dict[str, t.Any], dict[str, t.Any]]:
import torch
return {"torch_dtype": torch.float16 if torch.cuda.is_available() else torch.float32}, {}
def generate(self, prompt: str, **attrs: t.Any) -> list[str]:
import torch
with torch.inference_mode(): return self.tokenizer.batch_decode(self.model.generate(**self.tokenizer(prompt, return_tensors="pt").to(self.device), do_sample=True, generation_config=self.config.model_construct_env(**attrs).to_generation_config()), skip_special_tokens=True)
with torch.inference_mode():
return self.tokenizer.batch_decode(self.model.generate(**self.tokenizer(prompt, return_tensors="pt").to(self.device), do_sample=True, generation_config=self.config.model_construct_env(**attrs).to_generation_config()), skip_special_tokens=True)

View File

@@ -4,9 +4,12 @@ from openllm_core.utils import generate_labels
if t.TYPE_CHECKING: import transformers
class TFOPT(openllm.LLM["transformers.TFOPTForCausalLM", "transformers.GPT2Tokenizer"]):
__openllm_internal__ = True
def import_model(self, *args: t.Any, trust_remote_code: bool = False, **attrs: t.Any) -> bentoml.Model:
import transformers
config, tokenizer = transformers.AutoConfig.from_pretrained(self.model_id), transformers.AutoTokenizer.from_pretrained(self.model_id, **self.llm_parameters[-1])
tokenizer.pad_token_id = config.pad_token_id
return bentoml.transformers.save_model(self.tag, transformers.TFOPTForCausalLM.from_pretrained(self.model_id, trust_remote_code=trust_remote_code, **attrs), custom_objects={"tokenizer": tokenizer}, labels=generate_labels(self))
def generate(self, prompt: str, **attrs: t.Any) -> list[str]: return self.tokenizer.batch_decode(self.model.generate(**self.tokenizer(prompt, return_tensors="tf"), do_sample=True, generation_config=self.config.model_construct_env(**attrs).to_generation_config()), skip_special_tokens=True)
def generate(self, prompt: str, **attrs: t.Any) -> list[str]:
return self.tokenizer.batch_decode(self.model.generate(**self.tokenizer(prompt, return_tensors="tf"), do_sample=True, generation_config=self.config.model_construct_env(**attrs).to_generation_config()), skip_special_tokens=True)

View File

@@ -6,4 +6,6 @@ if t.TYPE_CHECKING: import vllm, transformers
class VLLMOPT(openllm.LLM["vllm.LLMEngine", "transformers.GPT2Tokenizer"]):
__openllm_internal__ = True
tokenizer_id = "local"
def sanitize_parameters(self, prompt: str, max_new_tokens: int | None = None, temperature: float | None = None, top_k: int | None = None, num_return_sequences: int | None = None, use_default_prompt_template: bool = True, **attrs: t.Any) -> tuple[str, dict[str, t.Any], dict[str, t.Any]]: return process_prompt(prompt, DEFAULT_PROMPT_TEMPLATE, use_default_prompt_template, **attrs), {"max_new_tokens": max_new_tokens, "temperature": temperature, "top_k": top_k, "num_return_sequences": num_return_sequences}, {}
def sanitize_parameters(self, prompt: str, max_new_tokens: int | None = None, temperature: float | None = None, top_k: int | None = None, num_return_sequences: int | None = None, use_default_prompt_template: bool = True, **attrs: t.Any) -> tuple[str, dict[str, t.Any], dict[str, t.Any]]:
return process_prompt(prompt, DEFAULT_PROMPT_TEMPLATE, use_default_prompt_template, **attrs), {"max_new_tokens": max_new_tokens, "temperature": temperature, "top_k": top_k, "num_return_sequences": num_return_sequences}, {}

View File

@@ -2,22 +2,19 @@ from __future__ import annotations
import sys, typing as t
from openllm.exceptions import MissingDependencyError
from openllm.utils import LazyModule, is_torch_available, is_vllm_available
from openllm_core.config.configuration_stablelm import (
DEFAULT_PROMPT_TEMPLATE as DEFAULT_PROMPT_TEMPLATE,
START_STABLELM_COMMAND_DOCSTRING as START_STABLELM_COMMAND_DOCSTRING,
StableLMConfig as StableLMConfig,
)
from openllm_core.config.configuration_stablelm import DEFAULT_PROMPT_TEMPLATE as DEFAULT_PROMPT_TEMPLATE, START_STABLELM_COMMAND_DOCSTRING as START_STABLELM_COMMAND_DOCSTRING, StableLMConfig as StableLMConfig
_import_structure: dict[str, list[str]] = {}
try:
if not is_torch_available(): raise MissingDependencyError
except MissingDependencyError: pass
except MissingDependencyError:
pass
else:
_import_structure["modeling_stablelm"] = ["StableLM"]
if t.TYPE_CHECKING: from .modeling_stablelm import StableLM as StableLM
try:
if not is_vllm_available(): raise MissingDependencyError
except MissingDependencyError: pass
except MissingDependencyError:
pass
else:
_import_structure["modeling_vllm_stablelm"] = ["VLLMStableLM"]
if t.TYPE_CHECKING: from .modeling_vllm_stablelm import VLLMStableLM as VLLMStableLM

View File

@@ -3,13 +3,17 @@ import typing as t, openllm
if t.TYPE_CHECKING: import transformers
class StableLM(openllm.LLM["transformers.GPTNeoXForCausalLM", "transformers.GPTNeoXTokenizerFast"]):
__openllm_internal__ = True
def llm_post_init(self) -> None:
import torch
self.bettertransformer = True if not torch.cuda.is_available() else False
@property
def import_kwargs(self) -> tuple[dict[str, t.Any], dict[str, t.Any]]:
import torch
return {"torch_dtype": torch.float16 if torch.cuda.is_available() else torch.float32}, {}
def generate(self, prompt: str, **attrs: t.Any) -> list[str]:
import torch
with torch.inference_mode(): return [self.tokenizer.decode(self.model.generate(**self.tokenizer(prompt, return_tensors="pt").to(self.device), do_sample=True, generation_config=self.config.model_construct_env(**attrs).to_generation_config(), pad_token_id=self.tokenizer.eos_token_id, stopping_criteria=openllm.StoppingCriteriaList([openllm.StopOnTokens()]))[0], skip_special_tokens=True)]
with torch.inference_mode():
return [self.tokenizer.decode(self.model.generate(**self.tokenizer(prompt, return_tensors="pt").to(self.device), do_sample=True, generation_config=self.config.model_construct_env(**attrs).to_generation_config(), pad_token_id=self.tokenizer.eos_token_id, stopping_criteria=openllm.StoppingCriteriaList([openllm.StopOnTokens()]))[0], skip_special_tokens=True)]

View File

@@ -2,22 +2,19 @@ from __future__ import annotations
import sys, typing as t
from openllm.exceptions import MissingDependencyError
from openllm.utils import LazyModule, is_torch_available, is_vllm_available
from openllm_core.config.configuration_starcoder import (
DEFAULT_PROMPT_TEMPLATE as DEFAULT_PROMPT_TEMPLATE,
START_STARCODER_COMMAND_DOCSTRING as START_STARCODER_COMMAND_DOCSTRING,
StarCoderConfig as StarCoderConfig,
)
from openllm_core.config.configuration_starcoder import DEFAULT_PROMPT_TEMPLATE as DEFAULT_PROMPT_TEMPLATE, START_STARCODER_COMMAND_DOCSTRING as START_STARCODER_COMMAND_DOCSTRING, StarCoderConfig as StarCoderConfig
_import_structure: dict[str, list[str]] = {}
try:
if not is_torch_available(): raise MissingDependencyError
except MissingDependencyError: pass
except MissingDependencyError:
pass
else:
_import_structure["modeling_starcoder"] = ["StarCoder"]
if t.TYPE_CHECKING: from .modeling_starcoder import StarCoder as StarCoder
try:
if not is_vllm_available(): raise MissingDependencyError
except MissingDependencyError: pass
except MissingDependencyError:
pass
else:
_import_structure["modeling_vllm_starcoder"] = ["VLLMStarCoder"]
if t.TYPE_CHECKING: from .modeling_vllm_starcoder import VLLMStarCoder as VLLMStarCoder

View File

@@ -5,18 +5,23 @@ from openllm_core.config.configuration_starcoder import EOD, FIM_MIDDLE, FIM_PAD
if t.TYPE_CHECKING: import transformers
class StarCoder(openllm.LLM["transformers.GPTBigCodeForCausalLM", "transformers.GPT2TokenizerFast"]):
__openllm_internal__ = True
@property
def import_kwargs(self) -> tuple[dict[str, t.Any], dict[str, t.Any]]:
import torch
return {"device_map": "auto" if torch.cuda.is_available() and torch.cuda.device_count() > 1 else None, "torch_dtype": torch.float16 if torch.cuda.is_available() else torch.float32}, {}
def import_model(self, *args: t.Any, trust_remote_code: bool = False, **attrs: t.Any) -> bentoml.Model:
import torch, transformers
torch_dtype, device_map = attrs.pop("torch_dtype", torch.float16), attrs.pop("device_map", "auto")
tokenizer = transformers.AutoTokenizer.from_pretrained(self.model_id, **self.llm_parameters[-1])
tokenizer.add_special_tokens({"additional_special_tokens": [EOD, FIM_PREFIX, FIM_MIDDLE, FIM_SUFFIX, FIM_PAD], "pad_token": EOD})
model = transformers.AutoModelForCausalLM.from_pretrained(self.model_id, torch_dtype=torch_dtype, device_map=device_map, **attrs)
try: return bentoml.transformers.save_model(self.tag, model, custom_objects={"tokenizer": tokenizer}, labels=generate_labels(self))
finally: torch.cuda.empty_cache()
try:
return bentoml.transformers.save_model(self.tag, model, custom_objects={"tokenizer": tokenizer}, labels=generate_labels(self))
finally:
torch.cuda.empty_cache()
def generate(self, prompt: str, **attrs: t.Any) -> list[str]:
import torch
with torch.inference_mode():
@@ -26,6 +31,7 @@ class StarCoder(openllm.LLM["transformers.GPTBigCodeForCausalLM", "transformers.
# TODO: We will probably want to return the tokenizer here so that we can manually process this
# return (skip_special_tokens=False, clean_up_tokenization_spaces=False))
return self.tokenizer.batch_decode(result_tensor[0], skip_special_tokens=True, clean_up_tokenization_spaces=True)
def generate_one(self, prompt: str, stop: list[str], **preprocess_generate_kwds: t.Any) -> list[dict[t.Literal["generated_text"], str]]:
max_new_tokens, encoded_inputs = preprocess_generate_kwds.pop("max_new_tokens", 200), self.tokenizer(prompt, return_tensors="pt").to(self.device)
src_len, stopping_criteria = encoded_inputs["input_ids"].shape[1], preprocess_generate_kwds.pop("stopping_criteria", openllm.StoppingCriteriaList([]))

View File

@@ -30,12 +30,7 @@ from openllm_core._typing_compat import M, T, ParamSpec
if t.TYPE_CHECKING:
import bentoml
from . import (
constants as constants,
ggml as ggml,
transformers as transformers,
)
from . import constants as constants, ggml as ggml, transformers as transformers
P = ParamSpec("P")
def load_tokenizer(llm: openllm.LLM[t.Any, T], **tokenizer_attrs: t.Any) -> T:
"""Load the tokenizer from BentoML store.
@@ -49,11 +44,14 @@ def load_tokenizer(llm: openllm.LLM[t.Any, T], **tokenizer_attrs: t.Any) -> T:
bentomodel_fs = fs.open_fs(llm._bentomodel.path)
if bentomodel_fs.isfile(CUSTOM_OBJECTS_FILENAME):
with bentomodel_fs.open(CUSTOM_OBJECTS_FILENAME, "rb") as cofile:
try: tokenizer = cloudpickle.load(t.cast("t.IO[bytes]", cofile))["tokenizer"]
except KeyError: raise openllm.exceptions.OpenLLMException("Bento model does not have tokenizer. Make sure to save"
" the tokenizer within the model via 'custom_objects'."
" For example: \"bentoml.transformers.save_model(..., custom_objects={'tokenizer': tokenizer})\"") from None
else: tokenizer = infer_tokenizers_from_llm(llm).from_pretrained(bentomodel_fs.getsyspath("/"), trust_remote_code=llm.__llm_trust_remote_code__, **tokenizer_attrs)
try:
tokenizer = cloudpickle.load(t.cast("t.IO[bytes]", cofile))["tokenizer"]
except KeyError:
raise openllm.exceptions.OpenLLMException("Bento model does not have tokenizer. Make sure to save"
" the tokenizer within the model via 'custom_objects'."
" For example: \"bentoml.transformers.save_model(..., custom_objects={'tokenizer': tokenizer})\"") from None
else:
tokenizer = infer_tokenizers_from_llm(llm).from_pretrained(bentomodel_fs.getsyspath("/"), trust_remote_code=llm.__llm_trust_remote_code__, **tokenizer_attrs)
if tokenizer.pad_token_id is None:
if config.pad_token_id is not None: tokenizer.pad_token_id = config.pad_token_id
@@ -61,10 +59,9 @@ def load_tokenizer(llm: openllm.LLM[t.Any, T], **tokenizer_attrs: t.Any) -> T:
elif tokenizer.eos_token_id is not None: tokenizer.pad_token_id = tokenizer.eos_token_id
else: tokenizer.add_special_tokens({"pad_token": "[PAD]"})
return tokenizer
class _Caller(t.Protocol[P]):
def __call__(self, llm: openllm.LLM[M, T], *args: P.args, **kwargs: P.kwargs) -> t.Any: ...
def __call__(self, llm: openllm.LLM[M, T], *args: P.args, **kwargs: P.kwargs) -> t.Any:
...
_extras = ["get", "import_model", "save_pretrained", "load_model"]
def _make_dispatch_function(fn: str) -> _Caller[P]:
def caller(llm: openllm.LLM[M, T], *args: P.args, **kwargs: P.kwargs) -> t.Any:
@@ -75,17 +72,25 @@ def _make_dispatch_function(fn: str) -> _Caller[P]:
> [!NOTE] See 'openllm.serialisation.ggml' if 'llm.runtime="ggml"'
"""
return getattr(importlib.import_module(f".{llm.runtime}", __name__), fn)(llm, *args, **kwargs)
return caller
if t.TYPE_CHECKING:
def get(llm: openllm.LLM[M, T], *args: t.Any, **kwargs: t.Any) -> bentoml.Model: ...
def import_model(llm: openllm.LLM[M, T], *args: t.Any, **kwargs: t.Any) -> bentoml.Model: ...
def save_pretrained(llm: openllm.LLM[M, T], *args: t.Any, **kwargs: t.Any) -> None: ...
def load_model(llm: openllm.LLM[M, T], *args: t.Any, **kwargs: t.Any) -> M: ...
def get(llm: openllm.LLM[M, T], *args: t.Any, **kwargs: t.Any) -> bentoml.Model:
...
def import_model(llm: openllm.LLM[M, T], *args: t.Any, **kwargs: t.Any) -> bentoml.Model:
...
def save_pretrained(llm: openllm.LLM[M, T], *args: t.Any, **kwargs: t.Any) -> None:
...
def load_model(llm: openllm.LLM[M, T], *args: t.Any, **kwargs: t.Any) -> M:
...
_import_structure: dict[str, list[str]] = {"ggml": [], "transformers": [], "constants": []}
__all__ = ["ggml", "transformers", "constants", "load_tokenizer", *_extras]
def __dir__() -> list[str]: return sorted(__all__)
def __dir__() -> list[str]:
return sorted(__all__)
def __getattr__(name: str) -> t.Any:
if name == "load_tokenizer": return load_tokenizer
elif name in _import_structure: return importlib.import_module(f".{name}", __name__)

View File

@@ -9,8 +9,8 @@ import bentoml, openllm
if t.TYPE_CHECKING: from openllm_core._typing_compat import M
_conversion_strategy = {"pt": "ggml"}
def import_model(llm: openllm.LLM[t.Any, t.Any], *decls: t.Any, trust_remote_code: bool = True, **attrs: t.Any,) -> bentoml.Model: raise NotImplementedError("Currently work in progress.")
def import_model(llm: openllm.LLM[t.Any, t.Any], *decls: t.Any, trust_remote_code: bool = True, **attrs: t.Any,) -> bentoml.Model:
raise NotImplementedError("Currently work in progress.")
def get(llm: openllm.LLM[t.Any, t.Any], auto_import: bool = False) -> bentoml.Model:
"""Return an instance of ``bentoml.Model`` from given LLM instance.
@@ -30,5 +30,7 @@ def get(llm: openllm.LLM[t.Any, t.Any], auto_import: bool = False) -> bentoml.Mo
if auto_import:
return import_model(llm, trust_remote_code=llm.__llm_trust_remote_code__)
raise
def load_model(llm: openllm.LLM[M, t.Any], *decls: t.Any, **attrs: t.Any) -> M: raise NotImplementedError("Currently work in progress.")
def save_pretrained(llm: openllm.LLM[t.Any, t.Any], save_directory: str, **attrs: t.Any) -> None: raise NotImplementedError("Currently work in progress.")
def load_model(llm: openllm.LLM[M, t.Any], *decls: t.Any, **attrs: t.Any) -> M:
raise NotImplementedError("Currently work in progress.")
def save_pretrained(llm: openllm.LLM[t.Any, t.Any], save_directory: str, **attrs: t.Any) -> None:
raise NotImplementedError("Currently work in progress.")

View File

@@ -7,19 +7,12 @@ from simple_di import Provide, inject
from bentoml._internal.configuration.containers import BentoMLContainer
from bentoml._internal.models.model import ModelOptions
from .weights import HfIgnore
from ._helpers import (
check_unintialised_params,
infer_autoclass_from_llm,
infer_tokenizers_from_llm,
make_model_signatures,
process_config,
update_model,
)
from ._helpers import check_unintialised_params, infer_autoclass_from_llm, infer_tokenizers_from_llm, make_model_signatures, process_config, update_model
if t.TYPE_CHECKING:
import types
import vllm, auto_gptq as autogptq, transformers ,torch
import vllm, auto_gptq as autogptq, transformers, torch
import torch.nn
from bentoml._internal.models import ModelStore
@@ -33,7 +26,6 @@ else:
logger = logging.getLogger(__name__)
__all__ = ["import_model", "get", "load_model", "save_pretrained"]
@inject
def import_model(llm: openllm.LLM[M, T], *decls: t.Any, trust_remote_code: bool, _model_store: ModelStore = Provide[BentoMLContainer.model_store], **attrs: t.Any) -> bentoml.Model:
"""Auto detect model type from given model_id and import it to bentoml's model store.
@@ -106,7 +98,8 @@ def import_model(llm: openllm.LLM[M, T], *decls: t.Any, trust_remote_code: bool,
else:
# we will clone the all tings into the bentomodel path without loading model into memory
snapshot_download(llm.model_id, local_dir=bentomodel.path, local_dir_use_symlinks=False, ignore_patterns=HfIgnore.ignore_patterns(llm))
except Exception: raise
except Exception:
raise
else:
bentomodel.flush() # type: ignore[no-untyped-call]
bentomodel.save(_model_store)
@@ -117,7 +110,6 @@ def import_model(llm: openllm.LLM[M, T], *decls: t.Any, trust_remote_code: bool,
# in the case where users first run openllm start without the model available locally.
if openllm.utils.is_torch_available() and torch.cuda.is_available(): torch.cuda.empty_cache()
return bentomodel
def get(llm: openllm.LLM[M, T], auto_import: bool = False) -> bentoml.Model:
"""Return an instance of ``bentoml.Model`` from given LLM instance.
@@ -128,7 +120,8 @@ def get(llm: openllm.LLM[M, T], auto_import: bool = False) -> bentoml.Model:
"""
try:
model = bentoml.models.get(llm.tag)
if model.info.module not in ("openllm.serialisation.transformers" "bentoml.transformers", "bentoml._internal.frameworks.transformers", __name__): # NOTE: backward compatible with previous version of OpenLLM.
if model.info.module not in ("openllm.serialisation.transformers"
"bentoml.transformers", "bentoml._internal.frameworks.transformers", __name__): # NOTE: backward compatible with previous version of OpenLLM.
raise bentoml.exceptions.NotFound(f"Model {model.tag} was saved with module {model.info.module}, not loading with 'openllm.serialisation.transformers'.")
if "runtime" in model.info.labels and model.info.labels["runtime"] != llm.runtime:
raise openllm.exceptions.OpenLLMException(f"Model {model.tag} was saved with runtime {model.info.labels['runtime']}, not loading with {llm.runtime}.")
@@ -136,7 +129,6 @@ def get(llm: openllm.LLM[M, T], auto_import: bool = False) -> bentoml.Model:
except bentoml.exceptions.NotFound as err:
if auto_import: return import_model(llm, trust_remote_code=llm.__llm_trust_remote_code__)
raise err from None
def load_model(llm: openllm.LLM[M, T], *decls: t.Any, **attrs: t.Any) -> M:
"""Load the model from BentoML store.
@@ -156,7 +148,6 @@ def load_model(llm: openllm.LLM[M, T], *decls: t.Any, **attrs: t.Any) -> M:
if llm.bettertransformer and isinstance(model, transformers.PreTrainedModel): model = model.to_bettertransformer()
if llm.__llm_implementation__ in {"pt", "vllm"}: check_unintialised_params(model)
return t.cast("M", model)
def save_pretrained(llm: openllm.LLM[M, T], save_directory: str, is_main_process: bool = True, state_dict: DictStrAny | None = None, save_function: t.Any | None = None, push_to_hub: bool = False, max_shard_size: int | str = "10GB", safe_serialization: bool = False, variant: str | None = None, **attrs: t.Any) -> None:
save_function = t.cast(t.Callable[..., None], openllm.utils.first_not_none(save_function, default=torch.save))
model_save_attrs, tokenizer_save_attrs = openllm.utils.normalize_attrs_to_model_tokenizer_pair(**attrs)

View File

@@ -8,10 +8,10 @@ if t.TYPE_CHECKING:
from transformers.models.auto.auto_factory import _BaseAutoModelClass
from bentoml._internal.models.model import ModelSignaturesType
from openllm_core._typing_compat import DictStrAny, M, T
else: transformers, torch = openllm_core.utils.LazyLoader("transformers", globals(), "transformers"), openllm_core.utils.LazyLoader("torch", globals(), "torch")
else:
transformers, torch = openllm_core.utils.LazyLoader("transformers", globals(), "transformers"), openllm_core.utils.LazyLoader("torch", globals(), "torch")
_object_setattr = object.__setattr__
def process_config(model_id: str, trust_remote_code: bool, **attrs: t.Any) -> tuple[transformers.PretrainedConfig, DictStrAny, DictStrAny]:
"""A helper function that correctly parse config and attributes for transformers.PretrainedConfig.
@@ -31,12 +31,10 @@ def process_config(model_id: str, trust_remote_code: bool, **attrs: t.Any) -> tu
if copied_attrs.get("torch_dtype", None) == "auto": copied_attrs.pop("torch_dtype")
config, attrs = transformers.AutoConfig.from_pretrained(model_id, return_unused_kwargs=True, trust_remote_code=trust_remote_code, **hub_attrs, **copied_attrs)
return config, hub_attrs, attrs
def infer_tokenizers_from_llm(__llm: openllm.LLM[t.Any, T], /) -> T:
  """Resolve the ``transformers`` tokenizer class to use for ``__llm``.

  The class name is taken from ``__llm.config["tokenizer_class"]`` when set,
  otherwise falls back to ``AutoTokenizer``, and is looked up on ``transformers``.

  Raises:
    ValueError: if the configured name does not exist on ``transformers``.
  """
  tokenizer_name = openllm_core.utils.first_not_none(__llm.config["tokenizer_class"], default="AutoTokenizer")
  tokenizer_cls = getattr(transformers, tokenizer_name, None)
  if tokenizer_cls is None:
    raise ValueError(f"Cannot infer correct tokenizer class for {__llm}. Make sure to unset `tokenizer_class`")
  return tokenizer_cls
def infer_autoclass_from_llm(llm: openllm.LLM[M, T], config: transformers.PretrainedConfig, /) -> _BaseAutoModelClass:
if llm.config["trust_remote_code"]:
autoclass = "AutoModelForSeq2SeqLM" if llm.config["model_type"] == "seq2seq_lm" else "AutoModelForCausalLM"
@@ -50,22 +48,16 @@ def infer_autoclass_from_llm(llm: openllm.LLM[M, T], config: transformers.Pretra
elif type(config) in transformers.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING: idx = 1
else: raise openllm.exceptions.OpenLLMException(f"Model type {type(config)} is not supported yet.")
return getattr(transformers, FRAMEWORK_TO_AUTOCLASS_MAPPING[llm.__llm_implementation__][idx])
def check_unintialised_params(model: torch.nn.Module) -> None:
  """Raise if ``model`` still holds parameters on the ``meta`` device.

  Parameters left on ``meta`` carry no real storage (e.g. from a deferred /
  low-memory load), so running inference with them would be invalid.

  Raises:
    RuntimeError: listing the names of all uninitialised parameters found.
  """
  meta_device = torch.device("meta")
  unintialized = [name for name, parameter in model.named_parameters() if parameter.data.device == meta_device]
  if unintialized:
    raise RuntimeError(f"Found the following unintialized parameters in {model}: {unintialized}")
def update_model(bentomodel: bentoml.Model, metadata: DictStrAny) -> bentoml.Model:
  """Return ``bentomodel`` with ``metadata`` merged into its ``info.metadata``.

  ``bentoml.Model.info`` is read-only, so a fresh ``ModelInfo`` carrying the
  merged metadata is built from the existing info fields and swapped in via
  ``object.__setattr__``. The diff artifact that performed this swap twice has
  been collapsed to a single call.
  """
  merged: DictStrAny = copy.deepcopy(bentomodel.info.metadata)
  merged.update(metadata)
  _object_setattr(bentomodel, "_info", ModelInfo( # type: ignore[call-arg] # XXX: remove me once upstream is merged
      tag=bentomodel.info.tag, module=bentomodel.info.module, labels=bentomodel.info.labels, options=bentomodel.info.options.to_dict(), signatures=bentomodel.info.signatures, context=bentomodel.info.context, api_version=bentomodel.info.api_version, creation_time=bentomodel.info.creation_time, metadata=merged
  ))
  return bentomodel
# NOTE: sync with bentoml/_internal/frameworks/transformers.py#make_default_signatures
def make_model_signatures(llm: openllm.LLM[M, T]) -> ModelSignaturesType:
infer_fn: tuple[str, ...] = ("__call__",)

View File

@@ -4,14 +4,15 @@ from huggingface_hub import HfApi
if t.TYPE_CHECKING:
import openllm
from openllm_core._typing_compat import M, T
def has_safetensors_weights(model_id: str, revision: str | None = None) -> bool:
  """Return True if the Hub repo ``model_id`` (at ``revision``) contains any ``.safetensors`` file.

  Queries the HuggingFace Hub via ``HfApi.model_info`` and scans the repo's
  sibling filenames. The diff artifact that defined this function twice has
  been collapsed to a single definition.
  """
  return any(s.rfilename.endswith(".safetensors") for s in HfApi().model_info(model_id, revision=revision).siblings)
@attr.define(slots=True)
class HfIgnore:
safetensors = "*.safetensors"
pt = "*.bin"
tf = "*.h5"
flax = "*.msgpack"
@classmethod
def ignore_patterns(cls, llm: openllm.LLM[M, T]) -> list[str]:
if llm.__llm_implementation__ == "vllm": base = [cls.tf, cls.flax, cls.safetensors]

View File

@@ -5,18 +5,13 @@ we won't ensure backward compatibility for these functions. So use with caution.
"""
from __future__ import annotations
import typing as t, openllm_core
# Duplicated pre-/post-image import collapsed to one statement.
# The "x as x" aliasing marks these submodules as explicit re-exports for type checkers.
from . import (dummy_flax_objects as dummy_flax_objects, dummy_pt_objects as dummy_pt_objects, dummy_tf_objects as dummy_tf_objects, dummy_vllm_objects as dummy_vllm_objects)
# Imports evaluated only by static type checkers; skipped at runtime.
if t.TYPE_CHECKING:
  from openllm_core._typing_compat import LiteralRuntime
  import openllm
def generate_labels(llm: openllm.LLM[t.Any, t.Any]) -> dict[str, t.Any]:
  """Build the label mapping attached to artifacts saved for ``llm``.

  Labels capture the runtime, framework, model name, architecture and
  serialisation format so stored models can later be matched against the
  loading LLM. The diff artifact that defined this function twice has been
  collapsed to a single definition.
  """
  return {"runtime": llm.runtime, "framework": "openllm", "model_name": llm.config["model_name"], "architecture": llm.config["architecture"], "serialisation_format": llm._serialisation_format}
def infer_auto_class(implementation: LiteralRuntime) -> type[openllm.AutoLLM | openllm.AutoTFLLM | openllm.AutoFlaxLLM | openllm.AutoVLLM]:
import openllm
if implementation == "tf": return openllm.AutoTFLLM
@@ -24,9 +19,9 @@ def infer_auto_class(implementation: LiteralRuntime) -> type[openllm.AutoLLM | o
elif implementation == "pt": return openllm.AutoLLM
elif implementation == "vllm": return openllm.AutoVLLM
else: raise RuntimeError(f"Unknown implementation: {implementation} (supported: 'pt', 'flax', 'tf', 'vllm')")
# Public API of this module.
__all__ = ["generate_labels", "infer_auto_class", "dummy_flax_objects", "dummy_pt_objects", "dummy_tf_objects", "dummy_vllm_objects"]
def __dir__() -> t.Sequence[str]:
  """Expose only the declared public API, sorted, for ``dir(module)``.

  The diff artifact that defined this function twice has been collapsed to a
  single definition.
  """
  return sorted(__all__)
def __getattr__(it: str) -> t.Any:
  """Delegate unknown module-level attribute lookups to ``openllm_core.utils``.

  Raises:
    AttributeError: when ``openllm_core.utils`` does not provide ``it`` either.
  """
  if not hasattr(openllm_core.utils, it):
    raise AttributeError(f"module {__name__} has no attribute {it}")
  return getattr(openllm_core.utils, it)