Mirror of https://github.com/bentoml/OpenLLM.git (synced 2026-01-17 03:47:54 -05:00)
refactor: focus (#730)
* perf: remove base images
* chore: update changelog
* chore: move Dockerfile build to run on release only
* chore: clean up unused types

Signed-off-by: Aaron <29749331+aarnphm@users.noreply.github.com>
@@ -1,17 +1,9 @@
from __future__ import annotations
import functools
import logging
import os
import typing as t

import click
import click_option_group as cog
import inflection
import functools, logging, os, typing as t
import bentoml, openllm, click, inflection, click_option_group as cog
from bentoml_cli.utils import BentoMLCommandGroup
from click import shell_completion as sc

import bentoml
import openllm
from openllm_core._configuration import LLMConfig
from openllm_core._typing_compat import (
  Concatenate,
@@ -23,7 +15,6 @@ from openllm_core._typing_compat import (
)
from openllm_core.utils import DEBUG, compose, dantic, resolve_user_filepath


class _OpenLLM_GenericInternalConfig(LLMConfig):
  __config__ = {
    'name_type': 'lowercase',
@@ -38,7 +29,6 @@ class _OpenLLM_GenericInternalConfig(LLMConfig):
  temperature: float = 0.75
  max_new_tokens: int = 128


logger = logging.getLogger(__name__)

P = ParamSpec('P')
@@ -47,7 +37,6 @@ LiteralOutput = t.Literal['json', 'pretty', 'porcelain']
_AnyCallable = t.Callable[..., t.Any]
FC = t.TypeVar('FC', bound=t.Union[_AnyCallable, click.Command])


def bento_complete_envvar(ctx: click.Context, param: click.Parameter, incomplete: str) -> list[sc.CompletionItem]:
  return [
    sc.CompletionItem(str(it.tag), help='Bento')
@@ -55,7 +44,6 @@ def bento_complete_envvar(ctx: click.Context, param: click.Parameter, incomplete
    if str(it.tag).startswith(incomplete) and all(k in it.info.labels for k in {'start_name', 'bundler'})
  ]


def model_complete_envvar(ctx: click.Context, param: click.Parameter, incomplete: str) -> list[sc.CompletionItem]:
  return [
    sc.CompletionItem(inflection.dasherize(it), help='Model')
@@ -63,7 +51,6 @@ def model_complete_envvar(ctx: click.Context, param: click.Parameter, incomplete
    if it.startswith(incomplete)
  ]


def parse_config_options(
  config: LLMConfig,
  server_timeout: int,
@@ -132,60 +119,52 @@ def _id_callback(ctx: click.Context, _: click.Parameter, value: t.Tuple[str, ...
  return None


def start_decorator(serve_grpc: bool = False) -> t.Callable[[FC], t.Callable[[FC], FC]]:
  def wrapper(fn: FC) -> t.Callable[[FC], FC]:
    composed = compose(
      _OpenLLM_GenericInternalConfig.parse,
      _http_server_args if not serve_grpc else _grpc_server_args,
      cog.optgroup.group('General LLM Options', help='The following options are related to running LLM Server.'),
      dtype_option(factory=cog.optgroup),
      model_version_option(factory=cog.optgroup),
      cog.optgroup.option('--server-timeout', type=int, default=None, help='Server timeout in seconds'),
      workers_per_resource_option(factory=cog.optgroup),
      cors_option(factory=cog.optgroup),
      backend_option(factory=cog.optgroup),
      cog.optgroup.group(
        'LLM Optimization Options',
        help='''Optimization related options.
def start_decorator(fn: FC) -> FC:
  composed = compose(
    _OpenLLM_GenericInternalConfig.parse,
    _http_server_args,
    cog.optgroup.group('General LLM Options', help='The following options are related to running LLM Server.'),
    dtype_option(factory=cog.optgroup),
    model_version_option(factory=cog.optgroup),
    cog.optgroup.option('--server-timeout', type=int, default=None, help='Server timeout in seconds'),
    workers_per_resource_option(factory=cog.optgroup),
    cors_option(factory=cog.optgroup),
    backend_option(factory=cog.optgroup),
    cog.optgroup.group(
      'LLM Optimization Options',
      help='''Optimization related options.

OpenLLM supports running model k-bit quantization (8-bit, 4-bit), GPTQ quantization, PagedAttention via vLLM.
OpenLLM supports running model k-bit quantization (8-bit, 4-bit), GPTQ quantization, PagedAttention via vLLM.

The following are either in our roadmap or currently being worked on:
The following are either in our roadmap or currently being worked on:

- DeepSpeed Inference: [link](https://www.deepspeed.ai/inference/)
- GGML: Fast inference on [bare metal](https://github.com/ggerganov/ggml)
''',
      ),
      quantize_option(factory=cog.optgroup),
      serialisation_option(factory=cog.optgroup),
      cog.optgroup.option(
        '--device',
        type=dantic.CUDA,
        multiple=True,
        envvar='CUDA_VISIBLE_DEVICES',
        callback=parse_device_callback,
        help='Assign GPU devices (if available)',
        show_envvar=True,
      ),
      adapter_id_option(factory=cog.optgroup),
      click.option('--return-process', is_flag=True, default=False, help='Internal use only.', hidden=True),
    )
    return composed(fn)
- DeepSpeed Inference: [link](https://www.deepspeed.ai/inference/)
- GGML: Fast inference on [bare metal](https://github.com/ggerganov/ggml)
''',
    ),
    quantize_option(factory=cog.optgroup),
    serialisation_option(factory=cog.optgroup),
    cog.optgroup.option(
      '--device',
      type=dantic.CUDA,
      multiple=True,
      envvar='CUDA_VISIBLE_DEVICES',
      callback=parse_device_callback,
      help='Assign GPU devices (if available)',
      show_envvar=True,
    ),
    adapter_id_option(factory=cog.optgroup),
    click.option('--return-process', is_flag=True, default=False, help='Internal use only.', hidden=True),
  )

  return wrapper
  return composed(fn)
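Aside: the refactored `start_decorator` now leans entirely on `compose` from `openllm_core.utils` to stack the click/optgroup decorators. A minimal sketch of such a composition helper, assuming the decorators are applied to the function last-listed first, like ordinary stacked `@decorator` syntax (the real implementation in `openllm_core.utils` may order them differently):

```python
import functools
import typing as t

def compose(*decorators: t.Callable[[t.Any], t.Any]) -> t.Callable[[t.Any], t.Any]:
  # Fold the decorators over the target function, mimicking how a stack of
  # @decorator lines is applied bottom-up.
  def composed(fn: t.Any) -> t.Any:
    return functools.reduce(lambda acc, dec: dec(acc), reversed(decorators), fn)
  return composed
```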
def parse_device_callback(
  ctx: click.Context, param: click.Parameter, value: tuple[tuple[str], ...] | None
) -> t.Tuple[str, ...] | None:
def parse_device_callback(_: click.Context, param: click.Parameter, value: tuple[tuple[str], ...] | None) -> t.Tuple[str, ...] | None:
  if value is None:
    return value
  if not isinstance(value, tuple):
    ctx.fail(f'{param} only accept multiple values, not {type(value)} (value: {value})')
  el: t.Tuple[str, ...] = tuple(i for k in value for i in k)
  # NOTE: --device all is a special case
  if len(el) == 1 and el[0] == 'all':
    return tuple(map(str, openllm.utils.available_devices()))
  if len(el) == 1 and el[0] == 'all': return tuple(map(str, openllm.utils.available_devices()))
  return el

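For illustration, this is how the flattening and the special `all` value behave, assuming four GPUs are visible (the device list normally comes from `openllm.utils.available_devices()`):

```python
# click collects repeated --device flags as a tuple of 1-tuples.
value = (('0',), ('1',))
flat = tuple(i for k in value for i in k)
assert flat == ('0', '1')

# '--device all' arrives as (('all',),) and expands to every visible device.
value = (('all',),)
flat = tuple(i for k in value for i in k)
if len(flat) == 1 and flat[0] == 'all':
  flat = ('0', '1', '2', '3')  # stand-in for openllm.utils.available_devices()
assert flat == ('0', '1', '2', '3')
```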
@@ -195,18 +174,12 @@ def parse_device_callback(
_IGNORED_OPTIONS = {'working_dir', 'production', 'protocol_version'}


def parse_serve_args(serve_grpc: bool) -> t.Callable[[t.Callable[..., LLMConfig]], t.Callable[[FC], FC]]:
  '''Parsing `bentoml serve|serve-grpc` click.Option to be parsed via `openllm start`.'''
def parse_serve_args() -> t.Callable[[t.Callable[..., LLMConfig]], t.Callable[[FC], FC]]:
  from bentoml_cli.cli import cli

  command = 'serve' if not serve_grpc else 'serve-grpc'
  group = cog.optgroup.group(
    f"Start a {'HTTP' if not serve_grpc else 'gRPC'} server options",
    help=f"Related to serving the model [synonymous to `bentoml {'serve-http' if not serve_grpc else command }`]",
  )

  group = cog.optgroup.group('Start a HTTP server options', help='Related to serving the model [synonymous to `bentoml serve-http`]')
  def decorator(f: t.Callable[Concatenate[int, t.Optional[str], P], LLMConfig]) -> t.Callable[[FC], FC]:
    serve_command = cli.commands[command]
    serve_command = cli.commands['serve']
    # The first variable is the argument bento
    # The last five is from BentoMLCommandGroup.NUMBER_OF_COMMON_PARAMS
    serve_options = [
@@ -225,12 +198,9 @@ def parse_serve_args(serve_grpc: bool) -> t.Callable[[t.Callable[..., LLMConfig]
      param_decls = (*attrs.pop('opts'), *attrs.pop('secondary_opts'))
      f = cog.optgroup.option(*param_decls, **attrs)(f)
    return group(f)

  return decorator


_http_server_args, _grpc_server_args = parse_serve_args(False), parse_serve_args(True)

_http_server_args = parse_serve_args()


def _click_factory_type(*param_decls: t.Any, **attrs: t.Any) -> t.Callable[[FC | None], FC]:
  '''General ``@click`` decorator with some sauce.
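The option helpers below (`cors_option`, `dtype_option`, `quantize_option`, ...) are all built on this factory. A rough sketch of the pattern with a hypothetical, simplified `cli_option` (the real `_click_factory_type` in `_factory.py` carries more bookkeeping such as env vars and defaults):

```python
import typing as t
import click

def cli_option(*param_decls: t.Any, **attrs: t.Any) -> t.Callable[[t.Any], t.Any]:
  # 'factory' lets the same helper emit either a plain click option or a
  # click_option_group option (e.g. factory=cog.optgroup).
  factory = attrs.pop('factory', click)
  def decorator(f: t.Any) -> t.Any:
    return factory.option(*param_decls, **attrs)(f)
  return decorator

# Usage mirroring dtype_option(factory=cog.optgroup) elsewhere in this file:
@cli_option('--dtype', default='auto', help='Torch dtype for the model.')
def demo(dtype: str) -> None:
  print(dtype)
```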
@@ -278,11 +248,9 @@ def cors_option(f: _AnyCallable | None = None, **attrs: t.Any) -> t.Callable[[FC
    **attrs,
  )(f)


def machine_option(f: _AnyCallable | None = None, **attrs: t.Any) -> t.Callable[[FC], FC]:
  return cli_option('--machine', is_flag=True, default=False, hidden=True, **attrs)(f)


def dtype_option(f: _AnyCallable | None = None, **attrs: t.Any) -> t.Callable[[FC], FC]:
  return cli_option(
    '--dtype',
@@ -293,7 +261,6 @@ def dtype_option(f: _AnyCallable | None = None, **attrs: t.Any) -> t.Callable[[F
    **attrs,
  )(f)


def model_id_option(f: _AnyCallable | None = None, **attrs: t.Any) -> t.Callable[[FC], FC]:
  return cli_option(
    '--model-id',
@@ -327,7 +294,6 @@ def backend_option(f: _AnyCallable | None = None, **attrs: t.Any) -> t.Callable[
    **attrs,
  )(f)


def model_name_argument(f: _AnyCallable | None = None, required: bool = True, **attrs: t.Any) -> t.Callable[[FC], FC]:
  return cli_argument(
    'model_name',
@@ -336,7 +302,6 @@ def model_name_argument(f: _AnyCallable | None = None, required: bool = True, **
    **attrs,
  )(f)


def quantize_option(f: _AnyCallable | None = None, *, build: bool = False, **attrs: t.Any) -> t.Callable[[FC], FC]:
  return cli_option(
    '--quantise',
@@ -405,7 +370,6 @@ def workers_per_resource_option(
    **attrs,
  )(f)


def serialisation_option(f: _AnyCallable | None = None, **attrs: t.Any) -> t.Callable[[FC], FC]:
  return cli_option(
    '--serialisation',
@@ -429,25 +393,8 @@ def serialisation_option(f: _AnyCallable | None = None, **attrs: t.Any) -> t.Cal
    **attrs,
  )(f)


def container_registry_option(f: _AnyCallable | None = None, **attrs: t.Any) -> t.Callable[[FC], FC]:
  return cli_option(
    '--container-registry',
    'container_registry',
    type=click.Choice(list(openllm.bundle.CONTAINER_NAMES)),
    default='ecr',
    show_default=True,
    show_envvar=True,
    envvar='OPENLLM_CONTAINER_REGISTRY',
    callback=container_registry_callback,
    help='The default container registry to get the base image for building BentoLLM. Currently, it supports ecr, ghcr, docker',
    **attrs,
  )(f)


_wpr_strategies = {'round_robin', 'conserved'}


def workers_per_resource_callback(ctx: click.Context, param: click.Parameter, value: str | None) -> str | None:
  if value is None:
    return value
@@ -465,11 +412,3 @@ def workers_per_resource_callback(ctx: click.Context, param: click.Parameter, va
    ) from None
  else:
    return value


def container_registry_callback(ctx: click.Context, param: click.Parameter, value: str | None) -> str | None:
  if value is None:
    return value
  if value not in openllm.bundle.supported_registries:
    raise click.BadParameter(f'Value must be one of {openllm.bundle.supported_registries}', ctx, param)
  return value

@@ -20,13 +20,7 @@ from openllm_core.utils import WARNING_ENV_VAR, codegen, first_not_none, get_dis
if t.TYPE_CHECKING:
  from bentoml._internal.bento import BentoStore
  from openllm_core._configuration import LLMConfig
  from openllm_core._typing_compat import (
    LiteralBackend,
    LiteralContainerRegistry,
    LiteralContainerVersionStrategy,
    LiteralQuantise,
    LiteralString,
  )
  from openllm_core._typing_compat import LiteralBackend, LiteralQuantise, LiteralString

logger = logging.getLogger(__name__)

@@ -125,8 +119,6 @@ def _build(
  enable_features: tuple[str, ...] | None = None,
  dockerfile_template: str | None = None,
  overwrite: bool = False,
  container_registry: LiteralContainerRegistry | None = None,
  container_version_strategy: LiteralContainerVersionStrategy | None = None,
  push: bool = False,
  force_push: bool = False,
  containerize: bool = False,
@@ -159,8 +151,6 @@ def _build(
    containerize: Whether to containerize the Bento after building. '--containerize' is the shortcut of 'openllm build && bentoml containerize'.
      Note that 'containerize' and 'push' are mutually exclusive
    container_registry: Container registry to choose the base OpenLLM container image to build from. Default to ECR.
    container_registry: Container registry to choose the base OpenLLM container image to build from. Default to ECR.
    container_version_strategy: The container version strategy. Default to the latest release of OpenLLM.
    serialisation: Serialisation for saving models. Default to 'safetensors', which is equivalent to `safe_serialization=True`
    additional_args: Additional arguments to pass to ``openllm build``.
    bento_store: Optional BentoStore for saving this BentoLLM. Default to the default BentoML local store.
@@ -205,11 +195,6 @@ def _build(
    args.extend(['--bento-version', bento_version])
  if dockerfile_template:
    args.extend(['--dockerfile-template', dockerfile_template])
  if container_registry is None:
    container_registry = 'ecr'
  if container_version_strategy is None:
    container_version_strategy = 'release'
  args.extend(['--container-registry', container_registry, '--container-version-strategy', container_version_strategy])
  if additional_args:
    args.extend(additional_args)
  if force_push:

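For context, `_build` funnels these keyword arguments into a plain argument list for the `openllm build` CLI (the docstring above calls this out for `additional_args`). A hypothetical example of what that list might end up containing for one model; the flag names are the ones visible in this hunk, while the model id and values are made up:

```python
args = [
  'facebook/opt-1.3b',                       # illustrative model id
  '--bento-version', '20240101',
  '--dockerfile-template', './Dockerfile.j2',
  '--container-registry', 'ecr',
  '--container-version-strategy', 'release',
]
```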
@@ -1,31 +1,8 @@
from __future__ import annotations
import enum
import functools
import inspect
import itertools
import logging
import os
import platform
import random
import subprocess
import threading
import time
import traceback
import typing as t

import attr
import click
import click_option_group as cog
import fs
import fs.copy
import fs.errors
import inflection
import orjson
import enum, functools, inspect, itertools, logging, os, platform, random, subprocess, threading, time, traceback, typing as t
import attr, click, fs, inflection, bentoml, openllm, orjson, fs.copy, fs.errors, click_option_group as cog
from bentoml_cli.utils import BentoMLCommandGroup, opt_callback
from simple_di import Provide, inject

import bentoml
import openllm
from bentoml._internal.cloud.config import CloudClientConfig
from bentoml._internal.configuration.containers import BentoMLContainer
from bentoml._internal.models.model import ModelStore
@@ -70,7 +47,6 @@ from ._factory import (
  FC,
  _AnyCallable,
  backend_option,
  container_registry_option,
  dtype_option,
  machine_option,
  model_name_argument,
@@ -88,7 +64,6 @@ if t.TYPE_CHECKING:
  from bentoml._internal.container import DefaultBuilder
  from openllm_client._schemas import StreamingResponse
  from openllm_core._configuration import LLMConfig
  from openllm_core._typing_compat import LiteralContainerRegistry, LiteralContainerVersionStrategy
else:
  torch = LazyLoader('torch', globals(), 'torch')

@@ -248,7 +223,7 @@ class OpenLLMCommandGroup(BentoMLCommandGroup):
    return super().get_command(ctx, cmd_name)

  def list_commands(self, ctx: click.Context) -> list[str]:
    return super().list_commands(ctx) + t.cast('Extensions', extension_command).list_commands(ctx)
    return super().list_commands(ctx) + extension_command.list_commands(ctx)

  def command(self, *args: t.Any, **kwargs: t.Any) -> t.Callable[[t.Callable[..., t.Any]], click.Command]:
    """Override the default 'cli.command' with supports for aliases for given command, and it wraps the implementation with common parameters."""
@@ -371,7 +346,7 @@ def cli() -> None:
  default=None,
  help='Maximum sequence length for the model. If not specified, we will use the default value from the model config.',
)
@start_decorator(serve_grpc=False)
@start_decorator
def start_command(
  model_id: str,
  server_timeout: int,
@@ -396,26 +371,21 @@ def start_command(
  $ openllm <start|start-http> <model_id> --<options> ...
  ```
  '''
  if backend == 'pt': logger.warning('PyTorch backend is deprecated and will be removed in future releases. Make sure to use vLLM instead.')
  if model_id in openllm.CONFIG_MAPPING:
    _model_name = model_id
    if deprecated_model_id is not None:
      model_id = deprecated_model_id
    else:
      model_id = openllm.AutoConfig.for_model(_model_name)['default_id']
    termui.warning(
      f"Passing 'openllm start {_model_name}{'' if deprecated_model_id is None else ' --model-id ' + deprecated_model_id}' is deprecated and will be remove in a future version. Use 'openllm start {model_id}' instead."
    )
    logger.warning("Passing 'openllm start %s%s' is deprecated and will be remove in a future version. Use 'openllm start %s' instead.", _model_name, '' if deprecated_model_id is None else f' --model-id {deprecated_model_id}', model_id)

  adapter_map: dict[str, str] | None = attrs.pop('adapter_map', None)

  from openllm.serialisation.transformers.weights import has_safetensors_weights

  serialisation = t.cast(
    LiteralSerialisation,
    first_not_none(
      serialisation, default='safetensors' if has_safetensors_weights(model_id, model_version) else 'legacy'
    ),
  )
  serialisation = first_not_none(serialisation, default='safetensors' if has_safetensors_weights(model_id, model_version) else 'legacy')

  if serialisation == 'safetensors' and quantize is not None:
    logger.warning("'--quantize=%s' might not work with 'safetensors' serialisation format.", quantize)
    logger.warning(
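The consolidated serialisation line above relies on `first_not_none` from `openllm_core.utils`. A minimal sketch of that helper, assuming it simply returns the first non-`None` argument and otherwise the `default` (hypothetical re-implementation, for illustration only):

```python
import typing as t

def first_not_none(*args: t.Any, default: t.Any = None) -> t.Any:
  # Pick the first explicitly provided value, falling back to the default.
  return next((arg for arg in args if arg is not None), default)

assert first_not_none(None, default='safetensors') == 'safetensors'
assert first_not_none('legacy', default='safetensors') == 'legacy'
```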
@@ -449,8 +419,7 @@ def start_command(
  config, server_attrs = llm.config.model_validate_click(**attrs)
  server_timeout = first_not_none(server_timeout, default=config['timeout'])
  server_attrs.update({'working_dir': pkg.source_locations('openllm'), 'timeout': server_timeout})
  # XXX: currently, theres no development args in bentoml.Server. To be fixed upstream.
  development = server_attrs.pop('development')
  development = server_attrs.pop('development')  # XXX: currently, theres no development args in bentoml.Server. To be fixed upstream.
  server_attrs.setdefault('production', not development)

  start_env = process_environ(
@@ -479,145 +448,8 @@ def start_command(
  # NOTE: Return the configuration for telemetry purposes.
  return config


@cli.command(
  context_settings=termui.CONTEXT_SETTINGS,
  name='start-grpc',
  short_help='Start a gRPC LLMServer for any supported LLM.',
)
@click.argument('model_id', type=click.STRING, metavar='[REMOTE_REPO/MODEL_ID | /path/to/local/model]', required=True)
@click.option(
  '--model-id',
  'deprecated_model_id',
  type=click.STRING,
  default=None,
  hidden=True,
  metavar='[REMOTE_REPO/MODEL_ID | /path/to/local/model]',
  help='Deprecated. Use positional argument instead.',
)
@start_decorator(serve_grpc=True)
@click.option(
  '--max-model-len',
  '--max_model_len',
  'max_model_len',
  default=None,
  help='Maximum sequence length for the model. If not specified, we will use the default value from the model config.',
)
def start_grpc_command(
  model_id: str,
  server_timeout: int,
  model_version: str | None,
  workers_per_resource: t.Literal['conserved', 'round_robin'] | LiteralString,
  device: t.Tuple[str, ...],
  quantize: LiteralQuantise | None,
  backend: LiteralBackend | None,
  serialisation: LiteralSerialisation | None,
  cors: bool,
  dtype: LiteralDtype,
  adapter_id: str | None,
  return_process: bool,
  deprecated_model_id: str | None,
  max_model_len: int | None,
  **attrs: t.Any,
) -> LLMConfig | subprocess.Popen[bytes]:
  '''Start any LLM as a gRPC server.

  \b
  ```bash
  $ openllm start-grpc <model_id> --<options> ...
  ```
  '''
  termui.warning(
    'Continuous batching is currently not yet supported with gPRC. If you want to use continuous batching with gRPC, feel free to open a GitHub issue about your usecase.\n'
  )
  if model_id in openllm.CONFIG_MAPPING:
    _model_name = model_id
    if deprecated_model_id is not None:
      model_id = deprecated_model_id
    else:
      model_id = openllm.AutoConfig.for_model(_model_name)['default_id']
    termui.warning(
      f"Passing 'openllm start-grpc {_model_name}{'' if deprecated_model_id is None else ' --model-id ' + deprecated_model_id}' is deprecated and will be remove in a future version. Use 'openllm start-grpc {model_id}' instead."
    )

  adapter_map: dict[str, str] | None = attrs.pop('adapter_map', None)

  from openllm.serialisation.transformers.weights import has_safetensors_weights

  serialisation = first_not_none(
    serialisation, default='safetensors' if has_safetensors_weights(model_id, model_version) else 'legacy'
  )
  if serialisation == 'safetensors' and quantize is not None:
    logger.warning("'--quantize=%s' might not work with 'safetensors' serialisation format.", quantize)
    logger.warning(
      "Make sure to check out '%s' repository to see if the weights is in '%s' format if unsure.",
      model_id,
      serialisation,
    )
    logger.info("Tip: You can always fallback to '--serialisation legacy' when running quantisation.")

  import torch

  if backend == 'pt' and not torch.cuda.is_available():
    if dtype == 'auto':
      dtype = 'float'
    elif dtype not in {'float', 'float32'}:
      logger.warning('"bfloat16" and "half" are not supported on CPU. OpenLLM will default fallback to "float32".')
    dtype = 'float'  # we need to cast back to full precision if cuda is not available
  llm = openllm.LLM[t.Any, t.Any](
    model_id=model_id,
    model_version=model_version,
    backend=backend,
    adapter_map=adapter_map,
    quantize=quantize,
    serialisation=serialisation,
    dtype=dtype,
    max_model_len=max_model_len,
    trust_remote_code=check_bool_env('TRUST_REMOTE_CODE', False),
  )
  backend_warning(llm.__llm_backend__)

  config, server_attrs = llm.config.model_validate_click(**attrs)
  server_timeout = first_not_none(server_timeout, default=config['timeout'])
  server_attrs.update({'working_dir': pkg.source_locations('openllm'), 'timeout': server_timeout})
  server_attrs['grpc_protocol_version'] = 'v1'
  # XXX: currently, theres no development args in bentoml.Server. To be fixed upstream.
  development = server_attrs.pop('development')
  server_attrs.setdefault('production', not development)

  start_env = process_environ(
    config,
    server_timeout,
    process_workers_per_resource(first_not_none(workers_per_resource, default=config['workers_per_resource']), device),
    device,
    cors,
    model_id,
    adapter_map,
    serialisation,
    llm,
  )

  server = bentoml.GrpcServer('_service:svc', **server_attrs)
  openllm.utils.analytics.track_start_init(llm.config)

  try:
    build_bento_instruction(llm, model_id, serialisation, adapter_map)
    it = run_server(server.args, start_env, return_process=return_process)
    if return_process:
      return it
  except KeyboardInterrupt:
    pass

  # NOTE: Return the configuration for telemetry purposes.
  return config


def process_environ(
  config, server_timeout, wpr, device, cors, model_id, adapter_map, serialisation, llm, use_current_env=True
) -> t.Dict[str, t.Any]:
  environ = parse_config_options(
    config, server_timeout, wpr, device, cors, os.environ.copy() if use_current_env else {}
  )
def process_environ(config, server_timeout, wpr, device, cors, model_id, adapter_map, serialisation, llm, use_current_env=True):
  environ = parse_config_options(config, server_timeout, wpr, device, cors, os.environ.copy() if use_current_env else {})
  environ.update(
    {
      'OPENLLM_MODEL_ID': model_id,
@@ -631,11 +463,9 @@ def process_environ(
      'TRUST_REMOTE_CODE': str(llm.trust_remote_code),
    }
  )
  if llm.quantise:
    environ['QUANTIZE'] = str(llm.quantise)
  if llm.quantise: environ['QUANTIZE'] = str(llm.quantise)
  return environ

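To make the role of `process_environ` concrete, here is roughly what the returned mapping contains for a hypothetical run; only keys that appear in this diff are shown, and real runs also carry whatever `parse_config_options` copies over from `os.environ`:

```python
environ = {
  'OPENLLM_MODEL_ID': 'facebook/opt-1.3b',  # made-up model id
  'TRUST_REMOTE_CODE': 'False',
  'QUANTIZE': 'awq',                        # only present when llm.quantise is set
}
```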
def process_workers_per_resource(wpr: str | float | int, device: tuple[str, ...]) -> TypeGuard[float]:
  if isinstance(wpr, str):
    if wpr == 'round_robin':
@@ -653,7 +483,6 @@ def process_workers_per_resource(wpr: str | float | int, device: tuple[str, ...]
    wpr = float(wpr)
  return wpr


def build_bento_instruction(llm, model_id, serialisation, adapter_map):
  cmd_name = f'openllm build {model_id} --backend {llm.__llm_backend__}'
  if llm.quantise:
@@ -907,13 +736,6 @@ class BuildBentoOutput(t.TypedDict):
  help='Optional custom dockerfile template to be used with this BentoLLM.',
)
@serialisation_option
@container_registry_option
@click.option(
  '--container-version-strategy',
  type=click.Choice(['release', 'latest', 'nightly']),
  default='release',
  help="Default container version strategy for the image from '--container-registry'",
)
@cog.optgroup.group(cls=cog.MutuallyExclusiveOptionGroup, name='Utilities options')  # type: ignore[misc]
@cog.optgroup.option(
  '--containerize',
@@ -951,8 +773,6 @@ def build_command(
  containerize: bool,
  push: bool,
  serialisation: LiteralSerialisation | None,
  container_registry: LiteralContainerRegistry,
  container_version_strategy: LiteralContainerVersionStrategy,
  force_push: bool,
  **_: t.Any,
) -> BuildBentoOutput:
@@ -991,6 +811,10 @@ def build_command(

  state = ItemState.NOT_FOUND

  if backend == 'pt':
    logger.warning("PyTorch backend is deprecated and will be removed from the next releases. Will set default backend to 'vllm' instead.")
    backend = 'vllm'

  llm = openllm.LLM[t.Any, t.Any](
    model_id=model_id,
    model_version=model_version,
@@ -1069,8 +893,6 @@ def build_command(
    quantize=quantize,
    extra_dependencies=enable_features,
    dockerfile_template=dockerfile_template_path,
    container_registry=container_registry,
    container_version_strategy=container_version_strategy,
  )
  if state != ItemState.OVERWRITE:
    state = ItemState.ADDED

@@ -1,104 +0,0 @@
from __future__ import annotations
import pathlib
import shutil
import subprocess
import typing as t

import click
import orjson

import bentoml
import openllm
from openllm_cli import termui
from openllm_cli._factory import container_registry_option, machine_option
from openllm_core.utils import get_debug_mode, pkg

if t.TYPE_CHECKING:
  from openllm_core._typing_compat import LiteralContainerRegistry, LiteralContainerVersionStrategy

_BUILDER = bentoml.container.get_backend('buildx')
_module_location = pkg.source_locations('openllm')


def build_container(
  registries: LiteralContainerRegistry | t.Sequence[LiteralContainerRegistry] | None = None,
  version_strategy: LiteralContainerVersionStrategy = 'release',
  push: bool = False,
  machine: bool = False,
) -> dict[str | LiteralContainerRegistry, str]:
  try:
    if not _BUILDER.health():
      raise openllm.exceptions.Error
  except (openllm.exceptions.Error, subprocess.CalledProcessError):
    raise RuntimeError(
      'Building base container requires BuildKit (via Buildx) to be installed. See https://docs.docker.com/build/buildx/install/ for instalation instruction.'
    ) from None
  if not shutil.which('nvidia-container-runtime'):
    raise RuntimeError('NVIDIA Container Toolkit is required to compile CUDA kernel in container.')
  if not _module_location:
    raise RuntimeError("Failed to determine source location of 'openllm'. (Possible broken installation)")
  pyproject_path = pathlib.Path(_module_location).parent.parent / 'pyproject.toml'
  if not pyproject_path.exists():
    raise ValueError(
      "This utility can only be run within OpenLLM git repository. Clone it first with 'git clone https://github.com/bentoml/OpenLLM.git'"
    )
  if not registries:
    tags = {
      alias: openllm.bundle.RefResolver.construct_base_image(alias, version_strategy)
      for alias in openllm.bundle.CONTAINER_NAMES
    }
  else:
    registries = [registries] if isinstance(registries, str) else list(registries)
    tags = {name: openllm.bundle.RefResolver.construct_base_image(name, version_strategy) for name in registries}
  try:
    outputs = _BUILDER.build(
      file=pathlib.Path(__file__).parent.joinpath('Dockerfile').resolve().__fspath__(),
      context_path=pyproject_path.parent.__fspath__(),
      tag=tuple(tags.values()),
      push=push,
      progress='plain' if get_debug_mode() else 'auto',
      quiet=machine,
    )
    if machine and outputs is not None:
      tags['image_sha'] = outputs.decode('utf-8').strip()
  except Exception as err:
    raise openllm.exceptions.OpenLLMException(
      f'Failed to containerize base container images (Scroll up to see error above, or set DEBUG=5 for more traceback):\n{err}'
    ) from err
  return tags

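Before its removal, the helper above could be driven directly from Python. A hedged usage sketch of that API — the registry alias and resulting tag are illustrative only:

```python
# Hypothetical invocation: build the GHCR-flavoured base image and push it.
tags = build_container(registries='ghcr', version_strategy='nightly', push=True)
print(tags)  # e.g. {'ghcr': 'ghcr.io/bentoml/openllm:<nightly tag>'}
```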
@click.command(
  'build_base_container',
  context_settings=termui.CONTEXT_SETTINGS,
  help='''Base image builder for BentoLLM.

  By default, the base image will include custom kernels (PagedAttention via vllm, FlashAttention-v2, etc.) built with CUDA 11.8, Python 3.9 on Ubuntu22.04.
  Optionally, this can also be pushed directly to remote registry. Currently support ``docker.io``, ``ghcr.io`` and ``quay.io``.

  \b
  If '--machine' is passed, then it will run the process quietly, and output a JSON to the current running terminal.
  This command is only useful for debugging and for building custom base image for extending BentoML with custom base images and custom kernels.

  Note that we already release images on our CI to ECR and GHCR, so you don't need to build it yourself.
  ''',
)
@container_registry_option
@click.option(
  '--version-strategy',
  type=click.Choice(['release', 'latest', 'nightly']),
  default='nightly',
  help='Version strategy to use for tagging the image.',
)
@click.option('--push/--no-push', help='Whether to push to remote repository', is_flag=True, default=False)
@machine_option
def cli(
  container_registry: tuple[LiteralContainerRegistry, ...] | None,
  version_strategy: LiteralContainerVersionStrategy,
  push: bool,
  machine: bool,
) -> dict[str, str]:
  mapping = build_container(container_registry, version_strategy, push, machine)
  if machine:
    termui.echo(orjson.dumps(mapping, option=orjson.OPT_INDENT_2).decode(), fg='white')
  return mapping