mirror of
https://github.com/bentoml/OpenLLM.git
synced 2026-04-23 08:28:24 -04:00
fix: make sure we evolve the attribute from CLI
Signed-off-by: aarnphm-ec2-dev <29749331+aarnphm@users.noreply.github.com>
This commit is contained in:
@@ -657,10 +657,15 @@ class LLMConfig:
|
||||
|
||||
# NOTE: we now need to determine the list of the attrs
|
||||
# by mro to at the very least support inheritance. Though it is not recommended.
|
||||
own_attrs: list[attr.Attribute[t.Any]] = [
|
||||
attr.Attribute.from_counting_attr(name=attr_name, ca=ca, type=anns.get(attr_name))
|
||||
for attr_name, ca in ca_list
|
||||
]
|
||||
own_attrs: list[attr.Attribute[t.Any]] = []
|
||||
for attr_name, ca in ca_list:
|
||||
gen_attribute = attr.Attribute.from_counting_attr(name=attr_name, ca=ca, type=anns.get(attr_name))
|
||||
if attr_name in ca_names:
|
||||
metadata = ca.metadata
|
||||
metadata["env"] = field_env_key(attr_name)
|
||||
gen_attribute = gen_attribute.evolve(metadata=metadata)
|
||||
own_attrs.append(gen_attribute)
|
||||
|
||||
base_attrs, base_attr_map = _collect_base_attrs(cls, {a.name for a in own_attrs})
|
||||
attrs: list[attr.Attribute[t.Any]] = own_attrs + base_attrs
|
||||
|
||||
|
||||
@@ -166,7 +166,6 @@ class OpenLLMCommandGroup(BentoMLCommandGroup):
|
||||
self._cached_grpc: dict[str, t.Any] = {}
|
||||
|
||||
def get_command(self, ctx: click.Context, cmd_name: str) -> click.Command | None:
|
||||
breakpoint()
|
||||
cmd_name = self.resolve_alias(cmd_name)
|
||||
if ctx.command.name == "start":
|
||||
if cmd_name not in self._cached_http:
|
||||
@@ -289,6 +288,7 @@ def start_model_command(
|
||||
"""
|
||||
from bentoml._internal.configuration import get_debug_mode
|
||||
|
||||
breakpoint()
|
||||
ModelEnv = openllm.utils.ModelEnv(model_name)
|
||||
model_command_decr: dict[str, t.Any] = {"name": ModelEnv.model_name, "context_settings": _context_settings or {}}
|
||||
|
||||
@@ -300,7 +300,6 @@ def start_model_command(
|
||||
aliases.append(llm_config.__openllm_start_name__)
|
||||
model_command_decr.update(
|
||||
{
|
||||
"name": llm_config.__openllm_model_name__,
|
||||
"short_help": f"Start a LLMServer for '{model_name}' ('--help' for more details)",
|
||||
"help": ModelEnv.start_docstring,
|
||||
"aliases": aliases if len(aliases) > 0 else None,
|
||||
@@ -310,7 +309,7 @@ def start_model_command(
|
||||
gpu_available = False
|
||||
try:
|
||||
llm_config.check_if_gpu_is_available(ModelEnv.get_framework_env())
|
||||
gpu_available = True
|
||||
gpu_available = True if llm_config.__openllm_requires_gpu__ else False
|
||||
except openllm.exceptions.GpuNotAvailableError:
|
||||
# NOTE: The model requires GPU, therefore we will return a dummy command
|
||||
model_command_decr.update(
|
||||
|
||||
Reference in New Issue
Block a user