From 43a96dab2ca61c775aec9d8c1638732038512cc4 Mon Sep 17 00:00:00 2001
From: Aaron <29749331+aarnphm@users.noreply.github.com>
Date: Sun, 26 Nov 2023 02:49:48 -0500
Subject: [PATCH] fix(gpus): disable slots for now to enable cached_property

Signed-off-by: Aaron <29749331+aarnphm@users.noreply.github.com>
---
 openllm-python/src/openllm/_llm.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/openllm-python/src/openllm/_llm.py b/openllm-python/src/openllm/_llm.py
index 4b303ee1..9b622697 100644
--- a/openllm-python/src/openllm/_llm.py
+++ b/openllm-python/src/openllm/_llm.py
@@ -45,7 +45,7 @@ _AdapterTuple: type[AdapterTuple] = codegen.make_attr_tuple_class('AdapterTuple'
 ResolvedAdapterMap = t.Dict[AdapterType, t.Dict[str, t.Tuple['PeftConfig', str]]]
 
 
-@attr.define(slots=True, repr=False, init=False)
+@attr.define(slots=False, repr=False, init=False)
 class LLM(t.Generic[M, T]):
   async def generate(
     self, prompt, prompt_token_ids=None, stop=None, stop_token_ids=None, request_id=None, adapter_name=None, **attrs
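
Context for the one-line change: `functools.cached_property` caches its result in the instance `__dict__`, and a class generated with `slots=True` has no `__dict__`, so property access fails at runtime. (attrs releases before 23.2 do not special-case `cached_property` on slotted classes.) The sketch below is not from the repo; it reproduces the failure with a plain `__slots__` class standing in for `@attr.define(slots=True)`:

```python
import functools


class Slotted:
    __slots__ = ('x',)  # no instance __dict__, like @attr.define(slots=True)

    @functools.cached_property
    def value(self):
        return 42


class Unslotted:  # has a __dict__, like @attr.define(slots=False)
    @functools.cached_property
    def value(self):
        return 42


print(Unslotted().value)  # 42, cached in the instance __dict__
try:
    Slotted().value
except TypeError as err:
    # e.g. "No '__dict__' attribute on 'Slotted' instance to cache 'value' property."
    print(err)
```

Switching `LLM` to `slots=False` gives instances a `__dict__` again, which is the "for now" workaround the subject line describes.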