From b9ff4ab92a37eaca02698404b7a8bade2bd96082 Mon Sep 17 00:00:00 2001
From: Aaron <29749331+aarnphm@users.noreply.github.com>
Date: Thu, 15 Jun 2023 18:39:33 -0400
Subject: [PATCH] chore: flatten examples llm-config

Signed-off-by: Aaron <29749331+aarnphm@users.noreply.github.com>
---
 src/openllm/_service.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/openllm/_service.py b/src/openllm/_service.py
index 58d07053..47ca759b 100644
--- a/src/openllm/_service.py
+++ b/src/openllm/_service.py
@@ -40,8 +40,8 @@ svc = bentoml.Service(name=f"llm-{llm_config.__openllm_start_name__}-service", r
 @svc.api(
-    input=bentoml.io.JSON.from_sample(sample={"prompt": "", "llm_config": llm_config.model_dump()}),
-    output=bentoml.io.JSON.from_sample(sample={"responses": [], "configuration": llm_config.model_dump()}),
+    input=bentoml.io.JSON.from_sample(sample={"prompt": "", "llm_config": llm_config.model_dump(flatten=True)}),
+    output=bentoml.io.JSON.from_sample(sample={"responses": [], "configuration": llm_config.model_dump(flatten=True)}),
     route="/v1/generate",
 )
 async def generate_v1(input_dict: dict[str, t.Any]) -> openllm.GenerationOutput:
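
The patch only switches the sample payloads shown by the JSON IO descriptors from llm_config.model_dump() to llm_config.model_dump(flatten=True). A minimal sketch of the assumed effect is below; the field names ("max_new_tokens", "temperature") and the nested "generation_config" grouping are illustrative assumptions, not the actual OpenLLM schema.

# Hypothetical before/after shapes for the input sample, assuming
# flatten=True hoists the nested generation fields to the top level
# of the dumped config.

# Pre-patch: llm_config.model_dump() is assumed to keep generation
# options nested under a sub-key, so the sample payload looks like:
nested_sample = {
    "prompt": "",
    "llm_config": {
        "generation_config": {"max_new_tokens": 256, "temperature": 0.9},
    },
}

# Post-patch: llm_config.model_dump(flatten=True) is assumed to flatten
# that nesting, so clients can pass generation options directly:
flattened_sample = {
    "prompt": "",
    "llm_config": {"max_new_tokens": 256, "temperature": 0.9},
}

Flattening the sample only changes what the generated OpenAPI docs and example requests advertise for /v1/generate; the runtime behavior of generate_v1 itself is untouched by this diff.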