mirror of https://github.com/bentoml/OpenLLM.git
chore: cleanup unused code path (#633)
We now rely on tokenizer.chat_template to format prompts correctly.

Signed-off-by: Aaron <29749331+aarnphm@users.noreply.github.com>
@@ -173,20 +173,3 @@ class ModelCard:
 class ModelList:
   object: str = 'list'
   data: t.List[ModelCard] = attr.field(factory=list)
-
-
-async def get_conversation_prompt(request: ChatCompletionRequest, llm_config: openllm_core.LLMConfig) -> str:
-  conv = llm_config.get_conversation_template()
-  for message in request.messages:
-    msg_role = message['role']
-    if msg_role == 'system':
-      conv.set_system_message(message['content'])
-    elif msg_role == 'user':
-      conv.append_message(conv.roles[0], message['content'])
-    elif msg_role == 'assistant':
-      conv.append_message(conv.roles[1], message['content'])
-    else:
-      raise ValueError(f'Unknown role: {msg_role}')
-  # Add a blank message for the assistant.
-  conv.append_message(conv.roles[1], '')
-  return conv.get_prompt()
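For context, a minimal sketch of the replacement path the commit message points to: instead of walking a conversation template role by role (the deleted get_conversation_prompt above), the tokenizer's built-in chat template renders the prompt. This uses Hugging Face's apply_chat_template API; the model name and messages below are illustrative assumptions, not part of this commit.

```python
# Sketch of the tokenizer.chat_template path that replaces the deleted helper.
# Model name and messages are illustrative assumptions, not from this commit.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('HuggingFaceH4/zephyr-7b-beta')

messages = [
  {'role': 'system', 'content': 'You are a helpful assistant.'},
  {'role': 'user', 'content': 'What is OpenLLM?'},
]

# add_generation_prompt=True appends the assistant header, serving the same
# purpose as the blank assistant message appended by the deleted helper.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```

The template itself ships with the tokenizer, so role handling and unknown-role validation no longer need to live in OpenLLM's serving code.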