Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-06-28 02:53:30 +00:00)
fix: pass tool_prompt_format to chat_formatter (#1198)
Summary: Need this to format the completion message with tool_calls correctly. See the added unittest.

Test Plan: python -m unittest llama_stack.providers.tests.inference.test_prompt_adapter
parent 33a64eb5ec
commit cfa752fc92
2 changed files with 50 additions and 2 deletions
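The diff below only touches the two prompt-adapter helpers; the rest of the 50 added lines are presumably the unittest referenced in the Test Plan. A minimal sketch of what such a test could look like follows. The import paths, field names, model identifier, and the assertion are illustrative assumptions, not the actual test from the commit.

# Illustrative sketch only: import paths, the model name, and the assumed
# rendering of tool calls may differ between llama-stack versions.
import unittest

from llama_stack.apis.inference import (
    ChatCompletionRequest,
    CompletionMessage,
    ToolConfig,
    UserMessage,
)
from llama_stack.models.llama.datatypes import StopReason, ToolCall, ToolPromptFormat
from llama_stack.providers.utils.inference.prompt_adapter import (
    chat_completion_request_to_prompt,
)


class TestToolPromptFormatPassthrough(unittest.IsolatedAsyncioTestCase):
    async def test_completion_message_uses_requested_format(self):
        request = ChatCompletionRequest(
            model="Llama3.2-3B-Instruct",
            messages=[
                UserMessage(content="What is the weather in SF?"),
                # An assistant turn carrying a tool call; before this fix it was
                # rendered with the formatter's default tool prompt format rather
                # than the one requested on tool_config.
                CompletionMessage(
                    content="",
                    stop_reason=StopReason.end_of_turn,
                    tool_calls=[
                        ToolCall(call_id="1", tool_name="get_weather", arguments={"city": "SF"})
                    ],
                ),
            ],
            tool_config=ToolConfig(tool_prompt_format=ToolPromptFormat.python_list),
        )
        # Second argument is the llama model identifier (assumed signature).
        prompt = await chat_completion_request_to_prompt(request, request.model)
        # Whatever the concrete rendering, the tool name should appear in the
        # formatted completion message.
        self.assertIn("get_weather", prompt)


if __name__ == "__main__":
    unittest.main()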
@@ -252,7 +252,9 @@ async def chat_completion_request_to_prompt(request: ChatCompletionRequest, llam
     request = await convert_request_to_raw(request)

     formatter = ChatFormat(tokenizer=Tokenizer.get_instance())
-    model_input = formatter.encode_dialog_prompt(request.messages)
+    model_input = formatter.encode_dialog_prompt(
+        request.messages, tool_prompt_format=request.tool_config.tool_prompt_format
+    )
     return formatter.tokenizer.decode(model_input.tokens)


@@ -264,7 +266,9 @@ async def chat_completion_request_to_model_input_info(
     request = await convert_request_to_raw(request)

     formatter = ChatFormat(tokenizer=Tokenizer.get_instance())
-    model_input = formatter.encode_dialog_prompt(request.messages)
+    model_input = formatter.encode_dialog_prompt(
+        request.messages, tool_prompt_format=request.tool_config.tool_prompt_format
+    )
     return (
         formatter.tokenizer.decode(model_input.tokens),
         len(model_input.tokens),
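For context on what the two hunks change: both helpers already converted the request to raw messages and built a ChatFormat, but the tool prompt format requested on the request's tool_config never reached encode_dialog_prompt, so the formatter fell back to its default. A rough usage sketch under that reading, with import paths, the model name, and the second argument to each helper as assumptions:

# Sketch of calling the updated helpers; not verified against a specific
# llama-stack release.
import asyncio

from llama_stack.apis.inference import ChatCompletionRequest, ToolConfig, UserMessage
from llama_stack.models.llama.datatypes import ToolPromptFormat
from llama_stack.providers.utils.inference.prompt_adapter import (
    chat_completion_request_to_model_input_info,
    chat_completion_request_to_prompt,
)


async def main() -> None:
    request = ChatCompletionRequest(
        model="Llama3.1-8B-Instruct",
        messages=[UserMessage(content="Which tools can you call?")],
        # With this commit, the format chosen here is forwarded to
        # ChatFormat.encode_dialog_prompt instead of being ignored.
        tool_config=ToolConfig(tool_prompt_format=ToolPromptFormat.json),
    )

    prompt = await chat_completion_request_to_prompt(request, request.model)
    # The second helper returns the decoded prompt plus its token count.
    same_prompt, n_tokens = await chat_completion_request_to_model_input_info(
        request, request.model
    )
    print(n_tokens, prompt == same_prompt)


asyncio.run(main())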