Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-08-02 08:44:44 +00:00)
Update prompt adapter
This commit is contained in: parent 94645dd5f6, commit 4331796692
1 changed file with 8 additions and 4 deletions
@@ -271,11 +271,15 @@ def chat_completion_request_to_messages(
         log.error(f"Unsupported inference model? {model.descriptor()}")
         return request.messages
 
-    if model.model_family == ModelFamily.llama3_1 or (
-        model.model_family == ModelFamily.llama3_2
-        and is_multimodal(model.core_model_id)
+    if (
+        model.model_family == ModelFamily.llama3_1
+        or (
+            model.model_family == ModelFamily.llama3_2
+            and is_multimodal(model.core_model_id)
+        )
+        or model.model_family == ModelFamily.llama3_3
     ):
-        # llama3.1 and llama3.2 multimodal models follow the same tool prompt format
+        # llama3.1, llama3.2 multimodal and llama3.3 models follow the same tool prompt format
         messages = augment_messages_for_tools_llama_3_1(request)
     elif model.model_family == ModelFamily.llama3_2:
         messages = augment_messages_for_tools_llama_3_2(request)
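For context, below is a minimal, self-contained sketch of the dispatch this hunk implements. The ModelFamily enum values, the is_multimodal() helper, and the formatter names here are simplified stand-ins (assumptions for illustration, not llama-stack's actual definitions); only the branching mirrors the patched condition.

# Minimal sketch of the routing introduced by this commit.
# ModelFamily values and is_multimodal() are stand-ins for the real
# llama-models definitions; only the branching logic mirrors the diff.
from enum import Enum


class ModelFamily(Enum):
    llama3_1 = "llama3_1"
    llama3_2 = "llama3_2"
    llama3_3 = "llama3_3"


# Hypothetical helper: in llama-stack this is keyed off the core model id.
MULTIMODAL_IDS = {"llama3_2_11b_vision", "llama3_2_90b_vision"}


def is_multimodal(core_model_id: str) -> bool:
    return core_model_id in MULTIMODAL_IDS


def pick_tool_prompt_format(family: ModelFamily, core_model_id: str) -> str:
    """Return which tool-prompt formatter the adapter would use."""
    if (
        family == ModelFamily.llama3_1
        or (family == ModelFamily.llama3_2 and is_multimodal(core_model_id))
        or family == ModelFamily.llama3_3  # branch added by this commit
    ):
        return "llama_3_1_tools"
    elif family == ModelFamily.llama3_2:
        return "llama_3_2_tools"
    return "unchanged"


# llama3.3 models now share the llama3.1 tool prompt format
assert pick_tool_prompt_format(ModelFamily.llama3_3, "llama3_3_70b") == "llama_3_1_tools"
assert pick_tool_prompt_format(ModelFamily.llama3_2, "llama3_2_3b") == "llama_3_2_tools"

The practical effect of the change is that llama3.3 models are routed to the same tool-prompt augmentation path already used for llama3.1 and multimodal llama3.2 models.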