mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-08-03 09:21:45 +00:00
Move to 3_2 format
This commit is contained in:
parent
4331796692
commit
c20f342cde
1 changed files with 4 additions and 8 deletions
|
@@ -271,17 +271,13 @@ def chat_completion_request_to_messages(
|
|||
log.error(f"Unsupported inference model? {model.descriptor()}")
|
||||
return request.messages
|
||||
|
||||
if (
|
||||
model.model_family == ModelFamily.llama3_1
|
||||
or (
|
||||
model.model_family == ModelFamily.llama3_2
|
||||
and is_multimodal(model.core_model_id)
|
||||
)
|
||||
or model.model_family == ModelFamily.llama3_3
|
||||
if model.model_family == ModelFamily.llama3_1 or (
|
||||
model.model_family == ModelFamily.llama3_2
|
||||
and is_multimodal(model.core_model_id)
|
||||
):
|
||||
# llama3.1 and llama3.2 multimodal models follow the same tool prompt format
# (llama3.3 now takes the llama3_2 branch below, per this commit)
|
||||
messages = augment_messages_for_tools_llama_3_1(request)
|
||||
elif model.model_family == ModelFamily.llama3_2:
|
||||
elif model.model_family in (ModelFamily.llama3_2, ModelFamily.llama3_3):
|
||||
messages = augment_messages_for_tools_llama_3_2(request)
|
||||
else:
|
||||
messages = request.messages
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue