llama-stack-mirror/llama_stack/models/llama/llama3/template_data.py
Hardik Shah 65ca85ba6b
fix: Updating ToolCall.arguments to allow for json strings that can be decoded on client side (#1685)
### What does this PR do?

Currently, `ToolCall.arguments` is a `Dict[str, RecursiveType]`.
However, on the client SDK side -- the `RecursiveType` gets deserialized
into a number ( both int and float get collapsed ) and hence when params
are `int` they get converted to float which might break client side
tools that might be doing type checking.

Closes: https://github.com/meta-llama/llama-stack/issues/1683

### Test Plan
Stainless changes --
https://github.com/meta-llama/llama-stack-client-python/pull/204
```
pytest -s -v --stack-config=fireworks tests/integration/agents/test_agents.py  --text-model meta-llama/Llama-3.1-8B-Instruct
```
2025-03-19 10:36:19 -07:00

117 lines
2.9 KiB
Python

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# top-level folder for each specific model found within the models/ directory at
# the top-level of this source tree.
from llama_stack.models.llama.datatypes import BuiltinTool, StopReason, ToolCall
from .prompt_templates import (
BuiltinToolGenerator,
JsonCustomToolGenerator,
ToolResponseGenerator,
)
# Default system instruction shared by the example system-message builders below.
INSTRUCTION = "You are a helpful assistant."
def system_message_builtin_tools_only():
    """Example system-message payload: first builtin-tool example, no custom tools."""
    payload = dict(
        builtin_tools=BuiltinToolGenerator().data_examples()[0],
        custom_tools=[],
        instruction=INSTRUCTION,
    )
    return payload
def system_message_builtin_code_only():
    """Example system-message payload: second builtin-tool example (code), empty instruction."""
    tools = BuiltinToolGenerator().data_examples()[1]
    return dict(builtin_tools=tools, custom_tools=[], instruction="")
def system_message_custom_tools_only():
    """Example system-message payload: first JSON custom-tool example, no builtins."""
    custom = JsonCustomToolGenerator().data_examples()[0]
    return {
        "builtin_tools": [],
        "custom_tools": custom,
        "instruction": INSTRUCTION,
    }
def system_message_builtin_and_custom_tools():
    """Example system-message payload combining builtin and custom tool examples."""
    builtin = BuiltinToolGenerator().data_examples()[0]
    custom = JsonCustomToolGenerator().data_examples()[0]
    return {
        "builtin_tools": builtin,
        "custom_tools": custom,
        "instruction": INSTRUCTION,
    }
def system_default():
    """Default system-message payload: no tools, just the shared instruction."""
    return dict(builtin_tools=[], custom_tools=[], instruction=INSTRUCTION)
def tool_success():
    """Example tool-response payload for a successful call (first generator example)."""
    examples = ToolResponseGenerator().data_examples()
    return examples[0]
def tool_failure():
    """Example tool-response payload for a failed call (second generator example)."""
    examples = ToolResponseGenerator().data_examples()
    return examples[1]
def assistant_builtin_tool_call():
    """Example assistant turn that invokes the builtin brave_search tool."""
    call = ToolCall(
        call_id="uuid",
        tool_name=BuiltinTool.brave_search,
        arguments={"query": "Who won NBA in 2024?"},
    )
    # Builtin tool calls end the message (not the turn), so the model can
    # continue after the tool result comes back.
    return {
        "content": "",
        "tool_call": call,
        "stop_reason": StopReason.end_of_message,
    }
def assistant_custom_tool_call():
    """Example assistant turn that invokes a custom tool by name."""
    call = ToolCall(
        call_id="uuid",
        tool_name="trending_songs",
        arguments={"country": "US", "n": 10},
    )
    return {
        "content": "",
        "tool_call": call,
        "stop_reason": StopReason.end_of_turn,
    }
def assistant_default():
    """Example plain-text assistant reply with no tool call."""
    greeting = "Hi, I am a helpful assistant. What can I help you with today?"
    return dict(content=greeting, tool_call=None, stop_reason=StopReason.end_of_turn)
def user_default():
    """Example plain-text user message."""
    content = "Please tell me how to plan a trip to New York"
    return {"content": content}
def user_images():
    """Example user message with two leading image placeholders."""
    content = "<|image|><|image|>What do these images depict?"
    return {"content": content}
def user_interleaved_images():
    """Example user message with image placeholders interleaved with text."""
    content = "<|image|>Describe the image in one sentence.<|image|>Write a haiku about these images"
    return {"content": content}