# What does this PR do?
This PR proposes updates to the tools API in Inference and Agent.
Goals:
1. Agent's tool specification should be consistent with Inference's tool spec, but with add-ons.
2. Formal types should be defined for built-in tools. Currently, Agent tool args are untyped; e.g. how does one know that `builtin::rag_tool` takes a `vector_db_ids` param, or that `builtin::rag_tool` is even available at all (in code, outside of docs)?
Inference:
1. `BuiltinTool` is to be removed and replaced by a formal `type` parameter (see the sketch after this list).
2. `brave_search` is replaced by `web_search` to be more generic. It will still be translated back to `brave_search` when the prompt is constructed, to stay consistent with model training.
3. I'm not sure what `photogen` is. Maybe it can be removed?
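A rough, purely illustrative sketch of what formally typed Inference tool definitions could look like under this proposal; the class and field names below are assumptions, not the final API:

```python
# Illustrative sketch only -- names and fields are assumptions, not the final API.
from typing import Literal, Optional

from pydantic import BaseModel


class WebSearchTool(BaseModel):
    # a formal `type` field replaces the old BuiltinTool enum value `brave_search`
    type: Literal["web_search"] = "web_search"


class CodeInterpreterTool(BaseModel):
    type: Literal["code_interpreter"] = "code_interpreter"


class FunctionTool(BaseModel):
    # user-defined tools carry a name, description, and JSON-schema-style parameters
    type: Literal["function"] = "function"
    name: str
    description: Optional[str] = None
    parameters: dict = {}
```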
Agent:
1. Uses the same format as Inference for built-in tools.
2. New tool types are added, e.g. `knowledge_search` (currently `rag_tool`) and MCP tools.
3. Toolgroup as a concept will be removed since it's really only used for MCP.
4. Instead, `MCPTool` is its own type, and the available tools provided by the server will be expanded by default. Users can specify a subset of tool names if desired.
Example snippet:
```python
agent = Agent(
    client,
    model=model_id,
    instructions="You are a helpful assistant. Use the tools you have access to for providing relevant answers.",
    tools=[
        KnowledgeSearchTool(vector_store_id="1234"),
        KnowledgeSearchTool(vector_store_id="5678", name="paper_search", description="Search research papers"),
        KnowledgeSearchTool(vector_store_id="1357", name="wiki_search", description="Search wiki pages"),
        # no need to register a toolgroup, just pass in the server uri
        # all available tools will be used
        MCPTool(server_uri="http://localhost:8000/sse"),
        # can specify a subset of available tools
        MCPTool(server_uri="http://localhost:8000/sse", tool_names=["list_directory"]),
        # custom tool
        my_custom_tool,
    ],
)
```
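For illustration, the typed tool configs assumed by the snippet above might look roughly like this; the field names (`vector_store_id`, `server_uri`, `tool_names`) come from the snippet, everything else is an assumption rather than a final design:

```python
# Illustrative sketch of the typed Agent tool configs used above -- not the final API.
from typing import List, Literal, Optional

from pydantic import BaseModel


class KnowledgeSearchTool(BaseModel):
    type: Literal["knowledge_search"] = "knowledge_search"
    vector_store_id: str
    name: Optional[str] = None          # override so several stores appear as distinct tools
    description: Optional[str] = None   # shown to the model to help it pick the right tool


class MCPTool(BaseModel):
    type: Literal["mcp"] = "mcp"
    server_uri: str
    tool_names: Optional[List[str]] = None  # None means "expose every tool the server offers"
```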
## Test Plan
228 lines · 6.1 KiB · Python
```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# top-level folder for each specific model found within the models/ directory at
# the top-level of this source tree.

from pathlib import Path
from typing import List, Optional

from termcolor import colored

from llama_stack.models.llama.datatypes import (
    FunctionTool,
    RawMessage,
    StopReason,
    ToolCall,
    ToolDefinition,
    ToolPromptFormat,
)

from .chat_format import ChatFormat
from .prompt_templates import (
    BuiltinToolGenerator,
    FunctionTagCustomToolGenerator,
    JsonCustomToolGenerator,
    SystemDefaultGenerator,
    ToolResponseGenerator,
)
from .tokenizer import Tokenizer

THIS_DIR = Path(__file__).parent


class Template:
    def __init__(
        self,
        role,
        template_name,
        data_provider=None,
        notes=None,
    ):
        self.role = role
        self.template_name = template_name
        self.data_provider = data_provider or ""
        self._notes = notes or ""

    @property
    def notes(self):
        default = "↵ represents newline"
        notes = default
        if self._notes:
            notes += "\n"
            notes += self._notes
        return notes


TEMPLATES = [
    Template(
        "user",
        "user-default",
        "user_default",
    ),
    Template(
        "user",
        "user-images",
        "user_images",
    ),
    Template("user", "user-interleaved-images", "user_interleaved_images"),
    Template(
        "assistant",
        "assistant-builtin-tool-call",
        "assistant_builtin_tool_call",
        "Notice <|python_tag|>",
    ),
    Template(
        "assistant",
        "assistant-custom-tool-call",
        "assistant_custom_tool_call",
        "Notice <function=...> format",
    ),
    Template(
        "assistant",
        "assistant-default",
        "assistant_default",
    ),
    Template(
        "system",
        "system-builtin-and-custom-tools",
        "system_message_builtin_and_custom_tools",
    ),
    Template(
        "system",
        "system-builtin-tools-only",
        "system_message_builtin_tools_only",
    ),
    Template(
        "system",
        "system-custom-tools-only",
        "system_message_custom_tools_only",
    ),
    Template(
        "system",
        "system-default",
        "system_default",
    ),
    Template(
        "tool",
        "tool-success",
        "tool_success",
        "Note ipython header and [stdout]",
    ),
    Template(
        "tool",
        "tool-failure",
        "tool_failure",
        "Note ipython header and [stderr]",
    ),
]


class LLama31Interface:
    def __init__(self, tool_prompt_format: ToolPromptFormat = ToolPromptFormat.json):
        self.tokenizer = Tokenizer.get_instance()
        self.formatter = ChatFormat(self.tokenizer)
        self.tool_prompt_format = tool_prompt_format

    def get_tokens(self, messages: List[RawMessage]) -> List[int]:
        model_input = self.formatter.encode_dialog_prompt(
            messages,
            self.tool_prompt_format,
        )
        return model_input.tokens

    def tool_response_messages(self, *args, **kwargs):
        template = ToolResponseGenerator().gen(*args, **kwargs)
        return [
            RawMessage(
                role="tool",
                content=template.render(),
            )
        ]

    def system_messages(
        self,
        builtin_tools: List[ToolDefinition],
        custom_tools: List[FunctionTool],
        instruction: Optional[str] = None,
    ) -> List[RawMessage]:
        messages = []

        default_gen = SystemDefaultGenerator()
        default_template = default_gen.gen()

        sys_content = ""

        tool_template = None
        if builtin_tools or custom_tools:
            tool_gen = BuiltinToolGenerator()
            tool_template = tool_gen.gen(builtin_tools + custom_tools)

            sys_content += tool_template.render()
            sys_content += "\n"

        sys_content += default_template.render()

        if instruction:
            sys_content += "\n\n"
            sys_content += instruction

        sys_content += "\n"
        messages.append(RawMessage(role="system", content=sys_content))

        if custom_tools:
            if self.tool_prompt_format == ToolPromptFormat.json:
                tool_gen = JsonCustomToolGenerator()
            elif self.tool_prompt_format == ToolPromptFormat.function_tag:
                tool_gen = FunctionTagCustomToolGenerator()
            else:
                raise ValueError(f"Non supported ToolPromptFormat {self.tool_prompt_format}")

            custom_template = tool_gen.gen(custom_tools)
            messages.append(RawMessage(role="user", content=custom_template.render()))

        return messages

    def assistant_response_messages(
        self,
        content: str,
        stop_reason: StopReason,
        tool_call: Optional[ToolCall] = None,
    ) -> List[RawMessage]:
        tool_calls = []
        if tool_call:
            tool_calls.append(tool_call)
        return [
            RawMessage(
                role="assistant",
                content=content,
                tool_calls=tool_calls,
                stop_reason=stop_reason,
            )
        ]

    def user_message(self, content: str) -> List[RawMessage]:
        return [RawMessage(role="user", content=content)]

    def display_message_as_tokens(self, message: RawMessage) -> None:
        """Util to print tokenized string to shell"""
        tokens = self.formatter.encode_message(message, self.tool_prompt_format)
        on_colors = [
            "on_red",
            "on_green",
            "on_yellow",
            "on_blue",
            "on_magenta",
            "on_cyan",
        ]
        for i, t in enumerate(tokens):
            on_col = on_colors[i % len(on_colors)]
            print(colored(self.tokenizer.decode([t]), "white", on_col), end="")
        print("\n", end="")
```
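A minimal usage sketch of the interface above, assuming the tokenizer and template assets are available locally; the `ToolDefinition`/`FunctionTool` construction is elided since their exact fields live in `llama_stack.models.llama.datatypes`:

```python
# Rough usage sketch (not part of the file above); assumes the tokenizer assets load.
interface = LLama31Interface(ToolPromptFormat.json)

messages = interface.system_messages(
    builtin_tools=[],   # e.g. ToolDefinition entries for web_search / code_interpreter
    custom_tools=[],    # e.g. FunctionTool entries for user-defined tools
    instruction="You are a helpful assistant.",
)
messages += interface.user_message("What is the capital of France?")

tokens = interface.get_tokens(messages)  # token ids for the full dialog prompt
print(f"{len(tokens)} tokens in the rendered prompt")

# Color-coded per-token dump of each message, useful when inspecting templates.
for message in messages:
    interface.display_message_as_tokens(message)
```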