Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-06 20:44:58 +00:00)
add tools to chat completion request
This commit is contained in:
parent 9777639a1c
commit 68855ed218
26 changed files with 558 additions and 226 deletions

@@ -110,35 +110,6 @@ class Session(BaseModel):
     started_at: datetime


-@json_schema_type
-class ToolPromptFormat(Enum):
-    """This Enum refers to the prompt format for calling zero shot tools
-
-    `json` --
-        Refers to the json format for calling tools.
-        The json format takes the form like
-        {
-            "type": "function",
-            "function" : {
-                "name": "function_name",
-                "description": "function_description",
-                "parameters": {...}
-            }
-        }
-
-    `function_tag` --
-        This is an example of how you could define
-        your own user defined format for making tool calls.
-        The function_tag format looks like this,
-        <function=function_name>(parameters)</function>
-
-    The detailed prompts for each of these formats are defined in `system_prompt.py`
-    """
-
-    json = "json"
-    function_tag = "function_tag"
-
-
 @json_schema_type
 class AgenticSystemInstanceConfig(BaseModel):
     instructions: str

@@ -56,10 +56,10 @@ from llama_toolchain.safety.api.datatypes import (
 )
 from llama_toolchain.agentic_system.api.endpoints import *  # noqa

+from llama_toolchain.tools.base import BaseTool
+from llama_toolchain.tools.builtin import SingleMessageBuiltinTool
+
 from .safety import SafetyException, ShieldRunnerMixin
-from .system_prompt import get_agentic_prefix_messages
-from .tools.base import BaseTool
-from .tools.builtin import SingleMessageBuiltinTool


 class AgentInstance(ShieldRunnerMixin):

@@ -85,18 +85,6 @@ class AgentInstance(ShieldRunnerMixin):
         self.inference_api = inference_api
         self.safety_api = safety_api

-        if prefix_messages is not None and len(prefix_messages) > 0:
-            self.prefix_messages = prefix_messages
-        else:
-            self.prefix_messages = get_agentic_prefix_messages(
-                builtin_tools,
-                custom_tool_definitions,
-                tool_prompt_format,
-            )
-
-        for m in self.prefix_messages:
-            print(m.content)
-
         self.max_infer_iters = max_infer_iters
         self.tools_dict = {t.get_name(): t for t in builtin_tools}

@@ -344,7 +332,7 @@ class AgentInstance(ShieldRunnerMixin):
         stream: bool = False,
         max_gen_len: Optional[int] = None,
     ) -> AsyncGenerator:
-        input_messages = preprocess_dialog(input_messages, self.prefix_messages)
+        input_messages = preprocess_dialog(input_messages)

         attachments = []

@@ -373,7 +361,8 @@ class AgentInstance(ShieldRunnerMixin):
         req = ChatCompletionRequest(
             model=self.model,
             messages=input_messages,
-            available_tools=self.instance_config.available_tools,
+            tools=self.instance_config.available_tools,
+            tool_prompt_format=self.instance_config.tool_prompt_format,
             stream=True,
             sampling_params=SamplingParams(
                 temperature=temperature,
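
Note: after this hunk the agent no longer renders tool prompts itself; it forwards tool definitions on the request. As a rough orientation, a tool list like the one the agent config passes through `tools=...` might be built as below. This is an illustrative sketch, not part of the commit; the `ToolParamDefinition` field names are assumptions about the llama_models datatypes of this era.

    # Illustrative sketch, not from this commit: building the tool list that
    # flows into `tools=...` above. The ToolParamDefinition fields are assumptions.
    from llama_models.llama3.api.datatypes import (
        BuiltinTool,
        ToolDefinition,
        ToolParamDefinition,
    )

    available_tools = [
        # builtin tool, referenced by enum
        ToolDefinition(tool_name=BuiltinTool.brave_search),
        # custom zero-shot tool, described by name/description/parameters
        ToolDefinition(
            tool_name="get_boiling_point",
            description="Returns the boiling point of a liquid",
            parameters={
                "liquid_name": ToolParamDefinition(
                    param_type="str",
                    description="Name of the liquid",
                    required=True,
                ),
            },
        ),
    ]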

@@ -601,14 +590,12 @@ def attachment_message(url: URL) -> ToolResponseMessage:
     )


-def preprocess_dialog(
-    messages: List[Message], prefix_messages: List[Message]
-) -> List[Message]:
+def preprocess_dialog(messages: List[Message]) -> List[Message]:
     """
     Preprocesses the dialog by removing the system message and
     adding the system message to the beginning of the dialog.
     """
-    ret = prefix_messages.copy()
+    ret = []

     for m in messages:
         if m.role == Role.system.value:

@@ -24,17 +24,17 @@ from llama_toolchain.agentic_system.api import (
     AgenticSystemTurnCreateRequest,
 )

-from .agent_instance import AgentInstance
-
-from .config import AgenticSystemConfig
-
-from .tools.builtin import (
+from llama_toolchain.tools.builtin import (
     BraveSearchTool,
     CodeInterpreterTool,
     PhotogenTool,
     WolframAlphaTool,
 )
-from .tools.safety import with_safety
+from llama_toolchain.tools.safety import with_safety
+
+from .agent_instance import AgentInstance
+
+from .config import AgenticSystemConfig


 logger = logging.getLogger()

@@ -18,7 +18,7 @@ from llama_toolchain.agentic_system.api import (
 from llama_toolchain.agentic_system.api.datatypes import ToolPromptFormat
 from llama_toolchain.agentic_system.client import AgenticSystemClient

-from llama_toolchain.agentic_system.tools.custom.execute import (
+from llama_toolchain.agentic_system.meta_reference.execute_with_custom_tools import (
     execute_with_custom_tools,
 )
 from llama_toolchain.safety.api.datatypes import BuiltinShield, ShieldDefinition

@@ -15,6 +15,41 @@ from typing_extensions import Annotated
 from llama_models.llama3.api.datatypes import *  # noqa: F403


+@json_schema_type
+class ToolChoice(Enum):
+    auto = "auto"
+    required = "required"
+
+
+@json_schema_type
+class ToolPromptFormat(Enum):
+    """This Enum refers to the prompt format for calling zero shot tools
+
+    `json` --
+        Refers to the json format for calling tools.
+        The json format takes the form like
+        {
+            "type": "function",
+            "function" : {
+                "name": "function_name",
+                "description": "function_description",
+                "parameters": {...}
+            }
+        }
+
+    `function_tag` --
+        This is an example of how you could define
+        your own user defined format for making tool calls.
+        The function_tag format looks like this,
+        <function=function_name>(parameters)</function>
+
+    The detailed prompts for each of these formats are defined in `system_prompt.py`
+    """
+
+    json = "json"
+    function_tag = "function_tag"
+
+
 class LogProbConfig(BaseModel):
     top_k: Optional[int] = 0
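
Note: the docstring above names two wire formats. As a minimal sketch of what each looks like in practice (the tool name and values below are invented for illustration, mirroring the docstring):

    # Illustrative sketch mirroring the docstring above. The `json` format
    # describes a tool to the model as a JSON object; `function_tag` wraps a
    # call in <function=...> tags. Names and values are invented examples.
    json_format_example = {
        "type": "function",
        "function": {
            "name": "get_boiling_point",
            "description": "Returns the boiling point of a liquid",
            "parameters": {"liquid_name": {"param_type": "str", "required": True}},
        },
    }
    function_tag_example = (
        '<function=get_boiling_point>{"liquid_name": "water"}</function>'
    )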

@@ -7,6 +7,8 @@
 from .datatypes import *  # noqa: F403
 from typing import Optional, Protocol

+from llama_models.llama3.api.datatypes import ToolDefinition
+
 # this dependency is annoying and we need a forked up version anyway
 from llama_models.schema_utils import webmethod

@@ -56,7 +58,11 @@ class ChatCompletionRequest(BaseModel):
     sampling_params: Optional[SamplingParams] = SamplingParams()

     # zero-shot tool definitions as input to the model
-    available_tools: Optional[List[ToolDefinition]] = Field(default_factory=list)
+    tools: Optional[List[ToolDefinition]] = Field(default_factory=list)
+    tool_choice: Optional[ToolChoice] = Field(default=ToolChoice.auto)
+    tool_prompt_format: Optional[ToolPromptFormat] = Field(
+        default=ToolPromptFormat.json
+    )

     stream: Optional[bool] = False
     logprobs: Optional[LogProbConfig] = None

@@ -82,8 +88,11 @@ class BatchChatCompletionRequest(BaseModel):
     sampling_params: Optional[SamplingParams] = SamplingParams()

     # zero-shot tool definitions as input to the model
-    available_tools: Optional[List[ToolDefinition]] = Field(default_factory=list)
-
+    tools: Optional[List[ToolDefinition]] = Field(default_factory=list)
+    tool_choice: Optional[ToolChoice] = Field(default=ToolChoice.auto)
+    tool_prompt_format: Optional[ToolPromptFormat] = Field(
+        default=ToolPromptFormat.json
+    )
     logprobs: Optional[LogProbConfig] = None
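
Note: both request types gain the same three fields. A hedged sketch of constructing a request against the new schema follows; it is not from the commit, the model identifier is a placeholder, and `available_tools` is the example list sketched earlier.

    # Illustrative sketch, not from this commit: a request using the new fields.
    request = ChatCompletionRequest(
        model="Meta-Llama3.1-8B-Instruct",         # placeholder model id
        messages=[UserMessage(content="What is the boiling point of polyjuice?")],
        tools=available_tools,                     # was `available_tools=` before
        tool_choice=ToolChoice.auto,               # `required` exists but is asserted against downstream
        tool_prompt_format=ToolPromptFormat.json,  # or ToolPromptFormat.function_tag
        stream=False,
    )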

@@ -22,7 +22,7 @@ from llama_toolchain.inference.api import (
     ToolCallDelta,
     ToolCallParseStatus,
 )
-
+from llama_toolchain.inference.prepare_messages import prepare_messages_for_tools
 from .config import MetaReferenceImplConfig
 from .model_parallel import LlamaModelParallelGenerator

@@ -67,6 +67,7 @@ class MetaReferenceInferenceImpl(Inference):
     ) -> AsyncIterator[
         Union[ChatCompletionResponseStreamChunk, ChatCompletionResponse]
     ]:
+        request = prepare_messages_for_tools(request)
         model = resolve_model(request.model)
         if model is None:
             raise RuntimeError(

@@ -32,7 +32,7 @@ from llama_toolchain.inference.api import (
     ToolCallDelta,
     ToolCallParseStatus,
 )
-
+from llama_toolchain.inference.prepare_messages import prepare_messages_for_tools
 from .config import OllamaImplConfig

 # TODO: Eventually this will move to the llama cli model list command

@@ -111,6 +111,7 @@ class OllamaInference(Inference):
         return options

     async def chat_completion(self, request: ChatCompletionRequest) -> AsyncGenerator:
+        request = prepare_messages_for_tools(request)
         # accumulate sampling params and other options to pass to ollama
        options = self.get_ollama_chat_options(request)
        ollama_model = self.resolve_ollama_model(request.model)
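
Note: the two inference providers above now share the same entry pattern, sketched below for orientation (provider-specific generation elided):

    # Illustrative sketch of the pattern both providers now follow: fold the
    # request's tool definitions into its messages once, then generate as before.
    async def chat_completion(self, request: ChatCompletionRequest) -> AsyncGenerator:
        request = prepare_messages_for_tools(request)
        # ... provider-specific option handling and generation continue,
        # operating on the augmented request.messages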

@@ -1,70 +1,90 @@
 # Copyright (c) Meta Platforms, Inc. and affiliates.
 # All rights reserved.
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.

-import json
-import os
+import textwrap

 from datetime import datetime
 from typing import List

-from llama_toolchain.agentic_system.api.datatypes import ToolPromptFormat
-
-from llama_toolchain.inference.api import (
-    BuiltinTool,
-    Message,
-    SystemMessage,
-    ToolDefinition,
-    UserMessage,
+from llama_toolchain.inference.api import *  # noqa: F403
+from llama_toolchain.tools.builtin import (
+    BraveSearchTool,
+    CodeInterpreterTool,
+    PhotogenTool,
+    WolframAlphaTool,
 )

-from .tools.builtin import SingleMessageBuiltinTool

+def tool_breakdown(tools: List[ToolDefinition]) -> str:
+    builtin_tools, custom_tools = [], []
+    for dfn in tools:
+        if isinstance(dfn.tool_name, BuiltinTool):
+            builtin_tools.append(dfn)
+        else:
+            custom_tools.append(dfn)
+
+    return builtin_tools, custom_tools
+
+
-def get_agentic_prefix_messages(
-    builtin_tools: List[SingleMessageBuiltinTool],
-    custom_tools: List[ToolDefinition],
-    tool_prompt_format: ToolPromptFormat,
-) -> List[Message]:
+def prepare_messages_for_tools(request: ChatCompletionRequest) -> ChatCompletionRequest:
+    """This function takes a ChatCompletionRequest and returns an augmented request.
+    The request's messages are augmented to update the system message
+    corresponding to the tool definitions provided in the request.
+    """
+    assert request.tool_choice == ToolChoice.auto, "Only `ToolChoice.auto` supported"
+
+    existing_messages = request.messages
+
+    existing_system_message = None
+    if existing_messages[0].role == Role.system.value:
+        existing_system_message = existing_messages.pop(0)
+
+    builtin_tools, custom_tools = tool_breakdown(request.tools)
+
     messages = []
     content = ""
-    if builtin_tools:
+    if builtin_tools or custom_tools:
         content += "Environment: ipython\n"

+    if builtin_tools:
         tool_str = ", ".join(
             [
-                t.get_name()
+                t.tool_name.value
                 for t in builtin_tools
-                if t.get_name() != BuiltinTool.code_interpreter.value
+                if t.tool_name != BuiltinTool.code_interpreter
             ]
         )
         if tool_str:
-            content += f"Tools: {tool_str}"
+            content += f"Tools: {tool_str}\n"

     current_date = datetime.now()
     formatted_date = current_date.strftime("%d %B %Y")
-    date_str = f"""
-Cutting Knowledge Date: December 2023
-Today Date: {formatted_date}\n"""
-    content += date_str
+    date_str = textwrap.dedent(
+        f"""
+        Cutting Knowledge Date: December 2023
+        Today Date: {formatted_date}
+        """
+    )
+    content += date_str.lstrip("\n")
+
+    if existing_system_message:
+        content += "\n"
+        content += existing_system_message.content
+
+    messages.append(SystemMessage(content=content))

     if custom_tools:
-        if tool_prompt_format == ToolPromptFormat.function_tag:
+        if request.tool_prompt_format == ToolPromptFormat.function_tag:
             text = prompt_for_function_tag(custom_tools)
             messages.append(UserMessage(content=text))
-        elif tool_prompt_format == ToolPromptFormat.json:
+        elif request.tool_prompt_format == ToolPromptFormat.json:
             text = prompt_for_json(custom_tools)
             messages.append(UserMessage(content=text))
         else:
             raise NotImplementedError(
                 f"Tool prompt format {tool_prompt_format} is not supported"
             )
-    else:
-        messages.append(SystemMessage(content=content))

-    return messages
+    messages += existing_messages
+    request.messages = messages
+    return request


 def prompt_for_json(custom_tools: List[ToolDefinition]) -> str:
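
Note: as a hedged illustration of the new function's effect (not from the commit; the rendered strings below are approximations, and `available_tools` is the example list sketched earlier):

    # Illustrative sketch: prepare_messages_for_tools folds tool definitions
    # into the message list instead of the agent pre-rendering a prefix.
    request = ChatCompletionRequest(
        model="Meta-Llama3.1-8B-Instruct",  # placeholder model id
        messages=[UserMessage(content="What is the boiling point of polyjuice?")],
        tools=available_tools,
        tool_prompt_format=ToolPromptFormat.json,
    )
    request = prepare_messages_for_tools(request)
    # request.messages is now roughly:
    #   [SystemMessage(content="Environment: ipython\nTools: brave_search\n"
    #                          "Cutting Knowledge Date: December 2023\n"
    #                          "Today Date: <today>\n"),
    #    UserMessage(content=<JSON description of get_boiling_point>),
    #    UserMessage(content="What is the boiling point of polyjuice?")]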

@@ -91,23 +111,26 @@ def prompt_for_function_tag(custom_tools: List[ToolDefinition]) -> str:
         custom_tool_params += get_instruction_string(t) + "\n"
         custom_tool_params += get_parameters_string(t) + "\n\n"

-    content = f"""
-You have access to the following functions:
-
-{custom_tool_params}
-Think very carefully before calling functions.
-If you choose to call a function ONLY reply in the following format with no prefix or suffix:
-
-<function=example_function_name>{{"example_name": "example_value"}}</function>
-
-Reminder:
-- If looking for real time information use relevant functions before falling back to brave_search
-- Function calls MUST follow the specified format, start with <function= and end with </function>
-- Required parameters MUST be specified
-- Only call one function at a time
-- Put the entire function call reply on one line
-"""
-    return content
+    content = textwrap.dedent(
+        """
+        You have access to the following functions:
+
+        {custom_tool_params}
+        Think very carefully before calling functions.
+        If you choose to call a function ONLY reply in the following format with no prefix or suffix:
+
+        <function=example_function_name>{{"example_name": "example_value"}}</function>
+
+        Reminder:
+        - If looking for real time information use relevant functions before falling back to brave_search
+        - Function calls MUST follow the specified format, start with <function= and end with </function>
+        - Required parameters MUST be specified
+        - Only call one function at a time
+        - Put the entire function call reply on one line
+        """
+    )
+
+    return content.lstrip("\n").format(custom_tool_params=custom_tool_params)


 def get_instruction_string(custom_tool_definition) -> str:
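
Note: the prompt above instructs the model to answer tool calls as a single `<function=name>{json args}</function>` line. A hedged sketch of how a client might recover such a call follows; this is not the toolchain's actual parser.

    # Illustrative sketch only; not the toolchain's actual parser.
    import json
    import re

    TAG_RE = re.compile(r"^<function=(?P<name>[^>]+)>(?P<args>.*)</function>$")

    def parse_function_tag_reply(reply: str):
        """Return (tool_name, arguments) for a well-formed reply, else None."""
        match = TAG_RE.match(reply.strip())
        if match is None:
            return None  # not a tool call; treat the reply as plain text
        return match.group("name"), json.loads(match.group("args"))

    # parse_function_tag_reply(
    #     '<function=get_boiling_point>{"liquid_name": "water"}</function>'
    # ) -> ("get_boiling_point", {"liquid_name": "water"})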

@@ -13,9 +13,7 @@ from llama_models.llama3.api.datatypes import *  # noqa: F403
 from llama_toolchain.agentic_system.api import *  # noqa: F403

-# TODO: this is symptomatic of us needing to pull more tooling related utilities
-from llama_toolchain.agentic_system.meta_reference.tools.builtin import (
-    interpret_content_as_attachment,
-)
+from llama_toolchain.tools.builtin import interpret_content_as_attachment


 class CustomTool: