More substantial cleanup of the Tool vs. ToolDef split

Ashwin Bharambe 2025-10-01 15:54:14 -07:00
parent fa6ed28aea
commit 6749c853c0
34 changed files with 2676 additions and 615 deletions


@ -2394,11 +2394,11 @@
"get": {
"responses": {
"200": {
"description": "A Tool.",
"description": "A ToolDef.",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Tool"
"$ref": "#/components/schemas/ToolDef"
}
}
}
@ -4447,11 +4447,11 @@
"get": {
"responses": {
"200": {
"description": "A ListToolsResponse.",
"description": "A ListToolDefsResponse.",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ListToolsResponse"
"$ref": "#/components/schemas/ListToolDefsResponse"
}
}
}
@ -7375,6 +7375,10 @@
"ToolDef": {
"type": "object",
"properties": {
"toolgroup_id": {
"type": "string",
"description": "(Optional) ID of the tool group this tool belongs to"
},
"name": {
"type": "string",
"description": "Name of the tool"
@ -8071,79 +8075,6 @@
]
},
"arguments": {
"oneOf": [
{
"type": "string"
},
{
"type": "object",
"additionalProperties": {
"oneOf": [
{
"type": "string"
},
{
"type": "integer"
},
{
"type": "number"
},
{
"type": "boolean"
},
{
"type": "null"
},
{
"type": "array",
"items": {
"oneOf": [
{
"type": "string"
},
{
"type": "integer"
},
{
"type": "number"
},
{
"type": "boolean"
},
{
"type": "null"
}
]
}
},
{
"type": "object",
"additionalProperties": {
"oneOf": [
{
"type": "string"
},
{
"type": "integer"
},
{
"type": "number"
},
{
"type": "boolean"
},
{
"type": "null"
}
]
}
}
]
}
}
]
},
"arguments_json": {
"type": "string"
}
},
@ -13032,133 +12963,6 @@
"title": "QuerySpanTreeResponse",
"description": "Response containing a tree structure of spans."
},
"Tool": {
"type": "object",
"properties": {
"identifier": {
"type": "string"
},
"provider_resource_id": {
"type": "string"
},
"provider_id": {
"type": "string"
},
"type": {
"type": "string",
"enum": [
"model",
"shield",
"vector_db",
"dataset",
"scoring_function",
"benchmark",
"tool",
"tool_group",
"prompt"
],
"const": "tool",
"default": "tool",
"description": "Type of resource, always 'tool'"
},
"toolgroup_id": {
"type": "string",
"description": "ID of the tool group this tool belongs to"
},
"description": {
"type": "string",
"description": "Human-readable description of what the tool does"
},
"input_schema": {
"type": "object",
"additionalProperties": {
"oneOf": [
{
"type": "null"
},
{
"type": "boolean"
},
{
"type": "number"
},
{
"type": "string"
},
{
"type": "array"
},
{
"type": "object"
}
]
},
"description": "JSON Schema for the tool's input parameters"
},
"output_schema": {
"type": "object",
"additionalProperties": {
"oneOf": [
{
"type": "null"
},
{
"type": "boolean"
},
{
"type": "number"
},
{
"type": "string"
},
{
"type": "array"
},
{
"type": "object"
}
]
},
"description": "JSON Schema for the tool's output"
},
"metadata": {
"type": "object",
"additionalProperties": {
"oneOf": [
{
"type": "null"
},
{
"type": "boolean"
},
{
"type": "number"
},
{
"type": "string"
},
{
"type": "array"
},
{
"type": "object"
}
]
},
"description": "(Optional) Additional metadata about the tool"
}
},
"additionalProperties": false,
"required": [
"identifier",
"provider_id",
"type",
"toolgroup_id",
"description"
],
"title": "Tool",
"description": "A tool that can be invoked by agents."
},
"ToolGroup": {
"type": "object",
"properties": {
@ -14437,24 +14241,6 @@
"title": "ListToolGroupsResponse",
"description": "Response containing a list of tool groups."
},
"ListToolsResponse": {
"type": "object",
"properties": {
"data": {
"type": "array",
"items": {
"$ref": "#/components/schemas/Tool"
},
"description": "List of tools"
}
},
"additionalProperties": false,
"required": [
"data"
],
"title": "ListToolsResponse",
"description": "Response containing a list of tools."
},
"ListVectorDBsResponse": {
"type": "object",
"properties": {


@ -1674,11 +1674,11 @@ paths:
get:
responses:
'200':
description: A Tool.
description: A ToolDef.
content:
application/json:
schema:
$ref: '#/components/schemas/Tool'
$ref: '#/components/schemas/ToolDef'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@ -3154,11 +3154,11 @@ paths:
get:
responses:
'200':
description: A ListToolsResponse.
description: A ListToolDefsResponse.
content:
application/json:
schema:
$ref: '#/components/schemas/ListToolsResponse'
$ref: '#/components/schemas/ListToolDefsResponse'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
@ -5315,6 +5315,10 @@ components:
ToolDef:
type: object
properties:
toolgroup_id:
type: string
description: >-
(Optional) ID of the tool group this tool belongs to
name:
type: string
description: Name of the tool
@ -5842,33 +5846,6 @@ components:
title: BuiltinTool
- type: string
arguments:
oneOf:
- type: string
- type: object
additionalProperties:
oneOf:
- type: string
- type: integer
- type: number
- type: boolean
- type: 'null'
- type: array
items:
oneOf:
- type: string
- type: integer
- type: number
- type: boolean
- type: 'null'
- type: object
additionalProperties:
oneOf:
- type: string
- type: integer
- type: number
- type: boolean
- type: 'null'
arguments_json:
type: string
additionalProperties: false
required:
@ -9582,82 +9559,6 @@ components:
title: QuerySpanTreeResponse
description: >-
Response containing a tree structure of spans.
Tool:
type: object
properties:
identifier:
type: string
provider_resource_id:
type: string
provider_id:
type: string
type:
type: string
enum:
- model
- shield
- vector_db
- dataset
- scoring_function
- benchmark
- tool
- tool_group
- prompt
const: tool
default: tool
description: Type of resource, always 'tool'
toolgroup_id:
type: string
description: >-
ID of the tool group this tool belongs to
description:
type: string
description: >-
Human-readable description of what the tool does
input_schema:
type: object
additionalProperties:
oneOf:
- type: 'null'
- type: boolean
- type: number
- type: string
- type: array
- type: object
description: >-
JSON Schema for the tool's input parameters
output_schema:
type: object
additionalProperties:
oneOf:
- type: 'null'
- type: boolean
- type: number
- type: string
- type: array
- type: object
description: JSON Schema for the tool's output
metadata:
type: object
additionalProperties:
oneOf:
- type: 'null'
- type: boolean
- type: number
- type: string
- type: array
- type: object
description: >-
(Optional) Additional metadata about the tool
additionalProperties: false
required:
- identifier
- provider_id
- type
- toolgroup_id
- description
title: Tool
description: A tool that can be invoked by agents.
ToolGroup:
type: object
properties:
@ -10645,19 +10546,6 @@ components:
title: ListToolGroupsResponse
description: >-
Response containing a list of tool groups.
ListToolsResponse:
type: object
properties:
data:
type: array
items:
$ref: '#/components/schemas/Tool'
description: List of tools
additionalProperties: false
required:
- data
title: ListToolsResponse
description: Response containing a list of tools.
ListVectorDBsResponse:
type: object
properties:


@ -19,48 +19,6 @@ from llama_stack.schema_utils import json_schema_type, webmethod
from .rag_tool import RAGToolRuntime
@json_schema_type
class ToolParameter(BaseModel):
"""Parameter definition for a tool.
:param name: Name of the parameter
:param parameter_type: Type of the parameter (e.g., string, integer)
:param description: Human-readable description of what the parameter does
:param required: Whether this parameter is required for tool invocation
:param items: Type of the elements when parameter_type is array
:param title: (Optional) Title of the parameter
:param default: (Optional) Default value for the parameter if not provided
"""
name: str
parameter_type: str
description: str
required: bool = Field(default=True)
items: dict | None = None
title: str | None = None
default: Any | None = None
@json_schema_type
class Tool(Resource):
"""A tool that can be invoked by agents.
:param type: Type of resource, always 'tool'
:param toolgroup_id: ID of the tool group this tool belongs to
:param description: Human-readable description of what the tool does
:param input_schema: JSON Schema for the tool's input parameters
:param output_schema: JSON Schema for the tool's output
:param metadata: (Optional) Additional metadata about the tool
"""
type: Literal[ResourceType.tool] = ResourceType.tool
toolgroup_id: str
description: str
input_schema: dict[str, Any] | None = None
output_schema: dict[str, Any] | None = None
metadata: dict[str, Any] | None = None
@json_schema_type
class ToolDef(BaseModel):
"""Tool definition used in runtime contexts.
@ -70,8 +28,10 @@ class ToolDef(BaseModel):
:param input_schema: (Optional) JSON Schema for tool inputs (MCP inputSchema)
:param output_schema: (Optional) JSON Schema for tool outputs (MCP outputSchema)
:param metadata: (Optional) Additional metadata about the tool
:param toolgroup_id: (Optional) ID of the tool group this tool belongs to
"""
toolgroup_id: str | None = None
name: str
description: str | None = None
input_schema: dict[str, Any] | None = None
@ -126,7 +86,7 @@ class ToolInvocationResult(BaseModel):
class ToolStore(Protocol):
async def get_tool(self, tool_name: str) -> Tool: ...
async def get_tool(self, tool_name: str) -> ToolDef: ...
async def get_tool_group(self, toolgroup_id: str) -> ToolGroup: ...
@ -139,15 +99,6 @@ class ListToolGroupsResponse(BaseModel):
data: list[ToolGroup]
class ListToolsResponse(BaseModel):
"""Response containing a list of tools.
:param data: List of tools
"""
data: list[Tool]
class ListToolDefsResponse(BaseModel):
"""Response containing a list of tool definitions.
@ -198,11 +149,11 @@ class ToolGroups(Protocol):
...
@webmethod(route="/tools", method="GET", level=LLAMA_STACK_API_V1)
async def list_tools(self, toolgroup_id: str | None = None) -> ListToolsResponse:
async def list_tools(self, toolgroup_id: str | None = None) -> ListToolDefsResponse:
"""List tools with optional tool group.
:param toolgroup_id: The ID of the tool group to list tools for.
:returns: A ListToolsResponse.
:returns: A ListToolDefsResponse.
"""
...
@ -210,11 +161,11 @@ class ToolGroups(Protocol):
async def get_tool(
self,
tool_name: str,
) -> Tool:
) -> ToolDef:
"""Get a tool by its name.
:param tool_name: The name of the tool to get.
:returns: A Tool.
:returns: A ToolDef.
"""
...
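For illustration, a minimal sketch of the consolidated model (field names are taken from the diff above; the concrete values, including the toolgroup id, are assumed). Runtime and registry tools now share the single ToolDef: the former Tool.identifier is expressed as ToolDef.name, and the group association travels as an optional field.

from llama_stack.apis.tools import ToolDef

# Hypothetical instance; toolgroup_id is optional and gets stamped onto the
# definition when the routing table indexes its group.
tool = ToolDef(
    name="web_search",
    description="Search the web for information",
    toolgroup_id="builtin::websearch",  # assumed group id, for illustration
    input_schema={
        "type": "object",
        "properties": {
            "query": {"type": "string", "description": "The query to search for"}
        },
        "required": ["query"],
    },
)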


@ -22,7 +22,7 @@ from llama_stack.apis.safety import Safety
from llama_stack.apis.scoring import Scoring
from llama_stack.apis.scoring_functions import ScoringFn, ScoringFnInput
from llama_stack.apis.shields import Shield, ShieldInput
from llama_stack.apis.tools import Tool, ToolGroup, ToolGroupInput, ToolRuntime
from llama_stack.apis.tools import ToolGroup, ToolGroupInput, ToolRuntime
from llama_stack.apis.vector_dbs import VectorDB, VectorDBInput
from llama_stack.apis.vector_io import VectorIO
from llama_stack.core.access_control.datatypes import AccessRule
@ -84,15 +84,11 @@ class BenchmarkWithOwner(Benchmark, ResourceWithOwner):
pass
class ToolWithOwner(Tool, ResourceWithOwner):
pass
class ToolGroupWithOwner(ToolGroup, ResourceWithOwner):
pass
RoutableObject = Model | Shield | VectorDB | Dataset | ScoringFn | Benchmark | Tool | ToolGroup
RoutableObject = Model | Shield | VectorDB | Dataset | ScoringFn | Benchmark | ToolGroup
RoutableObjectWithProvider = Annotated[
ModelWithOwner
@ -101,7 +97,6 @@ RoutableObjectWithProvider = Annotated[
| DatasetWithOwner
| ScoringFnWithOwner
| BenchmarkWithOwner
| ToolWithOwner
| ToolGroupWithOwner,
Field(discriminator="type"),
]


@ -11,7 +11,7 @@ from llama_stack.apis.common.content_types import (
InterleavedContent,
)
from llama_stack.apis.tools import (
ListToolsResponse,
ListToolDefsResponse,
RAGDocument,
RAGQueryConfig,
RAGQueryResult,
@ -86,6 +86,6 @@ class ToolRuntimeRouter(ToolRuntime):
async def list_runtime_tools(
self, tool_group_id: str | None = None, mcp_endpoint: URL | None = None
) -> ListToolsResponse:
) -> ListToolDefsResponse:
logger.debug(f"ToolRuntimeRouter.list_runtime_tools: {tool_group_id}")
return await self.routing_table.list_tools(tool_group_id)


@ -8,7 +8,7 @@ from typing import Any
from llama_stack.apis.common.content_types import URL
from llama_stack.apis.common.errors import ToolGroupNotFoundError
from llama_stack.apis.tools import ListToolGroupsResponse, ListToolsResponse, Tool, ToolGroup, ToolGroups
from llama_stack.apis.tools import ListToolDefsResponse, ListToolGroupsResponse, ToolDef, ToolGroup, ToolGroups
from llama_stack.core.datatypes import AuthenticationRequiredError, ToolGroupWithOwner
from llama_stack.log import get_logger
@ -27,7 +27,7 @@ def parse_toolgroup_from_toolgroup_name_pair(toolgroup_name_with_maybe_tool_name
class ToolGroupsRoutingTable(CommonRoutingTableImpl, ToolGroups):
toolgroups_to_tools: dict[str, list[Tool]] = {}
toolgroups_to_tools: dict[str, list[ToolDef]] = {}
tool_to_toolgroup: dict[str, str] = {}
# overridden
@ -43,7 +43,7 @@ class ToolGroupsRoutingTable(CommonRoutingTableImpl, ToolGroups):
routing_key = self.tool_to_toolgroup[routing_key]
return await super().get_provider_impl(routing_key, provider_id)
async def list_tools(self, toolgroup_id: str | None = None) -> ListToolsResponse:
async def list_tools(self, toolgroup_id: str | None = None) -> ListToolDefsResponse:
if toolgroup_id:
if group_id := parse_toolgroup_from_toolgroup_name_pair(toolgroup_id):
toolgroup_id = group_id
@ -68,31 +68,19 @@ class ToolGroupsRoutingTable(CommonRoutingTableImpl, ToolGroups):
continue
all_tools.extend(self.toolgroups_to_tools[toolgroup.identifier])
return ListToolsResponse(data=all_tools)
return ListToolDefsResponse(data=all_tools)
async def _index_tools(self, toolgroup: ToolGroup):
provider_impl = await super().get_provider_impl(toolgroup.identifier, toolgroup.provider_id)
tooldefs_response = await provider_impl.list_runtime_tools(toolgroup.identifier, toolgroup.mcp_endpoint)
# TODO: kill this Tool vs ToolDef distinction
tooldefs = tooldefs_response.data
tools = []
for t in tooldefs:
tools.append(
Tool(
identifier=t.name,
toolgroup_id=toolgroup.identifier,
description=t.description or "",
input_schema=t.input_schema,
output_schema=t.output_schema,
metadata=t.metadata,
provider_id=toolgroup.provider_id,
)
)
t.toolgroup_id = toolgroup.identifier
self.toolgroups_to_tools[toolgroup.identifier] = tools
for tool in tools:
self.tool_to_toolgroup[tool.identifier] = toolgroup.identifier
self.toolgroups_to_tools[toolgroup.identifier] = tooldefs
for tool in tooldefs:
self.tool_to_toolgroup[tool.name] = toolgroup.identifier
async def list_tool_groups(self) -> ListToolGroupsResponse:
return ListToolGroupsResponse(data=await self.get_all_with_type("tool_group"))
@ -103,12 +91,12 @@ class ToolGroupsRoutingTable(CommonRoutingTableImpl, ToolGroups):
raise ToolGroupNotFoundError(toolgroup_id)
return tool_group
async def get_tool(self, tool_name: str) -> Tool:
async def get_tool(self, tool_name: str) -> ToolDef:
if tool_name in self.tool_to_toolgroup:
toolgroup_id = self.tool_to_toolgroup[tool_name]
tools = self.toolgroups_to_tools[toolgroup_id]
for tool in tools:
if tool.identifier == tool_name:
if tool.name == tool_name:
return tool
raise ValueError(f"Tool '{tool_name}' not found")
@ -133,7 +121,6 @@ class ToolGroupsRoutingTable(CommonRoutingTableImpl, ToolGroups):
# baked in some of the code and tests right now.
if not toolgroup.mcp_endpoint:
await self._index_tools(toolgroup)
return toolgroup
async def unregister_toolgroup(self, toolgroup_id: str) -> None:
await self.unregister_object(await self.get_tool_group(toolgroup_id))


@ -36,7 +36,7 @@ class DistributionRegistry(Protocol):
REGISTER_PREFIX = "distributions:registry"
KEY_VERSION = "v9"
KEY_VERSION = "v10"
KEY_FORMAT = f"{REGISTER_PREFIX}:{KEY_VERSION}::" + "{type}:{identifier}"


@ -81,7 +81,7 @@ def tool_chat_page():
for toolgroup_id in toolgroup_selection:
tools = client.tools.list(toolgroup_id=toolgroup_id)
grouped_tools[toolgroup_id] = [tool.identifier for tool in tools]
grouped_tools[toolgroup_id] = [tool.name for tool in tools]
total_tools += len(tools)
st.markdown(f"Active Tools: 🛠 {total_tools}")


@ -37,14 +37,7 @@ RecursiveType = Primitive | list[Primitive] | dict[str, Primitive]
class ToolCall(BaseModel):
call_id: str
tool_name: BuiltinTool | str
# Plan is to deprecate the Dict in favor of a JSON string
# that is parsed on the client side instead of trying to manage
# the recursive type here.
# Making this a union so that client side can start prepping for this change.
# Eventually, we will remove both the Dict and arguments_json field,
# and arguments will just be a str
arguments: str | dict[str, RecursiveType]
arguments_json: str | None = None
arguments: str
@field_validator("tool_name", mode="before")
@classmethod
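A minimal consumer-side sketch of the new contract (module path assumed from the hunk context): arguments is now always the raw JSON string, so code that previously read the dict form or fell back to arguments_json parses it explicitly.

import json

from llama_stack.models.llama.datatypes import ToolCall  # assumed import path

call = ToolCall(call_id="call_1", tool_name="get_weather", arguments='{"city": "Sligo"}')
args = json.loads(call.arguments)  # -> {'city': 'Sligo'}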


@ -232,8 +232,7 @@ class ChatFormat:
ToolCall(
call_id=call_id,
tool_name=tool_name,
arguments=tool_arguments,
arguments_json=json.dumps(tool_arguments),
arguments=json.dumps(tool_arguments),
)
)
content = ""


@ -298,8 +298,7 @@ class ChatFormat:
ToolCall(
call_id=call_id,
tool_name=tool_name,
arguments=tool_arguments,
arguments_json=json.dumps(tool_arguments),
arguments=json.dumps(tool_arguments),
)
)
content = ""


@ -804,61 +804,34 @@ class ChatAgent(ShieldRunnerMixin):
[t.identifier for t in (await self.tool_groups_api.list_tool_groups()).data]
)
raise ValueError(f"Toolgroup {toolgroup_name} not found, available toolgroups: {available_tool_groups}")
if input_tool_name is not None and not any(tool.identifier == input_tool_name for tool in tools.data):
if input_tool_name is not None and not any(tool.name == input_tool_name for tool in tools.data):
raise ValueError(
f"Tool {input_tool_name} not found in toolgroup {toolgroup_name}. Available tools: {', '.join([tool.identifier for tool in tools.data])}"
f"Tool {input_tool_name} not found in toolgroup {toolgroup_name}. Available tools: {', '.join([tool.name for tool in tools.data])}"
)
for tool_def in tools.data:
if toolgroup_name.startswith("builtin") and toolgroup_name != RAG_TOOL_GROUP:
identifier: str | BuiltinTool | None = tool_def.identifier
identifier: str | BuiltinTool | None = tool_def.name
if identifier == "web_search":
identifier = BuiltinTool.brave_search
else:
identifier = BuiltinTool(identifier)
else:
# add if tool_name is unspecified or the tool_def identifier is the same as the tool_name
if input_tool_name in (None, tool_def.identifier):
identifier = tool_def.identifier
if input_tool_name in (None, tool_def.name):
identifier = tool_def.name
else:
identifier = None
if tool_name_to_def.get(identifier, None):
raise ValueError(f"Tool {identifier} already exists")
if identifier:
# Build JSON Schema from tool parameters
properties = {}
required = []
for param in tool_def.parameters:
param_schema = {
"type": param.parameter_type,
"description": param.description,
}
if param.default is not None:
param_schema["default"] = param.default
if param.items is not None:
param_schema["items"] = param.items
if param.title is not None:
param_schema["title"] = param.title
properties[param.name] = param_schema
if param.required:
required.append(param.name)
input_schema = {
"type": "object",
"properties": properties,
"required": required,
}
tool_name_to_def[tool_def.identifier] = ToolDefinition(
tool_name_to_def[identifier] = ToolDefinition(
tool_name=identifier,
description=tool_def.description,
input_schema=input_schema,
input_schema=tool_def.input_schema,
)
tool_name_to_args[tool_def.identifier] = toolgroup_to_args.get(toolgroup_name, {})
tool_name_to_args[identifier] = toolgroup_to_args.get(toolgroup_name, {})
self.tool_defs, self.tool_name_to_args = (
list(tool_name_to_def.values()),


@ -33,7 +33,6 @@ from llama_stack.apis.tools import (
ToolDef,
ToolGroup,
ToolInvocationResult,
ToolParameter,
ToolRuntime,
)
from llama_stack.apis.vector_io import (
@ -301,13 +300,16 @@ class MemoryToolRuntimeImpl(ToolGroupsProtocolPrivate, ToolRuntime, RAGToolRunti
ToolDef(
name="knowledge_search",
description="Search for information in a database.",
parameters=[
ToolParameter(
name="query",
description="The query to search for. Can be a natural language sentence or keywords.",
parameter_type="string",
),
],
input_schema={
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The query to search for. Can be a natural language sentence or keywords.",
}
},
"required": ["query"],
},
),
]
)
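The same ToolParameter-to-JSON-Schema rewrite repeats in the provider diffs below (Bing, Brave, Tavily, WolframAlpha). As a rough sketch of the mechanical pattern, with a hypothetical helper name and plain-dict parameter shape:

def params_to_input_schema(params: list[dict]) -> dict:
    # Each legacy ToolParameter(name, parameter_type, description, required)
    # becomes one entry in the JSON Schema "properties" map, and required
    # parameter names are collected into the top-level "required" list.
    properties = {
        p["name"]: {"type": p["parameter_type"], "description": p["description"]}
        for p in params
    }
    required = [p["name"] for p in params if p.get("required", True)]
    return {"type": "object", "properties": properties, "required": required}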


@ -99,8 +99,7 @@ def _convert_to_vllm_tool_calls_in_response(
ToolCall(
call_id=call.id,
tool_name=call.function.name,
arguments=json.loads(call.function.arguments),
arguments_json=call.function.arguments,
arguments=call.function.arguments,
)
for call in tool_calls
]
@ -160,7 +159,6 @@ def _process_vllm_chat_completion_end_of_stream(
for _index, tool_call_buf in sorted(tool_call_bufs.items()):
args_str = tool_call_buf.arguments or "{}"
try:
args = json.loads(args_str)
chunks.append(
ChatCompletionResponseStreamChunk(
event=ChatCompletionResponseEvent(
@ -169,8 +167,7 @@ def _process_vllm_chat_completion_end_of_stream(
tool_call=ToolCall(
call_id=tool_call_buf.call_id,
tool_name=tool_call_buf.tool_name,
arguments=args,
arguments_json=args_str,
arguments=args_str,
),
parse_status=ToolCallParseStatus.succeeded,
),


@ -15,7 +15,6 @@ from llama_stack.apis.tools import (
ToolDef,
ToolGroup,
ToolInvocationResult,
ToolParameter,
ToolRuntime,
)
from llama_stack.core.request_headers import NeedsRequestProviderData
@ -57,13 +56,16 @@ class BingSearchToolRuntimeImpl(ToolGroupsProtocolPrivate, ToolRuntime, NeedsReq
ToolDef(
name="web_search",
description="Search the web using Bing Search API",
parameters=[
ToolParameter(
name="query",
description="The query to search for",
parameter_type="string",
)
],
input_schema={
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The query to search for",
}
},
"required": ["query"],
},
)
]
)


@ -14,7 +14,6 @@ from llama_stack.apis.tools import (
ToolDef,
ToolGroup,
ToolInvocationResult,
ToolParameter,
ToolRuntime,
)
from llama_stack.core.request_headers import NeedsRequestProviderData
@ -56,13 +55,16 @@ class BraveSearchToolRuntimeImpl(ToolGroupsProtocolPrivate, ToolRuntime, NeedsRe
ToolDef(
name="web_search",
description="Search the web for information",
parameters=[
ToolParameter(
name="query",
description="The query to search for",
parameter_type="string",
)
],
input_schema={
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The query to search for",
}
},
"required": ["query"],
},
built_in_type=BuiltinTool.brave_search,
)
]


@ -15,7 +15,6 @@ from llama_stack.apis.tools import (
ToolDef,
ToolGroup,
ToolInvocationResult,
ToolParameter,
ToolRuntime,
)
from llama_stack.core.request_headers import NeedsRequestProviderData
@ -56,13 +55,16 @@ class TavilySearchToolRuntimeImpl(ToolGroupsProtocolPrivate, ToolRuntime, NeedsR
ToolDef(
name="web_search",
description="Search the web for information",
parameters=[
ToolParameter(
name="query",
description="The query to search for",
parameter_type="string",
)
],
input_schema={
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The query to search for",
}
},
"required": ["query"],
},
)
]
)


@ -15,7 +15,6 @@ from llama_stack.apis.tools import (
ToolDef,
ToolGroup,
ToolInvocationResult,
ToolParameter,
ToolRuntime,
)
from llama_stack.core.request_headers import NeedsRequestProviderData
@ -57,13 +56,16 @@ class WolframAlphaToolRuntimeImpl(ToolGroupsProtocolPrivate, ToolRuntime, NeedsR
ToolDef(
name="wolfram_alpha",
description="Query WolframAlpha for computational knowledge",
parameters=[
ToolParameter(
name="query",
description="The query to compute",
parameter_type="string",
)
],
input_schema={
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The query to compute",
}
},
"required": ["query"],
},
)
]
)


@ -538,18 +538,13 @@ async def convert_message_to_openai_dict(message: Message, download: bool = Fals
if isinstance(tool_name, BuiltinTool):
tool_name = tool_name.value
# arguments_json can be None, so attempt it first and fall back to arguments
if hasattr(tc, "arguments_json") and tc.arguments_json:
arguments = tc.arguments_json
else:
arguments = json.dumps(tc.arguments)
result["tool_calls"].append(
{
"id": tc.call_id,
"type": "function",
"function": {
"name": tool_name,
"arguments": arguments,
"arguments": tc.arguments,
},
}
)
@ -685,8 +680,7 @@ def convert_tool_call(
valid_tool_call = ToolCall(
call_id=tool_call.id,
tool_name=tool_call.function.name,
arguments=json.loads(tool_call.function.arguments),
arguments_json=tool_call.function.arguments,
arguments=tool_call.function.arguments,
)
except Exception:
return UnparseableToolCall(
@ -897,8 +891,7 @@ def _convert_openai_tool_calls(
ToolCall(
call_id=call.id,
tool_name=call.function.name,
arguments=json.loads(call.function.arguments),
arguments_json=call.function.arguments,
arguments=call.function.arguments,
)
for call in tool_calls
]
@ -1184,8 +1177,7 @@ async def convert_openai_chat_completion_stream(
tool_call = ToolCall(
call_id=buffer["call_id"],
tool_name=buffer["name"],
arguments=arguments,
arguments_json=buffer["arguments"],
arguments=buffer["arguments"],
)
yield ChatCompletionResponseStreamChunk(
event=ChatCompletionResponseEvent(
@ -1418,7 +1410,7 @@ class OpenAIChatCompletionToLlamaStackMixin:
openai_tool_call = OpenAIChoiceDeltaToolCall(
index=0,
function=OpenAIChoiceDeltaToolCallFunction(
arguments=tool_call.arguments_json,
arguments=tool_call.arguments,
),
)
delta = OpenAIChoiceDelta(tool_calls=[openai_tool_call])
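Because OpenAI-compatible APIs already carry tool-call arguments as a serialized JSON string, dropping the parallel dict field turns both conversion directions into a pass-through. A sketch under that assumption (function names are illustrative; ToolCall import path assumed as above):

from llama_stack.models.llama.datatypes import ToolCall  # assumed import path

def to_openai_function_dict(tc: ToolCall) -> dict:
    # No json.dumps needed: arguments is already the serialized form.
    return {"name": tc.tool_name, "arguments": tc.arguments}

def from_openai_function(call_id: str, name: str, arguments: str) -> ToolCall:
    # No json.loads on ingest either; parsing is deferred to the consumer.
    return ToolCall(call_id=call_id, tool_name=name, arguments=arguments)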


@ -222,16 +222,16 @@ def make_mcp_server(required_auth_token: str | None = None, tools: dict[str, Cal
def run_server():
try:
logger.info(f"Starting MCP server on port {port}")
logger.debug(f"Starting MCP server on port {port}")
server_instance.run()
logger.info(f"MCP server on port {port} has stopped")
logger.debug(f"MCP server on port {port} has stopped")
except Exception as e:
logger.error(f"MCP server failed to start on port {port}: {e}")
raise
# Start the server in a new thread
server_thread = threading.Thread(target=run_server, daemon=True)
logger.info(f"Starting MCP server thread on port {port}")
logger.debug(f"Starting MCP server thread on port {port}")
server_thread.start()
# Polling until the server is ready
@ -239,13 +239,13 @@ def make_mcp_server(required_auth_token: str | None = None, tools: dict[str, Cal
start_time = time.time()
server_url = f"http://localhost:{port}/sse"
logger.info(f"Waiting for MCP server to be ready at {server_url}")
logger.debug(f"Waiting for MCP server to be ready at {server_url}")
while time.time() - start_time < timeout:
try:
response = httpx.get(server_url)
if response.status_code in [200, 401]:
logger.info(f"MCP server is ready on port {port} (status: {response.status_code})")
logger.debug(f"MCP server is ready on port {port} (status: {response.status_code})")
break
except httpx.RequestError as e:
logger.debug(f"Server not ready yet, retrying... ({e})")
@ -261,14 +261,14 @@ def make_mcp_server(required_auth_token: str | None = None, tools: dict[str, Cal
try:
yield {"server_url": server_url}
finally:
logger.info(f"Shutting down MCP server on port {port}")
logger.debug(f"Shutting down MCP server on port {port}")
server_instance.should_exit = True
time.sleep(0.5)
# Force shutdown if still running
if server_thread.is_alive():
try:
logger.info("Force shutting down server thread")
logger.debug("Force shutting down server thread")
if hasattr(server_instance, "servers") and server_instance.servers:
for srv in server_instance.servers:
srv.close()


@ -213,7 +213,7 @@ class TestMCPToolsInChatCompletion:
"function": {
"name": tool.name,
"description": tool.description,
"parameters": tool.input_schema if hasattr(tool, "input_schema") else {},
"parameters": tool.input_schema or {},
},
}
)


@ -0,0 +1,103 @@
{
"request": {
"method": "POST",
"url": "http://0.0.0.0:11434/v1/v1/chat/completions",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"messages": [
{
"role": "user",
"content": "What time is it in UTC?"
}
],
"stream": true,
"tools": [
{
"type": "function",
"function": {
"name": "get_time",
"description": "Get current time",
"parameters": {
"type": "object",
"properties": {
"timezone": {
"type": "string"
}
}
}
}
}
]
},
"endpoint": "/v1/chat/completions",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": [
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "chatcmpl-187",
"choices": [
{
"delta": {
"content": "",
"function_call": null,
"refusal": null,
"role": "assistant",
"tool_calls": [
{
"index": 0,
"id": "call_nng2lhyy",
"function": {
"arguments": "{\"timezone\":\"UTC\"}",
"name": "get_time"
},
"type": "function"
}
]
},
"finish_reason": null,
"index": 0,
"logprobs": null
}
],
"created": 1759351462,
"model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion.chunk",
"service_tier": null,
"system_fingerprint": "fp_ollama",
"usage": null
}
},
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "chatcmpl-187",
"choices": [
{
"delta": {
"content": "",
"function_call": null,
"refusal": null,
"role": "assistant",
"tool_calls": null
},
"finish_reason": "tool_calls",
"index": 0,
"logprobs": null
}
],
"created": 1759351462,
"model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion.chunk",
"service_tier": null,
"system_fingerprint": "fp_ollama",
"usage": null
}
}
],
"is_streaming": true
}
}


@ -0,0 +1,743 @@
{
"request": {
"method": "POST",
"url": "http://localhost:11434/api/generate",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"raw": true,
"prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"book_flight\",\n \"description\": \"\n Book a flight with passenger and payment information.\n\n This tool uses JSON Schema $ref and $defs for type reuse.\n \",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"flight\", \"passengers\", \"payment\"],\n \"properties\": {\n \"flight\": {\n \"type\": \"object\",\n \"description\": \"\"\n },\n \"passengers\": {\n \"type\": \"array\",\n \"description\": \"\"\n },\n \"payment\": {\n \"type\": \"object\",\n \"description\": \"\"\n }\n }\n }\n },\n {\n \"name\": \"process_order\",\n \"description\": \"\n Process an order with nested address information.\n\n Uses nested objects and $ref.\n \",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"order_data\"],\n \"properties\": {\n \"order_data\": {\n \"type\": \"object\",\n \"description\": \"\"\n }\n }\n }\n },\n {\n \"name\": \"flexible_contact\",\n \"description\": \"\n Accept flexible contact (email or phone).\n\n Uses anyOf schema.\n \",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"contact_info\"],\n \"properties\": {\n \"contact_info\": {\n \"type\": \"string\",\n \"description\": \"\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nYou are a helpful assistant that can process orders and book flights.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nProcess an order with 2 widgets going to 123 Main St, San Francisco<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
"options": {
"temperature": 0.0
},
"stream": true
},
"endpoint": "/api/generate",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": [
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:15.998653Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "[",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:16.042936Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "process",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:16.086689Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "_order",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:16.128398Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "(order",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:16.172808Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "_data",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:16.215123Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "={\"",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:16.259326Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "order",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:16.303332Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "_id",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:16.345453Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "\":",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:16.388745Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " ",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:16.432258Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "1",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:16.474552Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": ",",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:16.51763Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " \"",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:16.561623Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "customer",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:16.605498Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "_name",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:16.649228Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "\":",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:16.691311Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " \"",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:16.735229Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "John",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:16.77891Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " Doe",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:16.822779Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "\",",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:16.864798Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " \"",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:16.908719Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "address",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:16.952037Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "\":",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:16.997344Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " {\"",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:17.040154Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "street",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:17.084207Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "\":",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:17.125633Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " \"",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:17.169076Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "123",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:17.211634Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " Main",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:17.254149Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " St",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:17.298417Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "\",",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:17.339693Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " \"",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:17.382919Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "city",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:17.42623Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "\":",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:17.468642Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " \"",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:17.512136Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "San",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:17.553518Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " Francisco",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:17.597107Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "\"}}",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:17.640858Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": ")]",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-10-01T22:46:17.685634Z",
"done": true,
"done_reason": "stop",
"total_duration": 7493315500,
"load_duration": 4640587750,
"prompt_eval_count": 556,
"prompt_eval_duration": 1163238292,
"eval_count": 40,
"eval_duration": 1687901500,
"response": "",
"thinking": null,
"context": null
}
}
],
"is_streaming": true
}
}


@ -0,0 +1,86 @@
{
"request": {
"method": "POST",
"url": "http://0.0.0.0:11434/v1/v1/chat/completions",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"messages": [
{
"role": "user",
"content": "What's the weather in San Francisco?"
}
],
"tools": [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get weather for a location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "City name"
}
},
"required": [
"location"
]
}
}
}
]
},
"endpoint": "/v1/chat/completions",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": {
"__type__": "openai.types.chat.chat_completion.ChatCompletion",
"__data__": {
"id": "chatcmpl-190",
"choices": [
{
"finish_reason": "tool_calls",
"index": 0,
"logprobs": null,
"message": {
"content": "",
"refusal": null,
"role": "assistant",
"annotations": null,
"audio": null,
"function_call": null,
"tool_calls": [
{
"id": "call_st2uc9zo",
"function": {
"arguments": "{\"location\":\"San Francisco\"}",
"name": "get_weather"
},
"type": "function",
"index": 0
}
]
}
}
],
"created": 1759351458,
"model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion",
"service_tier": null,
"system_fingerprint": "fp_ollama",
"usage": {
"completion_tokens": 18,
"prompt_tokens": 161,
"total_tokens": 179,
"completion_tokens_details": null,
"prompt_tokens_details": null
}
}
},
"is_streaming": false
}
}


@ -0,0 +1,78 @@
{
"request": {
"method": "POST",
"url": "http://0.0.0.0:11434/v1/v1/chat/completions",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"messages": [
{
"role": "user",
"content": "Call the no args tool"
}
],
"tools": [
{
"type": "function",
"function": {
"name": "no_args_tool",
"description": "Tool with no arguments",
"parameters": {
"type": "object",
"properties": {}
}
}
}
]
},
"endpoint": "/v1/chat/completions",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": {
"__type__": "openai.types.chat.chat_completion.ChatCompletion",
"__data__": {
"id": "chatcmpl-695",
"choices": [
{
"finish_reason": "tool_calls",
"index": 0,
"logprobs": null,
"message": {
"content": "",
"refusal": null,
"role": "assistant",
"annotations": null,
"audio": null,
"function_call": null,
"tool_calls": [
{
"id": "call_bfwjy2i9",
"function": {
"arguments": "{}",
"name": "no_args_tool"
},
"type": "function",
"index": 0
}
]
}
}
],
"created": 1759351462,
"model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion",
"service_tier": null,
"system_fingerprint": "fp_ollama",
"usage": {
"completion_tokens": 14,
"prompt_tokens": 148,
"total_tokens": 162,
"completion_tokens_details": null,
"prompt_tokens_details": null
}
}
},
"is_streaming": false
}
}


@ -0,0 +1,97 @@
{
"request": {
"method": "POST",
"url": "http://0.0.0.0:11434/v1/v1/chat/completions",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"messages": [
{
"role": "user",
"content": "Calculate 5 + 3"
}
],
"tools": [
{
"type": "function",
"function": {
"name": "calculate",
"description": "",
"parameters": {
"properties": {
"x": {
"title": "X",
"type": "number"
},
"y": {
"title": "Y",
"type": "number"
},
"operation": {
"title": "Operation",
"type": "string"
}
},
"required": [
"x",
"y",
"operation"
],
"title": "calculateArguments",
"type": "object"
}
}
}
]
},
"endpoint": "/v1/chat/completions",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": {
"__type__": "openai.types.chat.chat_completion.ChatCompletion",
"__data__": {
"id": "chatcmpl-985",
"choices": [
{
"finish_reason": "tool_calls",
"index": 0,
"logprobs": null,
"message": {
"content": "",
"refusal": null,
"role": "assistant",
"annotations": null,
"audio": null,
"function_call": null,
"tool_calls": [
{
"id": "call_338dhuqf",
"function": {
"arguments": "{\"operation\":\"+\",\"x\":\"5\",\"y\":\"3\"}",
"name": "calculate"
},
"type": "function",
"index": 0
}
]
}
}
],
"created": 1759357337,
"model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion",
"service_tier": null,
"system_fingerprint": "fp_ollama",
"usage": {
"completion_tokens": 27,
"prompt_tokens": 172,
"total_tokens": 199,
"completion_tokens_details": null,
"prompt_tokens_details": null
}
}
},
"is_streaming": false
}
}


@ -11,62 +11,7 @@
"body": {
"__type__": "ollama._types.ProcessResponse",
"__data__": {
"models": [
{
"model": "llama3.2:3b",
"name": "llama3.2:3b",
"digest": "a80c4f17acd55265feec403c7aef86be0c25983ab279d83f3bcd3abbcb5b8b72",
"expires_at": "2025-10-01T14:42:00.149364-07:00",
"size": 3367856128,
"size_vram": 3367856128,
"details": {
"parent_model": "",
"format": "gguf",
"family": "llama",
"families": [
"llama"
],
"parameter_size": "3.2B",
"quantization_level": "Q4_K_M"
}
},
{
"model": "llama3.2:3b-instruct-fp16",
"name": "llama3.2:3b-instruct-fp16",
"digest": "195a8c01d91ec3cb1e0aad4624a51f2602c51fa7d96110f8ab5a20c84081804d",
"expires_at": "2025-10-01T13:49:34.349274-07:00",
"size": 7919570944,
"size_vram": 7919570944,
"details": {
"parent_model": "",
"format": "gguf",
"family": "llama",
"families": [
"llama"
],
"parameter_size": "3.2B",
"quantization_level": "F16"
}
},
{
"model": "all-minilm:l6-v2",
"name": "all-minilm:l6-v2",
"digest": "1b226e2802dbb772b5fc32a58f103ca1804ef7501331012de126ab22f67475ef",
"expires_at": "2025-10-01T13:49:25.407653-07:00",
"size": 585846784,
"size_vram": 585846784,
"details": {
"parent_model": "",
"format": "gguf",
"family": "bert",
"families": [
"bert"
],
"parameter_size": "23M",
"quantization_level": "F16"
}
}
]
"models": []
}
},
"is_streaming": false


@ -0,0 +1,117 @@
{
"request": {
"method": "POST",
"url": "http://0.0.0.0:11434/v1/v1/chat/completions",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"messages": [
{
"role": "user",
"content": "Book a flight from SFO to JFK for John Doe"
}
],
"tools": [
{
"type": "function",
"function": {
"name": "book_flight",
"description": "Book a flight",
"parameters": {
"type": "object",
"properties": {
"flight": {
"$ref": "#/$defs/FlightInfo"
},
"passenger": {
"$ref": "#/$defs/Passenger"
}
},
"required": [
"flight",
"passenger"
],
"$defs": {
"FlightInfo": {
"type": "object",
"properties": {
"from": {
"type": "string"
},
"to": {
"type": "string"
},
"date": {
"type": "string",
"format": "date"
}
}
},
"Passenger": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"age": {
"type": "integer"
}
}
}
}
}
}
}
]
},
"endpoint": "/v1/chat/completions",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": {
"__type__": "openai.types.chat.chat_completion.ChatCompletion",
"__data__": {
"id": "chatcmpl-559",
"choices": [
{
"finish_reason": "tool_calls",
"index": 0,
"logprobs": null,
"message": {
"content": "",
"refusal": null,
"role": "assistant",
"annotations": null,
"audio": null,
"function_call": null,
"tool_calls": [
{
"id": "call_p9nnpr9l",
"function": {
"arguments": "{\"flight\":\"{'from':'SFO','to':'JFK'}\",\"passenger\":{\"age\":\"30\",\"name\":\"John Doe\"}}",
"name": "book_flight"
},
"type": "function",
"index": 0
}
]
}
}
],
"created": 1759351460,
"model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion",
"service_tier": null,
"system_fingerprint": "fp_ollama",
"usage": {
"completion_tokens": 38,
"prompt_tokens": 227,
"total_tokens": 265,
"completion_tokens_details": null,
"prompt_tokens_details": null
}
}
},
"is_streaming": false
}
}


@ -0,0 +1,122 @@
{
"request": {
"method": "POST",
"url": "http://0.0.0.0:11434/v1/v1/chat/completions",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"messages": [
{
"role": "user",
"content": "Use one of the available tools"
}
],
"tools": [
{
"type": "function",
"function": {
"name": "simple",
"parameters": {
"type": "object",
"properties": {
"x": {
"type": "string"
}
}
}
}
},
{
"type": "function",
"function": {
"name": "complex",
"parameters": {
"type": "object",
"properties": {
"data": {
"$ref": "#/$defs/Complex"
}
},
"$defs": {
"Complex": {
"type": "object",
"properties": {
"nested": {
"type": "array",
"items": {
"type": "number"
}
}
}
}
}
}
}
},
{
"type": "function",
"function": {
"name": "with_output",
"parameters": {
"type": "object",
"properties": {
"input": {
"type": "string"
}
}
}
}
}
]
},
"endpoint": "/v1/chat/completions",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": {
"__type__": "openai.types.chat.chat_completion.ChatCompletion",
"__data__": {
"id": "chatcmpl-927",
"choices": [
{
"finish_reason": "tool_calls",
"index": 0,
"logprobs": null,
"message": {
"content": "",
"refusal": null,
"role": "assistant",
"annotations": null,
"audio": null,
"function_call": null,
"tool_calls": [
{
"id": "call_umobdfav",
"function": {
"arguments": "{\"data\":\"[[1, 2], [3, 4]]\"}",
"name": "complex"
},
"type": "function",
"index": 0
}
]
}
}
],
"created": 1759351464,
"model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion",
"service_tier": null,
"system_fingerprint": "fp_ollama",
"usage": {
"completion_tokens": 27,
"prompt_tokens": 246,
"total_tokens": 273,
"completion_tokens_details": null,
"prompt_tokens_details": null
}
}
},
"is_streaming": false
}
}

File diff suppressed because it is too large.


@ -26,7 +26,7 @@ def test_web_search_tool(llama_stack_client, sample_search_query):
pytest.skip("TAVILY_SEARCH_API_KEY not set, skipping test")
tools = llama_stack_client.tool_runtime.list_tools()
assert any(tool.identifier == "web_search" for tool in tools)
assert any(tool.name == "web_search" for tool in tools)
response = llama_stack_client.tool_runtime.invoke_tool(
tool_name="web_search", kwargs={"query": sample_search_query}
@ -52,7 +52,7 @@ def test_wolfram_alpha_tool(llama_stack_client, sample_wolfram_alpha_query):
pytest.skip("WOLFRAM_ALPHA_API_KEY not set, skipping test")
tools = llama_stack_client.tool_runtime.list_tools()
assert any(tool.identifier == "wolfram_alpha" for tool in tools)
assert any(tool.name == "wolfram_alpha" for tool in tools)
response = llama_stack_client.tool_runtime.invoke_tool(
tool_name="wolfram_alpha", kwargs={"query": sample_wolfram_alpha_query}
)


@ -362,6 +362,7 @@ class TestAgentWithMCPTools:
model=text_model_id,
instructions="You are a helpful assistant that can process orders and book flights.",
tools=[test_toolgroup_id],
extra_headers=auth_headers,
)
session_id = agent.create_session("test-session-complex")


@ -138,8 +138,7 @@ async def test_tool_call_response(vllm_inference_adapter):
ToolCall(
call_id="foo",
tool_name="knowledge_search",
arguments={"query": "How many?"},
arguments_json='{"query": "How many?"}',
arguments='{"query": "How many?"}',
)
],
),
@ -263,7 +262,7 @@ async def test_tool_call_delta_streaming_arguments_dict():
assert chunks[1].event.event_type.value == "progress"
assert chunks[1].event.delta.type == "tool_call"
assert chunks[1].event.delta.parse_status.value == "succeeded"
assert chunks[1].event.delta.tool_call.arguments_json == '{"number": 28, "power": 3}'
assert chunks[1].event.delta.tool_call.arguments == '{"number": 28, "power": 3}'
assert chunks[2].event.event_type.value == "complete"
@ -339,11 +338,11 @@ async def test_multiple_tool_calls():
assert chunks[1].event.event_type.value == "progress"
assert chunks[1].event.delta.type == "tool_call"
assert chunks[1].event.delta.parse_status.value == "succeeded"
assert chunks[1].event.delta.tool_call.arguments_json == '{"number": 28, "power": 3}'
assert chunks[1].event.delta.tool_call.arguments == '{"number": 28, "power": 3}'
assert chunks[2].event.event_type.value == "progress"
assert chunks[2].event.delta.type == "tool_call"
assert chunks[2].event.delta.parse_status.value == "succeeded"
assert chunks[2].event.delta.tool_call.arguments_json == '{"first_number": 4, "second_number": 7}'
assert chunks[2].event.delta.tool_call.arguments == '{"first_number": 4, "second_number": 7}'
assert chunks[3].event.event_type.value == "complete"


@ -41,9 +41,7 @@ async def test_convert_message_to_openai_dict():
async def test_convert_message_to_openai_dict_with_tool_call():
message = CompletionMessage(
content="",
tool_calls=[
ToolCall(call_id="123", tool_name="test_tool", arguments_json='{"foo": "bar"}', arguments={"foo": "bar"})
],
tool_calls=[ToolCall(call_id="123", tool_name="test_tool", arguments='{"foo": "bar"}')],
stop_reason=StopReason.end_of_turn,
)
@ -65,8 +63,7 @@ async def test_convert_message_to_openai_dict_with_builtin_tool_call():
ToolCall(
call_id="123",
tool_name=BuiltinTool.brave_search,
arguments_json='{"foo": "bar"}',
arguments={"foo": "bar"},
arguments='{"foo": "bar"}',
)
],
stop_reason=StopReason.end_of_turn,
@ -202,8 +199,7 @@ async def test_convert_message_to_openai_dict_new_completion_message_with_tool_c
ToolCall(
call_id="call_123",
tool_name="get_weather",
arguments={"city": "Sligo"},
arguments_json='{"city": "Sligo"}',
arguments='{"city": "Sligo"}',
)
],
stop_reason=StopReason.end_of_turn,