mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-10-04 04:04:14 +00:00
This is a sweeping change to clean up some gunk around our "Tool" definitions. First, we had two types `Tool` and `ToolDef`. The first of these was a "Resource" type for the registry but we had stopped registering tools inside the Registry long ago (and only registered ToolGroups). The latter was for specifying tools for the Agents API. This PR removes the former and adds an optional `toolgroup_id` field to the latter. Secondly, as pointed out by @bbrowning in https://github.com/llamastack/llama-stack/pull/3003#issuecomment-3245270132, we were doing a lossy conversion from a full JSON schema from the MCP tool specification into our ToolDefinition to send it to the model. There is no necessity to do this -- we ourselves aren't doing any execution at all but merely passing it to the chat completions API which supports this. By doing this (and by doing it poorly), we encountered limitations like not supporting array items, or not resolving $refs, etc. To fix this, we replaced the `parameters` field with `{ input_schema, output_schema }`, which can be full-blown JSON schemas. Finally, there were some types in our llama-related chat format conversion which needed some cleanup. We are taking this opportunity to clean those up. This PR is a substantial breaking change to the API. However, given our window for introducing breaking changes, this suits us just fine. I will be landing a concurrent `llama-stack-client` change as well since API shapes are changing.
148 lines
6.5 KiB
Python
148 lines
6.5 KiB
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
# All rights reserved.
|
|
#
|
|
# This source code is licensed under the terms described in the LICENSE file in
|
|
# the root directory of this source tree.
|
|
|
|
from collections.abc import AsyncGenerator
|
|
from contextlib import asynccontextmanager
|
|
from enum import Enum
|
|
from typing import Any, cast
|
|
|
|
import httpx
|
|
from mcp import ClientSession, McpError
|
|
from mcp import types as mcp_types
|
|
from mcp.client.sse import sse_client
|
|
from mcp.client.streamable_http import streamablehttp_client
|
|
|
|
from llama_stack.apis.common.content_types import ImageContentItem, InterleavedContentItem, TextContentItem
|
|
from llama_stack.apis.tools import (
|
|
ListToolDefsResponse,
|
|
ToolDef,
|
|
ToolInvocationResult,
|
|
)
|
|
from llama_stack.core.datatypes import AuthenticationRequiredError
|
|
from llama_stack.log import get_logger
|
|
from llama_stack.providers.utils.tools.ttl_dict import TTLDict
|
|
|
|
# Module-level logger, namespaced under the "tools" category.
logger = get_logger(__name__, category="tools")

# Remembers which transport protocol last worked for each endpoint so the next
# connection attempt tries it first; entries expire after one hour (3600s).
protocol_cache = TTLDict(ttl_seconds=3600)
|
|
|
|
|
|
class MCPProtol(Enum):
    """Transport protocol used to reach an MCP server.

    NOTE(review): the name looks like a typo for ``MCPProtocol``; left
    unchanged because other code in this file references it by this name.
    """

    # Protocol not yet known for an endpoint (default on a cache miss).
    UNKNOWN = 0
    # Streamable-HTTP transport; tried first by default.
    STREAMABLE_HTTP = 1
    # Server-Sent Events transport; used as the fallback.
    SSE = 2
|
|
|
|
|
|
@asynccontextmanager
async def client_wrapper(endpoint: str, headers: dict[str, str]) -> AsyncGenerator[ClientSession, Any]:
    """Yield an initialized MCP ``ClientSession`` for *endpoint*.

    Tries the streamable-HTTP transport first (or SSE first, if SSE is the
    cached happy path for this endpoint) and falls back to the other transport
    on connection/timeout/protocol errors. On success the winning protocol is
    cached so later calls try it first.

    :param endpoint: URL of the MCP server.
    :param headers: HTTP headers to send (e.g. auth) with the connection.
    :raises AuthenticationRequiredError: if the server responds 401.
    :raises ConnectionError: if both transports fail with network errors.
    :raises TimeoutError: if both transports time out.
    """
    # we use a ttl'd dict to cache the happy path protocol for each endpoint
    # but, we always fall back to trying the other protocol if we cannot initialize the session
    connection_strategies = [MCPProtol.STREAMABLE_HTTP, MCPProtol.SSE]
    mcp_protocol = protocol_cache.get(endpoint, default=MCPProtol.UNKNOWN)
    if mcp_protocol == MCPProtol.SSE:
        # SSE worked last time for this endpoint -- try it first.
        connection_strategies = [MCPProtol.SSE, MCPProtol.STREAMABLE_HTTP]

    for i, strategy in enumerate(connection_strategies):
        try:
            client = streamablehttp_client
            if strategy == MCPProtol.SSE:
                client = sse_client
            # streamablehttp_client yields a 3-tuple and sse_client a 2-tuple;
            # only the first two elements (read/write streams) are used here.
            async with client(endpoint, headers=headers) as client_streams:
                async with ClientSession(read_stream=client_streams[0], write_stream=client_streams[1]) as session:
                    await session.initialize()
                    # Remember the protocol that worked before handing the
                    # session to the caller.
                    protocol_cache[endpoint] = strategy
                    yield session
                    # Caller's `async with` body finished -- do not fall
                    # through to the fallback strategy.
                    return
        # `except*` (PEP 654): the async transports raise ExceptionGroups.
        except* httpx.HTTPStatusError as eg:
            for exc in eg.exceptions:
                # mypy does not currently narrow the type of `eg.exceptions` based on the `except*` filter,
                # so we explicitly cast each item to httpx.HTTPStatusError. This is safe because
                # `except* httpx.HTTPStatusError` guarantees all exceptions in `eg.exceptions` are of that type.
                err = cast(httpx.HTTPStatusError, exc)
                if err.response.status_code == 401:
                    # 401 means auth is required -- no point retrying the
                    # other transport.
                    raise AuthenticationRequiredError(exc) from exc
            if i == len(connection_strategies) - 1:
                # Last strategy failed with a non-401 HTTP error: propagate.
                raise
        except* httpx.ConnectError as eg:
            # Connection refused, server down, network unreachable
            if i == len(connection_strategies) - 1:
                error_msg = f"Failed to connect to MCP server at {endpoint}: Connection refused"
                logger.error(f"MCP connection error: {error_msg}")
                raise ConnectionError(error_msg) from eg
            else:
                logger.warning(
                    f"failed to connect to MCP server at {endpoint} via {strategy.name}, falling back to {connection_strategies[i + 1].name}"
                )
        except* httpx.TimeoutException as eg:
            # Request timeout, server too slow
            if i == len(connection_strategies) - 1:
                error_msg = f"MCP server at {endpoint} timed out"
                logger.error(f"MCP timeout error: {error_msg}")
                raise TimeoutError(error_msg) from eg
            else:
                logger.warning(
                    f"MCP server at {endpoint} timed out via {strategy.name}, falling back to {connection_strategies[i + 1].name}"
                )
        except* httpx.RequestError as eg:
            # DNS resolution failures, network errors, invalid URLs
            if i == len(connection_strategies) - 1:
                # Get the first exception's message for the error string
                exc_msg = str(eg.exceptions[0]) if eg.exceptions else "Unknown error"
                error_msg = f"Network error connecting to MCP server at {endpoint}: {exc_msg}"
                logger.error(f"MCP network error: {error_msg}")
                raise ConnectionError(error_msg) from eg
            else:
                logger.warning(
                    f"network error connecting to MCP server at {endpoint} via {strategy.name}, falling back to {connection_strategies[i + 1].name}"
                )
        except* McpError:
            # MCP-level protocol error: retry with the other transport if one
            # remains, otherwise propagate the group unchanged.
            if i < len(connection_strategies) - 1:
                logger.warning(
                    f"failed to connect via {strategy.name}, falling back to {connection_strategies[i + 1].name}"
                )
            else:
                raise
|
|
|
|
|
|
async def list_mcp_tools(endpoint: str, headers: dict[str, str]) -> ListToolDefsResponse:
    """List the tools exposed by an MCP server as Llama Stack ``ToolDef``s.

    Opens a (transport-negotiated) session against *endpoint*, asks the server
    for its tool catalog, and converts each MCP tool into a ``ToolDef``,
    passing the JSON schemas through untouched.

    :param endpoint: URL of the MCP server.
    :param headers: HTTP headers (e.g. auth) for the connection.
    :returns: ``ListToolDefsResponse`` wrapping the converted tool definitions.
    """
    async with client_wrapper(endpoint, headers) as session:
        listing = await session.list_tools()
        tool_defs = [
            ToolDef(
                name=mcp_tool.name,
                description=mcp_tool.description,
                # Forward the full JSON schemas verbatim; no lossy conversion.
                input_schema=mcp_tool.inputSchema,
                # outputSchema is optional in the MCP spec / older SDKs.
                output_schema=getattr(mcp_tool, "outputSchema", None),
                # Record where this tool came from so it can be invoked later.
                metadata={
                    "endpoint": endpoint,
                },
            )
            for mcp_tool in listing.tools
        ]
    return ListToolDefsResponse(data=tool_defs)
|
|
|
|
|
|
async def invoke_mcp_tool(
|
|
endpoint: str, headers: dict[str, str], tool_name: str, kwargs: dict[str, Any]
|
|
) -> ToolInvocationResult:
|
|
async with client_wrapper(endpoint, headers) as session:
|
|
result = await session.call_tool(tool_name, kwargs)
|
|
|
|
content: list[InterleavedContentItem] = []
|
|
for item in result.content:
|
|
if isinstance(item, mcp_types.TextContent):
|
|
content.append(TextContentItem(text=item.text))
|
|
elif isinstance(item, mcp_types.ImageContent):
|
|
content.append(ImageContentItem(image=item.data))
|
|
elif isinstance(item, mcp_types.EmbeddedResource):
|
|
logger.warning(f"EmbeddedResource is not supported: {item}")
|
|
else:
|
|
raise ValueError(f"Unknown content type: {type(item)}")
|
|
return ToolInvocationResult(
|
|
content=content,
|
|
error_code=1 if result.isError else 0,
|
|
)
|