Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-03 01:48:05 +00:00)

commit 01a76e0084 (parent 3b1b37862e)

    Integrate Connectors API within Responses API impl in agentic provider

    Signed-off-by: Jaideep Rao <jrao@redhat.com>

13 changed files with 88 additions and 23 deletions
@@ -7258,8 +7258,13 @@ components:
         type: string
         title: Server Label
       server_url:
-        type: string
-        title: Server Url
+        anyOf:
+        - type: string
+        - type: 'null'
+      connector_id:
+        anyOf:
+        - type: string
+        - type: 'null'
       headers:
         anyOf:
         - additionalProperties: true

@@ -7292,7 +7297,6 @@ components:
       type: object
       required:
       - server_label
-      - server_url
       title: OpenAIResponseInputToolMCP
       description: Model Context Protocol (MCP) tool configuration for OpenAI response inputs.
     CreateOpenaiResponseRequest:
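These two hunks, repeated with different line numbers in each vendored spec file below, relax the OpenAIResponseInputToolMCP schema: server_url becomes nullable, a nullable connector_id property is added, and server_url is dropped from the required list, leaving server_label as the only mandatory field. In other words, a client can now point an MCP tool at a pre-registered connector instead of a raw URL. A sketch of the two resulting tool payloads, with the label, URL, and connector ID invented for illustration:

# Two equivalent ways to attach an MCP server after this change.
# Field names come from the schema above; the label, URL, and
# connector ID are invented for illustration.
tool_by_url = {
    "type": "mcp",
    "server_label": "docs",
    "server_url": "https://mcp.example.com/sse",
}
tool_by_connector = {
    "type": "mcp",
    "server_label": "docs",
    "connector_id": "conn-123",  # resolved to a URL server-side
}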
docs/static/deprecated-llama-stack-spec.yaml (vendored, 10 changed lines)

@@ -4039,8 +4039,13 @@ components:
         type: string
         title: Server Label
       server_url:
-        type: string
-        title: Server Url
+        anyOf:
+        - type: string
+        - type: 'null'
+      connector_id:
+        anyOf:
+        - type: string
+        - type: 'null'
       headers:
         anyOf:
         - additionalProperties: true

@@ -4073,7 +4078,6 @@ components:
       type: object
       required:
       - server_label
-      - server_url
       title: OpenAIResponseInputToolMCP
       description: Model Context Protocol (MCP) tool configuration for OpenAI response inputs.
     CreateOpenaiResponseRequest:
docs/static/experimental-llama-stack-spec.yaml (vendored, 10 changed lines)

@@ -3792,8 +3792,13 @@ components:
         type: string
         title: Server Label
       server_url:
-        type: string
-        title: Server Url
+        anyOf:
+        - type: string
+        - type: 'null'
+      connector_id:
+        anyOf:
+        - type: string
+        - type: 'null'
       headers:
         anyOf:
         - additionalProperties: true

@@ -3826,7 +3831,6 @@ components:
       type: object
       required:
       - server_label
-      - server_url
       title: OpenAIResponseInputToolMCP
       description: Model Context Protocol (MCP) tool configuration for OpenAI response inputs.
     OpenAIResponseObject:
docs/static/llama-stack-spec.yaml (vendored, 10 changed lines)

@@ -5850,8 +5850,13 @@ components:
         type: string
         title: Server Label
       server_url:
-        type: string
-        title: Server Url
+        anyOf:
+        - type: string
+        - type: 'null'
+      connector_id:
+        anyOf:
+        - type: string
+        - type: 'null'
       headers:
         anyOf:
         - additionalProperties: true

@@ -5884,7 +5889,6 @@ components:
       type: object
       required:
       - server_label
-      - server_url
       title: OpenAIResponseInputToolMCP
       description: Model Context Protocol (MCP) tool configuration for OpenAI response inputs.
     CreateOpenaiResponseRequest:
docs/static/stainless-llama-stack-spec.yaml (vendored, 10 changed lines)

@@ -7258,8 +7258,13 @@ components:
         type: string
         title: Server Label
       server_url:
-        type: string
-        title: Server Url
+        anyOf:
+        - type: string
+        - type: 'null'
+      connector_id:
+        anyOf:
+        - type: string
+        - type: 'null'
       headers:
         anyOf:
         - additionalProperties: true

@@ -7292,7 +7297,6 @@ components:
       type: object
       required:
       - server_label
-      - server_url
       title: OpenAIResponseInputToolMCP
       description: Model Context Protocol (MCP) tool configuration for OpenAI response inputs.
     CreateOpenaiResponseRequest:
@@ -14,18 +14,16 @@ from llama_stack.log import get_logger
 from llama_stack.providers.utils.tools.mcp import get_mcp_server_info, list_mcp_tools
 from llama_stack_api import (
     Connector,
+    ConnectorNotFoundError,
     Connectors,
+    ConnectorToolNotFoundError,
     ConnectorType,
     ListConnectorsResponse,
     ListRegistriesResponse,
     ListToolsResponse,
     Registry,
+    RegistryNotFoundError,
     ToolDef,
 )
-from llama_stack_api.common.errors import (
-    ConnectorNotFoundError,
-    ConnectorToolNotFoundError,
-    RegistryNotFoundError,
-)

 logger = get_logger(name=__name__, category="connectors")
@@ -55,6 +53,17 @@ class ConnectorServiceImpl(Connectors):
         self.connectors_map: dict[str, Connector] = {}
         self.registries_map: dict[str, Registry] = {}

+    def get_connector_url(self, connector_id: str) -> str | None:
+        """Get the URL of a connector by its ID.
+
+        :param connector_id: The ID of the connector to get the URL for.
+        :returns: The URL of the connector, or None if it is not registered.
+        """
+        connector = self.connectors_map.get(connector_id)
+        if connector is None:
+            return None
+        return connector.url
+
     async def register_connector(
         self,
         url: str,
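The new get_connector_url helper is a synchronous dictionary lookup with a None contract: an unknown ID returns None rather than raising, leaving the failure policy to the caller (the streaming orchestrator later in this commit turns None into a ValueError). A self-contained sketch of that contract, with invented IDs and URLs:

from dataclasses import dataclass


@dataclass
class ConnectorSketch:
    id: str
    url: str


class ConnectorLookupSketch:
    """Toy stand-in for ConnectorServiceImpl's lookup behavior."""

    def __init__(self) -> None:
        self.connectors_map: dict[str, ConnectorSketch] = {}

    def get_connector_url(self, connector_id: str) -> str | None:
        connector = self.connectors_map.get(connector_id)
        return connector.url if connector is not None else None


svc = ConnectorLookupSketch()
svc.connectors_map["conn-123"] = ConnectorSketch(id="conn-123", url="https://mcp.example.com/sse")
assert svc.get_connector_url("conn-123") == "https://mcp.example.com/sse"
assert svc.get_connector_url("missing") is None  # no exception: caller handles None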
@@ -28,6 +28,7 @@ async def get_provider_impl(
         deps[Api.conversations],
         deps[Api.prompts],
         deps[Api.files],
+        deps[Api.connectors],
         policy,
     )
     await impl.initialize()
@@ -11,6 +11,7 @@ from llama_stack.log import get_logger
 from llama_stack.providers.utils.responses.responses_store import ResponsesStore
 from llama_stack_api import (
     Agents,
+    Connectors,
     Conversations,
     Files,
     Inference,

@@ -49,6 +50,7 @@ class MetaReferenceAgentsImpl(Agents):
         conversations_api: Conversations,
         prompts_api: Prompts,
         files_api: Files,
+        connectors_api: Connectors,
         policy: list[AccessRule],
     ):
         self.config = config

@@ -60,6 +62,7 @@ class MetaReferenceAgentsImpl(Agents):
         self.conversations_api = conversations_api
         self.prompts_api = prompts_api
         self.files_api = files_api
+        self.connectors_api = connectors_api
         self.in_memory_store = InmemoryKVStoreImpl()
         self.openai_responses_impl: OpenAIResponsesImpl | None = None
         self.policy = policy

@@ -78,6 +81,7 @@ class MetaReferenceAgentsImpl(Agents):
             conversations_api=self.conversations_api,
             prompts_api=self.prompts_api,
             files_api=self.files_api,
+            connectors_api=self.connectors_api,
         )

     async def shutdown(self) -> None:
@@ -17,6 +17,7 @@ from llama_stack.providers.utils.responses.responses_store import (
     _OpenAIResponseObjectWithInputAndMessages,
 )
 from llama_stack_api import (
+    Connectors,
     ConversationItem,
     Conversations,
     Files,

@@ -79,6 +80,7 @@ class OpenAIResponsesImpl:
         conversations_api: Conversations,
         prompts_api: Prompts,
         files_api: Files,
+        connectors_api: Connectors,
     ):
         self.inference_api = inference_api
         self.tool_groups_api = tool_groups_api

@@ -94,6 +96,7 @@ class OpenAIResponsesImpl:
         )
         self.prompts_api = prompts_api
         self.files_api = files_api
+        self.connectors_api = connectors_api

     async def _prepend_previous_response(
         self,

@@ -494,6 +497,7 @@ class OpenAIResponsesImpl:
             instructions=instructions,
             max_tool_calls=max_tool_calls,
             metadata=metadata,
+            connectors_api=self.connectors_api,
         )

         # Stream the response
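Together with the get_provider_impl hunk above, these changes are plain constructor injection: the provider factory pulls the dependency out of deps[Api.connectors] (which the stack supplies because the provider spec below declares Api.connectors as a dependency), MetaReferenceAgentsImpl stores it, and OpenAIResponsesImpl forwards it into each streaming orchestrator it creates. A condensed, runnable sketch of the chain, with every other dependency trimmed away:

# Only the connectors_api plumbing from the hunks above; all other
# constructor parameters are omitted.
class ResponsesSketch:
    def __init__(self, connectors_api: object) -> None:
        self.connectors_api = connectors_api  # later passed per-response to the orchestrator


class AgentsSketch:
    def __init__(self, connectors_api: object) -> None:
        self.connectors_api = connectors_api
        self.responses = ResponsesSketch(connectors_api=self.connectors_api)


connectors_dep = object()  # stand-in for deps[Api.connectors]
impl = AgentsSketch(connectors_api=connectors_dep)
assert impl.responses.connectors_api is connectors_dep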
@@ -15,6 +15,7 @@ from llama_stack.providers.utils.inference.prompt_adapter import interleaved_con
 from llama_stack_api import (
     AllowedToolsFilter,
     ApprovalFilter,
+    Connectors,
     Inference,
     MCPListToolsTool,
     ModelNotFoundError,

@@ -121,8 +122,10 @@ class StreamingResponseOrchestrator:
         parallel_tool_calls: bool | None = None,
         max_tool_calls: int | None = None,
         metadata: dict[str, str] | None = None,
+        connectors_api: Connectors | None = None,
     ):
         self.inference_api = inference_api
+        self.connectors_api = connectors_api
         self.ctx = ctx
         self.response_id = response_id
         self.created_at = created_at

@@ -1088,6 +1091,15 @@ class StreamingResponseOrchestrator:
         """Process an MCP tool configuration and emit appropriate streaming events."""
         from llama_stack.providers.utils.tools.mcp import list_mcp_tools

+        # Resolve connector_id to server_url if provided
+        if mcp_tool.connector_id and not mcp_tool.server_url:
+            if self.connectors_api is None:
+                raise ValueError("Connectors API not available to resolve connector_id")
+            server_url = self.connectors_api.get_connector_url(mcp_tool.connector_id)
+            if not server_url:
+                raise ValueError(f"Connector {mcp_tool.connector_id} not found")
+            mcp_tool = mcp_tool.model_copy(update={"server_url": server_url})
+
         # Emit mcp_list_tools.in_progress
         self.sequence_number += 1
         yield OpenAIResponseObjectStreamResponseMcpListToolsInProgress(
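Two details in _process_mcp_tool are worth calling out. First, connector resolution only runs when connector_id is set and server_url is not, so an explicit URL always wins. Second, the resolved URL is written onto a copy via model_copy(update=...) rather than mutated in place, so the caller's tool configuration is left untouched. A standalone demonstration of the copy semantics with a trimmed-down pydantic v2 model:

from pydantic import BaseModel


class McpToolSketch(BaseModel):
    server_label: str
    server_url: str | None = None
    connector_id: str | None = None


tool = McpToolSketch(server_label="docs", connector_id="conn-123")
resolved = tool.model_copy(update={"server_url": "https://mcp.example.com/sse"})
assert tool.server_url is None  # original left untouched
assert resolved.server_url == "https://mcp.example.com/sse"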
@@ -36,6 +36,7 @@ def available_providers() -> list[ProviderSpec]:
                 Api.conversations,
                 Api.prompts,
                 Api.files,
+                Api.connectors,
             ],
             optional_api_dependencies=[
                 Api.safety,
@@ -46,10 +46,13 @@ from .common.content_types import (
 )
 from .common.errors import (
     ConflictError,
+    ConnectorNotFoundError,
+    ConnectorToolNotFoundError,
     DatasetNotFoundError,
     InvalidConversationIdError,
     ModelNotFoundError,
     ModelTypeError,
+    RegistryNotFoundError,
     ResourceNotFoundError,
     TokenValidationError,
     ToolGroupNotFoundError,

@@ -500,6 +503,8 @@ __all__ = [
     "ConnectorInput",
     "Connectors",
     "ConnectorType",
+    "ConnectorNotFoundError",
+    "ConnectorToolNotFoundError",
     "Conversation",
     "ConversationDeletedResource",
     "ConversationItem",

@@ -775,6 +780,7 @@ __all__ = [
     "Ranker",
     "RegexParserScoringFnParams",
     "Registry",
+    "RegistryNotFoundError",
     "RegistryType",
     "RegistryInput",
     "RemoteProviderConfig",
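The first hunk re-exports the connector errors (plus RegistryNotFoundError) from the package root, and the other two add them to __all__. Since a re-export binds the same class object, exception handling behaves identically whichever path a provider imports from, which is what lets the connectors service earlier in this commit drop its llama_stack_api.common.errors import. A quick sanity check, runnable once llama_stack_api is importable:

from llama_stack_api import ConnectorNotFoundError
from llama_stack_api.common.errors import ConnectorNotFoundError as DirectImport

# Same object, not a copy: except clauses catch either spelling.
assert ConnectorNotFoundError is DirectImport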
@@ -5,7 +5,7 @@
 # the root directory of this source tree.

 from collections.abc import Sequence
-from typing import Annotated, Any, Literal
+from typing import Annotated, Any, Literal, Self

 from pydantic import BaseModel, Field, model_validator
 from typing_extensions import TypedDict

@@ -488,7 +488,8 @@ class OpenAIResponseInputToolMCP(BaseModel):

     :param type: Tool type identifier, always "mcp"
     :param server_label: Label to identify this MCP server
-    :param server_url: URL endpoint of the MCP server
+    :param server_url: (Optional) URL endpoint of the MCP server
+    :param connector_id: (Optional) ID of the connector to use for this MCP server
     :param headers: (Optional) HTTP headers to include when connecting to the server
     :param authorization: (Optional) OAuth access token for authenticating with the MCP server
     :param require_approval: Approval requirement for tool calls ("always", "never", or filter)

@@ -497,13 +498,20 @@ class OpenAIResponseInputToolMCP(BaseModel):

     type: Literal["mcp"] = "mcp"
     server_label: str
-    server_url: str
+    server_url: str | None = None
+    connector_id: str | None = None
     headers: dict[str, Any] | None = None
     authorization: str | None = Field(default=None, exclude=True)

     require_approval: Literal["always"] | Literal["never"] | ApprovalFilter = "never"
     allowed_tools: list[str] | AllowedToolsFilter | None = None

+    @model_validator(mode="after")
+    def validate_server_or_connector(self) -> Self:
+        if not self.server_url and not self.connector_id:
+            raise ValueError("Either 'server_url' or 'connector_id' must be provided for MCP tool")
+        return self
+

 OpenAIResponseInputTool = Annotated[
     OpenAIResponseInputToolWebSearch
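The after-mode validator rejects a config with neither server_url nor connector_id at construction time, before any request handling starts; pydantic surfaces the ValueError as a ValidationError. A standalone reproduction using a trimmed copy of the model (pydantic v2; typing.Self needs Python 3.11+):

from typing import Literal, Self

from pydantic import BaseModel, ValidationError, model_validator


class McpToolConfigSketch(BaseModel):
    type: Literal["mcp"] = "mcp"
    server_label: str
    server_url: str | None = None
    connector_id: str | None = None

    @model_validator(mode="after")
    def validate_server_or_connector(self) -> Self:
        if not self.server_url and not self.connector_id:
            raise ValueError("Either 'server_url' or 'connector_id' must be provided for MCP tool")
        return self


McpToolConfigSketch(server_label="ok", connector_id="conn-123")  # accepted
try:
    McpToolConfigSketch(server_label="bad")  # neither field set
except ValidationError as e:
    print(e)  # pydantic wraps the ValueError raised by the validator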