commit 01a76e0084 (parent 3b1b37862e)

    integrate connectors api within responses api impl in agentic provider

    Signed-off-by: Jaideep Rao <jrao@redhat.com>

13 changed files with 88 additions and 23 deletions
@@ -7258,8 +7258,13 @@ components:
           type: string
           title: Server Label
         server_url:
-          type: string
-          title: Server Url
+          anyOf:
+            - type: string
+            - type: 'null'
+        connector_id:
+          anyOf:
+            - type: string
+            - type: 'null'
         headers:
           anyOf:
             - additionalProperties: true
@@ -7292,7 +7297,6 @@ components:
       type: object
       required:
         - server_label
-        - server_url
       title: OpenAIResponseInputToolMCP
       description: Model Context Protocol (MCP) tool configuration for OpenAI response inputs.
     CreateOpenaiResponseRequest:
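In request terms, this schema change means an MCP tool block may now carry either a direct server_url or a connector_id that the stack resolves server-side; one of the two is enough. A minimal sketch of the two accepted shapes (the labels, URL, and connector ID below are hypothetical, not taken from this commit):

# Two ways to configure an MCP tool under the updated schema.
# All values here are illustrative.
mcp_by_url = {
    "type": "mcp",
    "server_label": "docs-server",
    "server_url": "https://mcp.example.com/sse",  # direct MCP endpoint
}

mcp_by_connector = {
    "type": "mcp",
    "server_label": "docs-server",
    "connector_id": "connector-123",  # resolved to a server_url at runtime
}

The same schema change is repeated in each generated spec variant below.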
docs/static/deprecated-llama-stack-spec.yaml (vendored, 10 changes)

@@ -4039,8 +4039,13 @@ components:
           type: string
           title: Server Label
         server_url:
-          type: string
-          title: Server Url
+          anyOf:
+            - type: string
+            - type: 'null'
+        connector_id:
+          anyOf:
+            - type: string
+            - type: 'null'
         headers:
           anyOf:
             - additionalProperties: true
@@ -4073,7 +4078,6 @@ components:
      type: object
      required:
        - server_label
-       - server_url
      title: OpenAIResponseInputToolMCP
      description: Model Context Protocol (MCP) tool configuration for OpenAI response inputs.
    CreateOpenaiResponseRequest:
docs/static/experimental-llama-stack-spec.yaml (vendored, 10 changes)

@@ -3792,8 +3792,13 @@ components:
           type: string
           title: Server Label
         server_url:
-          type: string
-          title: Server Url
+          anyOf:
+            - type: string
+            - type: 'null'
+        connector_id:
+          anyOf:
+            - type: string
+            - type: 'null'
         headers:
           anyOf:
             - additionalProperties: true
@@ -3826,7 +3831,6 @@ components:
      type: object
      required:
        - server_label
-       - server_url
      title: OpenAIResponseInputToolMCP
      description: Model Context Protocol (MCP) tool configuration for OpenAI response inputs.
    OpenAIResponseObject:
docs/static/llama-stack-spec.yaml (vendored, 10 changes)

@@ -5850,8 +5850,13 @@ components:
           type: string
           title: Server Label
         server_url:
-          type: string
-          title: Server Url
+          anyOf:
+            - type: string
+            - type: 'null'
+        connector_id:
+          anyOf:
+            - type: string
+            - type: 'null'
         headers:
           anyOf:
             - additionalProperties: true
@@ -5884,7 +5889,6 @@ components:
      type: object
      required:
        - server_label
-       - server_url
      title: OpenAIResponseInputToolMCP
      description: Model Context Protocol (MCP) tool configuration for OpenAI response inputs.
    CreateOpenaiResponseRequest:
docs/static/stainless-llama-stack-spec.yaml (vendored, 10 changes)

@@ -7258,8 +7258,13 @@ components:
           type: string
           title: Server Label
         server_url:
-          type: string
-          title: Server Url
+          anyOf:
+            - type: string
+            - type: 'null'
+        connector_id:
+          anyOf:
+            - type: string
+            - type: 'null'
         headers:
           anyOf:
             - additionalProperties: true
@@ -7292,7 +7297,6 @@ components:
      type: object
      required:
        - server_label
-       - server_url
      title: OpenAIResponseInputToolMCP
      description: Model Context Protocol (MCP) tool configuration for OpenAI response inputs.
    CreateOpenaiResponseRequest:
@@ -14,18 +14,16 @@ from llama_stack.log import get_logger
 from llama_stack.providers.utils.tools.mcp import get_mcp_server_info, list_mcp_tools
 from llama_stack_api import (
     Connector,
+    ConnectorNotFoundError,
     Connectors,
+    ConnectorToolNotFoundError,
     ConnectorType,
     ListConnectorsResponse,
     ListRegistriesResponse,
     ListToolsResponse,
     Registry,
-    ToolDef,
-)
-from llama_stack_api.common.errors import (
-    ConnectorNotFoundError,
-    ConnectorToolNotFoundError,
     RegistryNotFoundError,
+    ToolDef,
 )

 logger = get_logger(name=__name__, category="connectors")
@@ -55,6 +53,17 @@ class ConnectorServiceImpl(Connectors):
         self.connectors_map: dict[str, Connector] = {}
         self.registries_map: dict[str, Registry] = {}

+    def get_connector_url(self, connector_id: str) -> str | None:
+        """Get the URL of a connector by its ID.
+
+        :param connector_id: The ID of the connector to get the URL for.
+        :returns: The URL of the connector.
+        """
+        connector = self.connectors_map.get(connector_id)
+        if connector is None:
+            return None
+        return connector.url
+
     async def register_connector(
         self,
         url: str,
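The new helper is a plain dictionary lookup that returns None for unknown IDs instead of raising. A self-contained sketch of the same behavior, using a stand-in Connector type (the real llama_stack_api.Connector has more fields; the ID and URL are hypothetical):

from dataclasses import dataclass


@dataclass
class Connector:
    """Stand-in for llama_stack_api.Connector (only the field used here)."""
    url: str


class ConnectorLookup:
    """Mirrors ConnectorServiceImpl.get_connector_url over a connectors_map."""

    def __init__(self) -> None:
        self.connectors_map: dict[str, Connector] = {}

    def get_connector_url(self, connector_id: str) -> str | None:
        connector = self.connectors_map.get(connector_id)
        if connector is None:
            return None
        return connector.url


lookup = ConnectorLookup()
lookup.connectors_map["connector-123"] = Connector(url="https://mcp.example.com/sse")
assert lookup.get_connector_url("connector-123") == "https://mcp.example.com/sse"
assert lookup.get_connector_url("no-such-id") is None  # missing ID: None, no raise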
@@ -28,6 +28,7 @@ async def get_provider_impl(
        deps[Api.conversations],
        deps[Api.prompts],
        deps[Api.files],
+       deps[Api.connectors],
        policy,
    )
    await impl.initialize()
@@ -11,6 +11,7 @@ from llama_stack.log import get_logger
 from llama_stack.providers.utils.responses.responses_store import ResponsesStore
 from llama_stack_api import (
     Agents,
+    Connectors,
     Conversations,
     Files,
     Inference,
@@ -49,6 +50,7 @@ class MetaReferenceAgentsImpl(Agents):
         conversations_api: Conversations,
         prompts_api: Prompts,
         files_api: Files,
+        connectors_api: Connectors,
         policy: list[AccessRule],
     ):
         self.config = config
@@ -60,6 +62,7 @@ class MetaReferenceAgentsImpl(Agents):
         self.conversations_api = conversations_api
         self.prompts_api = prompts_api
         self.files_api = files_api
+        self.connectors_api = connectors_api
         self.in_memory_store = InmemoryKVStoreImpl()
         self.openai_responses_impl: OpenAIResponsesImpl | None = None
         self.policy = policy
@@ -78,6 +81,7 @@ class MetaReferenceAgentsImpl(Agents):
             conversations_api=self.conversations_api,
             prompts_api=self.prompts_api,
             files_api=self.files_api,
+            connectors_api=self.connectors_api,
         )

     async def shutdown(self) -> None:
@@ -17,6 +17,7 @@ from llama_stack.providers.utils.responses.responses_store import (
     _OpenAIResponseObjectWithInputAndMessages,
 )
 from llama_stack_api import (
+    Connectors,
     ConversationItem,
     Conversations,
     Files,
@@ -79,6 +80,7 @@ class OpenAIResponsesImpl:
         conversations_api: Conversations,
         prompts_api: Prompts,
         files_api: Files,
+        connectors_api: Connectors,
     ):
         self.inference_api = inference_api
         self.tool_groups_api = tool_groups_api
@@ -94,6 +96,7 @@ class OpenAIResponsesImpl:
         )
         self.prompts_api = prompts_api
         self.files_api = files_api
+        self.connectors_api = connectors_api

     async def _prepend_previous_response(
         self,
@@ -494,6 +497,7 @@ class OpenAIResponsesImpl:
             instructions=instructions,
             max_tool_calls=max_tool_calls,
             metadata=metadata,
+            connectors_api=self.connectors_api,
         )

         # Stream the response
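Taken together, the wiring above lets a Responses API caller reference a registered connector instead of a raw MCP URL. A hedged end-to-end sketch, assuming the llama-stack-client package and its OpenAI-style responses.create surface; the base URL, model ID, and connector ID are all hypothetical:

from llama_stack_client import LlamaStackClient  # assumed client package

client = LlamaStackClient(base_url="http://localhost:8321")  # hypothetical endpoint

# The MCP tool accepts connector_id in place of server_url; the streaming
# orchestrator resolves it to the connector's URL before listing tools.
response = client.responses.create(
    model="llama-3.3-70b",  # hypothetical model ID
    input="Summarize the latest design doc.",
    tools=[
        {
            "type": "mcp",
            "server_label": "docs-server",
            "connector_id": "connector-123",
        }
    ],
)
print(response.output_text)  # assumes the OpenAI-style output_text helper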
@@ -15,6 +15,7 @@ from llama_stack.providers.utils.inference.prompt_adapter import interleaved_con
 from llama_stack_api import (
     AllowedToolsFilter,
     ApprovalFilter,
+    Connectors,
     Inference,
     MCPListToolsTool,
     ModelNotFoundError,
@@ -121,8 +122,10 @@ class StreamingResponseOrchestrator:
         parallel_tool_calls: bool | None = None,
         max_tool_calls: int | None = None,
         metadata: dict[str, str] | None = None,
+        connectors_api: Connectors | None = None,
     ):
         self.inference_api = inference_api
+        self.connectors_api = connectors_api
         self.ctx = ctx
         self.response_id = response_id
         self.created_at = created_at
@@ -1088,6 +1091,15 @@ class StreamingResponseOrchestrator:
         """Process an MCP tool configuration and emit appropriate streaming events."""
         from llama_stack.providers.utils.tools.mcp import list_mcp_tools

+        # Resolve connector_id to server_url if provided
+        if mcp_tool.connector_id and not mcp_tool.server_url:
+            if self.connectors_api is None:
+                raise ValueError("Connectors API not available to resolve connector_id")
+            server_url = self.connectors_api.get_connector_url(mcp_tool.connector_id)
+            if not server_url:
+                raise ValueError(f"Connector {mcp_tool.connector_id} not found")
+            mcp_tool = mcp_tool.model_copy(update={"server_url": server_url})
+
         # Emit mcp_list_tools.in_progress
         self.sequence_number += 1
         yield OpenAIResponseObjectStreamResponseMcpListToolsInProgress(
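The resolution step above reads cleanly in isolation. A self-contained sketch of the same logic, with a stand-in model and connectors stub rather than the real OpenAIResponseInputToolMCP and Connectors API (all names and values here are illustrative):

from pydantic import BaseModel


class MCPTool(BaseModel):
    """Stand-in for OpenAIResponseInputToolMCP (subset of fields)."""
    server_label: str
    server_url: str | None = None
    connector_id: str | None = None


def resolve_mcp_tool(mcp_tool: MCPTool, connectors_api) -> MCPTool:
    # Mirrors the orchestrator: only resolve when a connector_id is given
    # and no explicit server_url overrides it.
    if mcp_tool.connector_id and not mcp_tool.server_url:
        if connectors_api is None:
            raise ValueError("Connectors API not available to resolve connector_id")
        server_url = connectors_api.get_connector_url(mcp_tool.connector_id)
        if not server_url:
            raise ValueError(f"Connector {mcp_tool.connector_id} not found")
        # model_copy(update=...) returns a new model; the original is untouched.
        mcp_tool = mcp_tool.model_copy(update={"server_url": server_url})
    return mcp_tool


class _StubConnectors:
    def get_connector_url(self, connector_id: str) -> str | None:
        return "https://mcp.example.com/sse" if connector_id == "connector-123" else None


tool = MCPTool(server_label="docs-server", connector_id="connector-123")
resolved = resolve_mcp_tool(tool, _StubConnectors())
assert resolved.server_url == "https://mcp.example.com/sse"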
@@ -36,6 +36,7 @@ def available_providers() -> list[ProviderSpec]:
                Api.conversations,
                Api.prompts,
                Api.files,
+               Api.connectors,
            ],
            optional_api_dependencies=[
                Api.safety,
@@ -46,10 +46,13 @@ from .common.content_types import (
 )
 from .common.errors import (
     ConflictError,
+    ConnectorNotFoundError,
+    ConnectorToolNotFoundError,
     DatasetNotFoundError,
     InvalidConversationIdError,
     ModelNotFoundError,
     ModelTypeError,
+    RegistryNotFoundError,
     ResourceNotFoundError,
     TokenValidationError,
     ToolGroupNotFoundError,
@@ -500,6 +503,8 @@ __all__ = [
     "ConnectorInput",
     "Connectors",
     "ConnectorType",
+    "ConnectorNotFoundError",
+    "ConnectorToolNotFoundError",
     "Conversation",
     "ConversationDeletedResource",
     "ConversationItem",
@@ -775,6 +780,7 @@ __all__ = [
     "Ranker",
     "RegexParserScoringFnParams",
     "Registry",
+    "RegistryNotFoundError",
     "RegistryType",
     "RegistryInput",
     "RemoteProviderConfig",
@@ -5,7 +5,7 @@
 # the root directory of this source tree.

 from collections.abc import Sequence
-from typing import Annotated, Any, Literal
+from typing import Annotated, Any, Literal, Self

 from pydantic import BaseModel, Field, model_validator
 from typing_extensions import TypedDict
@@ -488,7 +488,8 @@ class OpenAIResponseInputToolMCP(BaseModel):

     :param type: Tool type identifier, always "mcp"
     :param server_label: Label to identify this MCP server
-    :param server_url: URL endpoint of the MCP server
+    :param server_url: (Optional) URL endpoint of the MCP server
+    :param connector_id: (Optional) ID of the connector to use for this MCP server
     :param headers: (Optional) HTTP headers to include when connecting to the server
     :param authorization: (Optional) OAuth access token for authenticating with the MCP server
     :param require_approval: Approval requirement for tool calls ("always", "never", or filter)
@@ -497,13 +498,20 @@ class OpenAIResponseInputToolMCP(BaseModel):

     type: Literal["mcp"] = "mcp"
     server_label: str
-    server_url: str
+    server_url: str | None = None
+    connector_id: str | None = None
     headers: dict[str, Any] | None = None
     authorization: str | None = Field(default=None, exclude=True)

     require_approval: Literal["always"] | Literal["never"] | ApprovalFilter = "never"
     allowed_tools: list[str] | AllowedToolsFilter | None = None

+    @model_validator(mode="after")
+    def validate_server_or_connector(self) -> Self:
+        if not self.server_url and not self.connector_id:
+            raise ValueError("Either 'server_url' or 'connector_id' must be provided for MCP tool")
+        return self
+

 OpenAIResponseInputTool = Annotated[
     OpenAIResponseInputToolWebSearch
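The net effect of the model change: server_url is no longer required on its own, but the pair cannot both be empty. A small sketch of the validator's behavior, assuming OpenAIResponseInputToolMCP is importable from llama_stack_api (an assumption; the URL and connector ID are made up):

from pydantic import ValidationError

from llama_stack_api import OpenAIResponseInputToolMCP  # assumed export path

# Either field alone satisfies the new validator:
OpenAIResponseInputToolMCP(server_label="docs-server", server_url="https://mcp.example.com/sse")
OpenAIResponseInputToolMCP(server_label="docs-server", connector_id="connector-123")

# Omitting both now fails at construction time:
try:
    OpenAIResponseInputToolMCP(server_label="docs-server")
except ValidationError as e:
    print(e)  # wraps: Either 'server_url' or 'connector_id' must be provided for MCP tool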