diff --git a/src/llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py b/src/llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py
index 3ef3e055e..a2fbda656 100644
--- a/src/llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py
+++ b/src/llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py
@@ -56,9 +56,7 @@ class ModelContextProtocolToolRuntimeImpl(ToolGroupsProtocolPrivate, ToolRuntime
         final_authorization = authorization or provider_auth
 
         return await list_mcp_tools(
-            endpoint=mcp_endpoint.uri,
-            headers=provider_headers,
-            authorization=final_authorization
+            endpoint=mcp_endpoint.uri, headers=provider_headers, authorization=final_authorization
         )
 
     async def invoke_tool(
@@ -109,7 +107,7 @@ class ModelContextProtocolToolRuntimeImpl(ToolGroupsProtocolPrivate, ToolRuntime
         authorization = None
 
         provider_data = self.get_request_provider_data()
-        if provider_data and hasattr(provider_data, 'mcp_headers') and provider_data.mcp_headers:
+        if provider_data and hasattr(provider_data, "mcp_headers") and provider_data.mcp_headers:
             for uri, values in provider_data.mcp_headers.items():
                 if canonicalize_uri(uri) != canonicalize_uri(mcp_endpoint_uri):
                     continue
diff --git a/tests/integration/inference/test_tools_with_schemas.py b/tests/integration/inference/test_tools_with_schemas.py
index 53f334527..5b6e69ae3 100644
--- a/tests/integration/inference/test_tools_with_schemas.py
+++ b/tests/integration/inference/test_tools_with_schemas.py
@@ -9,6 +9,8 @@
 Integration tests for inference/chat completion with JSON Schema-based tools.
 Tests that tools pass through correctly to various LLM providers.
 """
+import json
+
 import pytest
 
 from llama_stack.core.library_client import LlamaStackAsLibraryClient
@@ -191,9 +193,22 @@ class TestMCPToolsInChatCompletion:
             mcp_endpoint=dict(uri=uri),
         )
 
+        # Use old header-based approach for Phase 1 (backward compatibility)
+        provider_data = {
+            "mcp_headers": {
+                uri: {
+                    "Authorization": f"Bearer {AUTH_TOKEN}",
+                },
+            },
+        }
+        auth_headers = {
+            "X-LlamaStack-Provider-Data": json.dumps(provider_data),
+        }
+
         # Get the tools from MCP
         tools_response = llama_stack_client.tool_runtime.list_tools(
             tool_group_id=test_toolgroup_id,
+            extra_headers=auth_headers,
        )
 
         # Convert to OpenAI format for inference
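
For reviewers who want to see the header-based flow this test exercises in isolation: a minimal sketch of a caller passing per-endpoint MCP credentials via the X-LlamaStack-Provider-Data request header, under the assumption of an already-configured client. The endpoint URI, token, and toolgroup id below are placeholders, not values from this PR; only the mcp_headers shape, the header name, and the extra_headers argument come from the diff above.

import json

# Placeholder values; the integration test derives the real URI from a local
# MCP server fixture and AUTH_TOKEN from the test module.
MCP_URI = "http://localhost:8000/sse"
AUTH_TOKEN = "test-token"

# Per-endpoint headers are keyed by MCP endpoint URI. On the server side
# (model_context_protocol.py above), keys are matched against the endpoint
# via canonicalize_uri(), so equivalent URI spellings still resolve.
provider_data = {
    "mcp_headers": {
        MCP_URI: {"Authorization": f"Bearer {AUTH_TOKEN}"},
    },
}
auth_headers = {"X-LlamaStack-Provider-Data": json.dumps(provider_data)}

# `client` is assumed to be a LlamaStackClient or LlamaStackAsLibraryClient;
# extra_headers forwards the provider data with the request.
tools = client.tool_runtime.list_tools(
    tool_group_id="mcp::my_tools",
    extra_headers=auth_headers,
)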