mirror of https://github.com/meta-llama/llama-stack.git
synced 2025-12-03 09:53:45 +00:00

Updated the test cases to support the headers for now

parent 8783255bc3
commit c1b63202be
2 changed files with 17 additions and 4 deletions
@@ -56,9 +56,7 @@ class ModelContextProtocolToolRuntimeImpl(ToolGroupsProtocolPrivate, ToolRuntime
         final_authorization = authorization or provider_auth

         return await list_mcp_tools(
-            endpoint=mcp_endpoint.uri,
-            headers=provider_headers,
-            authorization=final_authorization
+            endpoint=mcp_endpoint.uri, headers=provider_headers, authorization=final_authorization
         )

     async def invoke_tool(
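
For reference, the `authorization or provider_auth` line above keeps a per-request token ahead of the provider-configured one. A minimal standalone sketch of that precedence follows; the function name `resolve_authorization` and the standalone framing are illustrative, not part of llama-stack:

# Sketch of the fallback seen in the hunk above; only the `or`
# expression comes from the diff, the rest is illustrative scaffolding.
def resolve_authorization(authorization: str | None, provider_auth: str | None) -> str | None:
    # A request-scoped token wins; otherwise fall back to the
    # provider-configured token (which may itself be None).
    return authorization or provider_auth


assert resolve_authorization("req-token", "provider-token") == "req-token"
assert resolve_authorization(None, "provider-token") == "provider-token"
assert resolve_authorization(None, None) is None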
@@ -109,7 +107,7 @@ class ModelContextProtocolToolRuntimeImpl(ToolGroupsProtocolPrivate, ToolRuntime
         authorization = None

         provider_data = self.get_request_provider_data()
-        if provider_data and hasattr(provider_data, 'mcp_headers') and provider_data.mcp_headers:
+        if provider_data and hasattr(provider_data, "mcp_headers") and provider_data.mcp_headers:
             for uri, values in provider_data.mcp_headers.items():
                 if canonicalize_uri(uri) != canonicalize_uri(mcp_endpoint_uri):
                     continue
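
The loop above only applies `mcp_headers` entries whose URI matches the target endpoint. A hedged sketch of that matching is below; `canonicalize_uri` here is a stand-in for the real helper used in llama-stack, normalizing only case and trailing slashes for illustration:

# Stand-in for the real canonicalize_uri helper (assumption: it makes
# trivially equivalent URIs compare equal).
def canonicalize_uri(uri: str) -> str:
    return uri.rstrip("/").lower()


def headers_for_endpoint(mcp_headers: dict[str, dict[str, str]], endpoint_uri: str) -> dict[str, str]:
    # Collect headers only for entries whose URI matches the target
    # endpoint, mirroring the `continue` in the loop above.
    matched: dict[str, str] = {}
    for uri, values in mcp_headers.items():
        if canonicalize_uri(uri) != canonicalize_uri(endpoint_uri):
            continue
        matched.update(values)
    return matched


print(headers_for_endpoint(
    {"http://localhost:8000/sse/": {"Authorization": "Bearer abc"}},
    "http://localhost:8000/sse",
))  # -> {'Authorization': 'Bearer abc'}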
|
@@ -9,6 +9,8 @@ Integration tests for inference/chat completion with JSON Schema-based tools.
 Tests that tools pass through correctly to various LLM providers.
 """

+import json
+
 import pytest

 from llama_stack.core.library_client import LlamaStackAsLibraryClient
@@ -191,9 +193,22 @@ class TestMCPToolsInChatCompletion:
             mcp_endpoint=dict(uri=uri),
         )

+        # Use old header-based approach for Phase 1 (backward compatibility)
+        provider_data = {
+            "mcp_headers": {
+                uri: {
+                    "Authorization": f"Bearer {AUTH_TOKEN}",
+                },
+            },
+        }
+        auth_headers = {
+            "X-LlamaStack-Provider-Data": json.dumps(provider_data),
+        }
+
         # Get the tools from MCP
         tools_response = llama_stack_client.tool_runtime.list_tools(
             tool_group_id=test_toolgroup_id,
+            extra_headers=auth_headers,
         )

         # Convert to OpenAI format for inference
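
The test drives the Phase 1, header-based path: per-endpoint MCP headers are JSON-encoded into the `X-LlamaStack-Provider-Data` request header. A round-trip sketch under that assumption follows; the token and URI values are placeholders standing in for the test fixtures:

import json

# Placeholders standing in for the test fixtures.
AUTH_TOKEN = "test-token"
uri = "http://localhost:8321/sse"

# Client side: JSON-encode per-endpoint MCP headers into one request header,
# as the test does before calling tool_runtime.list_tools.
provider_data = {"mcp_headers": {uri: {"Authorization": f"Bearer {AUTH_TOKEN}"}}}
header_value = json.dumps(provider_data)

# Server side: decode the header back into provider data and pick out the
# Authorization value for the matching endpoint.
decoded = json.loads(header_value)
assert decoded["mcp_headers"][uri]["Authorization"] == f"Bearer {AUTH_TOKEN}"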