Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-03 09:53:45 +00:00
Updated the test cases to support the headers for now

parent 8783255bc3
commit c1b63202be

2 changed files with 17 additions and 4 deletions
@@ -9,6 +9,8 @@ Integration tests for inference/chat completion with JSON Schema-based tools.
 Tests that tools pass through correctly to various LLM providers.
 """
 
+import json
+
 import pytest
 
 from llama_stack.core.library_client import LlamaStackAsLibraryClient
@@ -191,9 +193,22 @@ class TestMCPToolsInChatCompletion:
             mcp_endpoint=dict(uri=uri),
         )
 
+        # Use old header-based approach for Phase 1 (backward compatibility)
+        provider_data = {
+            "mcp_headers": {
+                uri: {
+                    "Authorization": f"Bearer {AUTH_TOKEN}",
+                },
+            },
+        }
+        auth_headers = {
+            "X-LlamaStack-Provider-Data": json.dumps(provider_data),
+        }
+
         # Get the tools from MCP
         tools_response = llama_stack_client.tool_runtime.list_tools(
             tool_group_id=test_toolgroup_id,
+            extra_headers=auth_headers,
         )
 
         # Convert to OpenAI format for inference
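In plain terms, the hunk above wires MCP authentication through provider data: per-endpoint headers are keyed by the MCP server URI under "mcp_headers", JSON-serialized, and carried in the X-LlamaStack-Provider-Data request header, which the test then passes as extra_headers to tool_runtime.list_tools. Below is a minimal standalone sketch of that pattern; the URI and token values are placeholders, not the test's fixtures.

import json

# Placeholder values for illustration only; the real test derives these
# from its fixtures rather than hard-coding them.
MCP_URI = "http://localhost:8000/sse"
AUTH_TOKEN = "test-token"

# Headers for a specific MCP endpoint, keyed by that endpoint's URI.
provider_data = {
    "mcp_headers": {
        MCP_URI: {
            "Authorization": f"Bearer {AUTH_TOKEN}",
        },
    },
}

# Everything travels in a single request header; in the test this dict is
# passed as extra_headers when listing the MCP tool group's tools.
extra_headers = {
    "X-LlamaStack-Provider-Data": json.dumps(provider_data),
}

print(extra_headers["X-LlamaStack-Provider-Data"])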