feat(tests): enable MCP tests in server mode (#4146)
We would like to run all OpenAI compatibility tests using only the `openai` client library. This is friendliest for contributors, since they can run tests without needing to update the client SDKs (which is getting easier, but is still a long pole). This is the first step in enabling that: stop using the "library client" for any of the Responses tests. This seems like a reasonable trade-off, since using an embeddable library client for Responses (or any OpenAI-compatible) behavior appears to be uncommon. To do this, we needed to enable the MCP tests, which previously worked only in library-client mode, for server mode as well.
parent 9eb81439d2
commit 1e81056a22
29 changed files with 13388 additions and 127 deletions
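For context, here is a minimal sketch (not code from this commit) of what a Responses test looks like when it talks to a running Llama Stack server through the plain `openai` client instead of the embedded library client. The base URL, model id, and MCP endpoint below are illustrative placeholders, not values from the repo.

```python
# Hedged sketch, assuming a Llama Stack server is already running locally.
# base_url, the model id, and the MCP server URL are placeholders.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8321/v1", api_key="none")

response = client.responses.create(
    model="meta-llama/Llama-3.3-70B-Instruct",  # placeholder model id
    input="Use the available tools to greet the user.",
    tools=[
        {
            "type": "mcp",  # OpenAI-style MCP tool configuration
            "server_label": "localmcp",
            "server_url": "http://localhost:8000/sse",  # placeholder MCP endpoint
            "require_approval": "never",
        }
    ],
)
print(response.output_text)
```

Because the test only depends on the `openai` package, contributors can run it against any server build without regenerating or upgrading the client SDKs.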
```diff
@@ -10,8 +10,6 @@ import pytest
 from llama_stack_client.lib.agents.agent import Agent
 from llama_stack_client.lib.agents.turn_events import StepCompleted, StepProgress, ToolCallIssuedDelta
 
-from llama_stack.core.library_client import LlamaStackAsLibraryClient
-
 AUTH_TOKEN = "test-token"
 
 from tests.common.mcp import MCP_TOOLGROUP_ID, make_mcp_server
@@ -24,9 +22,6 @@ def mcp_server():
 
 
 def test_mcp_invocation(llama_stack_client, text_model_id, mcp_server):
-    if not isinstance(llama_stack_client, LlamaStackAsLibraryClient):
-        pytest.skip("The local MCP server only reliably reachable from library client.")
-
     test_toolgroup_id = MCP_TOOLGROUP_ID
     uri = mcp_server["server_url"]
```
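With the skip removed, the test body runs unconditionally. A minimal sketch of the resulting shape, assuming the fixture and helper names shown in the diff above; the `toolgroups.register` call is illustrative of how a test would point the running stack at the test MCP server, not necessarily the exact code in this commit:

```python
# Hedged sketch of the post-change test: the isinstance() gate on
# LlamaStackAsLibraryClient is gone, so the same body runs whether
# llama_stack_client is the embedded library client or an HTTP client
# talking to a real server process.
def test_mcp_invocation(llama_stack_client, text_model_id, mcp_server):
    test_toolgroup_id = MCP_TOOLGROUP_ID
    uri = mcp_server["server_url"]

    # Illustrative registration step (assumed API): register the MCP
    # toolgroup with the stack so that, in server mode, the server process
    # itself (not just the test process) can reach the test MCP endpoint.
    llama_stack_client.toolgroups.register(
        toolgroup_id=test_toolgroup_id,
        provider_id="model-context-protocol",
        mcp_endpoint=dict(uri=uri),
    )
```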