mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-03 09:53:45 +00:00
We would like to run all OpenAI compatibility tests using only the openai-client library. This is most friendly for contributors, since they can run tests without needing to update the client SDKs (which is getting easier, but is still a long pole). This is the first step in enabling that — not using the "library client" for any of the Responses tests. This seems like a reasonable trade-off, since using an embeddable library client for Responses (or any OpenAI-compatible) behavior appears to be uncommon. To do this, we needed to enable the MCP tests (which previously only worked in library-client mode) for server mode.
128 lines
3.9 KiB
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
# All rights reserved.
|
|
#
|
|
# This source code is licensed under the terms described in the LICENSE file in
|
|
# the root directory of this source tree.
|
|
|
|
import json
|
|
|
|
import pytest
|
|
from llama_stack_client.lib.agents.agent import Agent
|
|
from llama_stack_client.lib.agents.turn_events import StepCompleted, StepProgress, ToolCallIssuedDelta
|
|
|
|
from tests.common.mcp import MCP_TOOLGROUP_ID, make_mcp_server

# Bearer token the local test MCP server requires on every request.
AUTH_TOKEN = "test-token"
|
|
|
|
|
|
@pytest.fixture(scope="function")
def mcp_server():
    """Run a local MCP server that requires bearer-token auth, for the duration of one test."""
    with make_mcp_server(required_auth_token=AUTH_TOKEN) as server_info:
        yield server_info
|
|
|
|
|
|
def test_mcp_invocation(llama_stack_client, text_model_id, mcp_server):
    """End-to-end MCP check: toolgroup registration, authenticated tool listing and
    direct invocation, then agent-driven tool use against an auth-protected MCP server.
    """
    test_toolgroup_id = MCP_TOOLGROUP_ID
    uri = mcp_server["server_url"]

    # registering should not raise an error anymore even if you don't specify the auth token
    try:
        # Best-effort cleanup in case a previous run left the toolgroup registered.
        llama_stack_client.toolgroups.unregister(toolgroup_id=test_toolgroup_id)
    except Exception:
        pass

    llama_stack_client.toolgroups.register(
        toolgroup_id=test_toolgroup_id,
        provider_id="model-context-protocol",
        mcp_endpoint=dict(uri=uri),
    )

    # Per-request MCP auth is passed via provider data: headers keyed by server URI,
    # serialized into the X-LlamaStack-Provider-Data request header.
    provider_data = {
        "mcp_headers": {
            uri: {
                "Authorization": f"Bearer {AUTH_TOKEN}",
            },
        },
    }
    auth_headers = {
        "X-LlamaStack-Provider-Data": json.dumps(provider_data),
    }

    # Listing without the auth header must be rejected by the server.
    with pytest.raises(Exception, match="Unauthorized"):
        llama_stack_client.tools.list(toolgroup_id=test_toolgroup_id)

    # With auth, both tools exposed by the test MCP server are visible.
    tools_list = llama_stack_client.tools.list(
        toolgroup_id=test_toolgroup_id,
        extra_headers=auth_headers,
    )
    assert len(tools_list) == 2
    assert {t.name for t in tools_list} == {"greet_everyone", "get_boiling_point"}

    # Direct invocation through the tool runtime, again with the auth header.
    response = llama_stack_client.tool_runtime.invoke_tool(
        tool_name="greet_everyone",
        kwargs=dict(url="https://www.google.com"),
        extra_headers=auth_headers,
    )
    content = response.content
    assert len(content) == 1
    assert content[0].type == "text"
    assert content[0].text == "Hello, world!"

    print(f"Using model: {text_model_id}")
    # Agent-level MCP tool definition: here the auth token goes in the tool's
    # own "headers" field rather than in provider data.
    tool_defs = [
        {
            "type": "mcp",
            "server_url": uri,
            "server_label": test_toolgroup_id,
            "require_approval": "never",
            "allowed_tools": [tool.name for tool in tools_list],
            "headers": {
                "Authorization": f"Bearer {AUTH_TOKEN}",
            },
        }
    ]
    agent = Agent(
        client=llama_stack_client,
        model=text_model_id,
        instructions="You are a helpful assistant.",
        tools=tool_defs,
    )
    session_id = agent.create_session("test-session")
    # Stream the turn and collect every chunk so events can be inspected afterwards.
    chunks = list(
        agent.create_turn(
            session_id=session_id,
            messages=[
                {
                    "type": "message",
                    "role": "user",
                    "content": [
                        {
                            "type": "input_text",
                            "text": "Say hi to the world. Use tools to do so.",
                        }
                    ],
                }
            ],
            stream=True,
            extra_headers=auth_headers,
        )
    )
    events = [chunk.event for chunk in chunks]

    # The last chunk carrying a response holds the completed turn's result.
    final_response = next((chunk.response for chunk in reversed(chunks) if chunk.response), None)
    assert final_response is not None

    # The model must have issued at least one tool call, the last being greet_everyone.
    issued_calls = [
        event for event in events if isinstance(event, StepProgress) and isinstance(event.delta, ToolCallIssuedDelta)
    ]
    assert issued_calls

    assert issued_calls[-1].delta.tool_name == "greet_everyone"

    # ...and a tool_execution step must have completed for that call.
    tool_events = [
        event for event in events if isinstance(event, StepCompleted) and event.step_type == "tool_execution"
    ]
    assert tool_events
    assert tool_events[-1].result.tool_calls[0].tool_name == "greet_everyone"

    # Finally, the greeting should have made it into the model's visible output.
    assert "hello" in final_response.output_text.lower()
|