# What does this PR do?

Fireworks doesn't allow `response_format` with tool use. The default response format is 'text' anyway, so we can safely omit it.

## Test Plan

The script below failed without the change and runs after it.

```
#!/usr/bin/env python3
"""
Script to test Responses API with kubernetes-mcp-server.

This script:
1. Connects to the llama stack server
2. Uses the Responses API with MCP tools
3. Asks for the list of Kubernetes namespaces using the kubernetes-mcp-server
"""

import json

from openai import OpenAI

# Connect to the llama stack server
base_url = "http://localhost:8321/v1"
client = OpenAI(base_url=base_url, api_key="fake")

# Define the MCP tool pointing to the kubernetes-mcp-server
# The kubernetes-mcp-server is running on port 3000 with SSE endpoint at /sse
mcp_server_url = "http://localhost:3000/sse"

tools = [
    {
        "type": "mcp",
        "server_label": "k8s",
        "server_url": mcp_server_url,
    }
]

# Create a response request asking for k8s namespaces
print("Sending request to list Kubernetes namespaces...")
print(f"Using MCP server at: {mcp_server_url}")
print("Available tools will be listed automatically by the MCP server.")
print()

response = client.responses.create(
    # model="meta-llama/Llama-3.2-3B-Instruct",  # Using the vllm model
    model="fireworks/accounts/fireworks/models/llama4-scout-instruct-basic",
    # model="openai/gpt-4o",
    input="what are all the Kubernetes namespaces? Use tool call to `namespaces_list`. make sure to adhere to the tool calling format UNDER ALL CIRCUMSTANCES.",
    tools=tools,
    stream=False,
)

print("\n" + "=" * 80)
print("RESPONSE OUTPUT:")
print("=" * 80)

# Print the output
for i, output in enumerate(response.output):
    print(f"\n[Output {i + 1}] Type: {output.type}")
    if output.type == "mcp_list_tools":
        print(f"  Server: {output.server_label}")
        print(f"  Tools available: {[t.name for t in output.tools]}")
    elif output.type == "mcp_call":
        print(f"  Tool called: {output.name}")
        print(f"  Arguments: {output.arguments}")
        print(f"  Result: {output.output}")
        if output.error:
            print(f"  Error: {output.error}")
    elif output.type == "message":
        print(f"  Role: {output.role}")
        print(f"  Content: {output.content}")

print("\n" + "=" * 80)
print("FINAL RESPONSE TEXT:")
print("=" * 80)
print(response.output_text)
```
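The fix itself is small: when tools are passed, the provider simply stops forwarding `response_format` to Fireworks. A minimal sketch of the idea, assuming the request parameters are assembled as a plain dict (the function and parameter names here are illustrative, not the actual adapter code):

```
# Sketch only: illustrates omitting response_format when tools are used,
# since Fireworks rejects the combination and the default format is "text".
def build_request_params(
    model: str,
    messages: list[dict],
    tools: list[dict] | None = None,
    response_format: dict | None = None,
) -> dict:
    params: dict = {"model": model, "messages": messages}
    if tools:
        # Tool use requested: leave response_format unset so Fireworks
        # falls back to its default "text" format.
        params["tools"] = tools
    elif response_format is not None:
        params["response_format"] = response_format
    return params
```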
# Llama Stack Unit Tests

## Unit Tests
Unit tests verify individual components and functions in isolation. They are fast, reliable, and don't require external services.
### Prerequisites

- Python Environment: Ensure you have Python 3.12+ installed
- uv Package Manager: Install `uv` if not already installed
You can run the unit tests by running:

```
./scripts/unit-tests.sh [PYTEST_ARGS]
```
Any additional arguments are passed to pytest. For example, you can specify a test directory, a specific test file, or any pytest flags (e.g., `-vvv` for verbosity). If no test directory is specified, it defaults to `tests/unit`, e.g.:

```
./scripts/unit-tests.sh tests/unit/registry/test_registry.py -vvv
```
If you'd like to run with a non-default version of Python (currently 3.12), pass the `PYTHON_VERSION` environment variable as follows:

```
source .venv/bin/activate
PYTHON_VERSION=3.13 ./scripts/unit-tests.sh
```
### Test Configuration

- Test Discovery: Tests are automatically discovered in the `tests/unit/` directory
- Async Support: Tests use `--asyncio-mode=auto` for automatic async test handling (see the sketch after this list)
- Coverage: Tests generate coverage reports in the `htmlcov/` directory
- Python Version: Defaults to Python 3.12, but can be overridden with the `PYTHON_VERSION` environment variable
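Because the suite runs with `--asyncio-mode=auto`, plain `async def` test functions are collected and awaited without any extra decorators. A minimal sketch of what such a test can look like (the function under test is hypothetical, purely for illustration):

```
import asyncio


async def fetch_answer() -> int:
    # Stand-in for an async component under test.
    await asyncio.sleep(0)
    return 42


async def test_fetch_answer():
    # With --asyncio-mode=auto, pytest awaits this coroutine automatically.
    assert await fetch_answer() == 42


def test_sync_still_works():
    # Ordinary synchronous tests run alongside async ones.
    assert 2 + 2 == 4
```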
### Coverage Reports

After running tests, you can view coverage reports:

```
# Open HTML coverage report in browser
open htmlcov/index.html      # macOS
xdg-open htmlcov/index.html  # Linux
start htmlcov/index.html     # Windows
```