Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-03 09:53:45 +00:00
feat(tests): enable MCP tests in server mode (#4146)
We would like to run all OpenAI compatibility tests using only the openai-client library. This is the most contributor-friendly option, since tests can then be run without updating the client SDKs (which is getting easier, but is still a long pole). This change is the first step in enabling that: no Responses test uses the "library client" any more. That seems like a reasonable trade-off, since using an embeddable library client for Responses (or any other OpenAI-compatible) behavior appears to be uncommon. To get there, we needed to make the MCP tests, which previously only worked in library client mode, also work in server mode.
parent 9eb81439d2
commit 1e81056a22
29 changed files with 13388 additions and 127 deletions
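In practice, the change means a contributor can drive the Responses suite against a running stack with plain pytest and the stock openai client. A minimal sketch of such a run, assuming server mode with the environment variables introduced in the diff below; the test path and runner invocation are illustrative, not taken from this commit:

    # Hypothetical contributor workflow; test path and runner are illustrative.
    export LLAMA_STACK_TEST_STACK_CONFIG_TYPE="server"   # value the diff below uses for server mode
    export LLAMA_STACK_TEST_MCP_HOST="localhost"         # server mode: tests and stack share a host
    uv run pytest tests/integration/responses -v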
@@ -162,6 +162,17 @@ if [[ "$COLLECT_ONLY" == false ]]; then
     export LLAMA_STACK_TEST_STACK_CONFIG_TYPE="library_client"
     echo "Setting stack config type: library_client"
   fi
+
+  # Set MCP host for in-process MCP server tests
+  # - For library client and server mode: localhost (both on same host)
+  # - For docker mode: host.docker.internal (container needs to reach host)
+  if [[ "$STACK_CONFIG" == docker:* ]]; then
+    export LLAMA_STACK_TEST_MCP_HOST="host.docker.internal"
+    echo "Setting MCP host: host.docker.internal (docker mode)"
+  else
+    export LLAMA_STACK_TEST_MCP_HOST="localhost"
+    echo "Setting MCP host: localhost (library/server mode)"
+  fi
 fi
 
 SETUP_ENV=$(PYTHONPATH=$THIS_DIR/.. python "$THIS_DIR/get_setup_env.py" --suite "$TEST_SUITE" --setup "$TEST_SETUP" --format bash)
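If the wrong host is chosen here, the failure mode is a connection error when the stack tries to reach the in-process MCP server. A quick reachability probe, not part of the change, using bash's built-in /dev/tcp redirection; port 3000 is a hypothetical MCP server port:

    : "${LLAMA_STACK_TEST_MCP_HOST:=localhost}"
    # Attempt a TCP connect on file descriptor 3; succeeds only if something is listening.
    if (exec 3<>"/dev/tcp/${LLAMA_STACK_TEST_MCP_HOST}/3000") 2>/dev/null; then
      echo "MCP server reachable at ${LLAMA_STACK_TEST_MCP_HOST}:3000"
    else
      echo "warning: nothing listening at ${LLAMA_STACK_TEST_MCP_HOST}:3000" >&2
    fi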
@@ -338,6 +349,7 @@ if [[ "$STACK_CONFIG" == *"docker:"* && "$COLLECT_ONLY" == false ]]; then
   DOCKER_ENV_VARS=""
   DOCKER_ENV_VARS="$DOCKER_ENV_VARS -e LLAMA_STACK_TEST_INFERENCE_MODE=$INFERENCE_MODE"
   DOCKER_ENV_VARS="$DOCKER_ENV_VARS -e LLAMA_STACK_TEST_STACK_CONFIG_TYPE=server"
+  DOCKER_ENV_VARS="$DOCKER_ENV_VARS -e LLAMA_STACK_TEST_MCP_HOST=${LLAMA_STACK_TEST_MCP_HOST:-host.docker.internal}"
   # Disabled: https://github.com/llamastack/llama-stack/issues/4089
   #DOCKER_ENV_VARS="$DOCKER_ENV_VARS -e OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:${COLLECTOR_PORT}"
   DOCKER_ENV_VARS="$DOCKER_ENV_VARS -e OTEL_METRIC_EXPORT_INTERVAL=200"
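The ${LLAMA_STACK_TEST_MCP_HOST:-host.docker.internal} expansion keeps the container sensible even if the variable was never exported: with :-, bash substitutes the default whenever the variable is unset or empty. A standalone illustration, runnable in any bash shell:

    unset LLAMA_STACK_TEST_MCP_HOST
    echo "${LLAMA_STACK_TEST_MCP_HOST:-host.docker.internal}"   # prints: host.docker.internal
    LLAMA_STACK_TEST_MCP_HOST="localhost"
    echo "${LLAMA_STACK_TEST_MCP_HOST:-host.docker.internal}"   # prints: localhost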
@@ -371,8 +383,11 @@ if [[ "$STACK_CONFIG" == *"docker:"* && "$COLLECT_ONLY" == false ]]; then
   # Use regular port mapping instead
   NETWORK_MODE=""
   PORT_MAPPINGS=""
+  ADD_HOST_FLAG=""
   if [[ "$(uname)" != "Darwin" ]] && [[ "$(uname)" != *"MINGW"* ]]; then
     NETWORK_MODE="--network host"
+    # On Linux with host network, also add host.docker.internal mapping for consistency
+    ADD_HOST_FLAG="--add-host=host.docker.internal:host-gateway"
   else
     # On non-Linux (macOS, Windows), need explicit port mappings for both app and telemetry
     PORT_MAPPINGS="-p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT -p $COLLECTOR_PORT:$COLLECTOR_PORT"
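The host-gateway keyword makes Docker write an /etc/hosts entry mapping host.docker.internal to the host's gateway address, something Docker Desktop does automatically on macOS and Windows but a plain Linux engine does not. One way to observe the effect in isolation (the alpine image is just a convenient example, not part of the change):

    # The flag injects an /etc/hosts entry pointing back at the host.
    docker run --rm --add-host=host.docker.internal:host-gateway alpine \
      grep host.docker.internal /etc/hosts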
@@ -381,6 +396,7 @@ if [[ "$STACK_CONFIG" == *"docker:"* && "$COLLECT_ONLY" == false ]]; then
 
   docker run -d $NETWORK_MODE --name "$container_name" \
     $PORT_MAPPINGS \
+    $ADD_HOST_FLAG \
    $DOCKER_ENV_VARS \
    "$IMAGE_NAME" \
    --port $LLAMA_STACK_PORT
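$NETWORK_MODE, $PORT_MAPPINGS, $ADD_HOST_FLAG, and $DOCKER_ENV_VARS are deliberately left unquoted: word splitting turns each multi-flag string into separate arguments, and any variable left empty vanishes from the command entirely. On Linux, the assembled command would expand to roughly the following; the container name, image tag, inference mode, and port are hypothetical values:

    docker run -d --network host --name llama-stack-test \
      --add-host=host.docker.internal:host-gateway \
      -e LLAMA_STACK_TEST_INFERENCE_MODE=replay \
      -e LLAMA_STACK_TEST_STACK_CONFIG_TYPE=server \
      -e LLAMA_STACK_TEST_MCP_HOST=host.docker.internal \
      -e OTEL_METRIC_EXPORT_INTERVAL=200 \
      llama-stack:test \
      --port 8321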