diff --git a/scripts/integration-tests.sh b/scripts/integration-tests.sh
index 008a420bf..506ac12e0 100755
--- a/scripts/integration-tests.sh
+++ b/scripts/integration-tests.sh
@@ -23,7 +23,7 @@ COLLECT_ONLY=false
 
 # Function to display usage
 usage() {
-    cat << EOF
+    cat <<EOF
@@ ... @@
-if ! command -v llama &> /dev/null; then
+if [[ "$COLLECT_ONLY" == false ]] && ! command -v llama &>/dev/null; then
     echo "llama could not be found, ensure llama-stack is installed"
     exit 1
 fi
 
-if ! command -v pytest &> /dev/null; then
+if ! command -v pytest &>/dev/null; then
     echo "pytest could not be found, ensure pytest is installed"
     exit 1
 fi
@@ -208,9 +207,18 @@ if [[ "$STACK_CONFIG" == *"server:"* && "$COLLECT_ONLY" == false ]]; then
     echo "=== Starting Llama Stack Server ==="
     export LLAMA_STACK_LOG_WIDTH=120
 
+    # Configure telemetry collector for server mode
+    # Use a fixed port for the OTEL collector so the server can connect to it
+    COLLECTOR_PORT=4317
+    export LLAMA_STACK_TEST_COLLECTOR_PORT="${COLLECTOR_PORT}"
+    export OTEL_EXPORTER_OTLP_ENDPOINT="http://127.0.0.1:${COLLECTOR_PORT}"
+    export OTEL_EXPORTER_OTLP_PROTOCOL="http/protobuf"
+    export OTEL_BSP_SCHEDULE_DELAY="200"
+    export OTEL_BSP_EXPORT_TIMEOUT="2000"
+
     # remove "server:" from STACK_CONFIG
     stack_config=$(echo "$STACK_CONFIG" | sed 's/^server://')
-    nohup llama stack run $stack_config > server.log 2>&1 &
+    nohup llama stack run $stack_config >server.log 2>&1 &
 
     echo "Waiting for Llama Stack Server to start..."
     for i in {1..30}; do
@@ -239,7 +247,7 @@ if [[ "$STACK_CONFIG" == *"docker:"* && "$COLLECT_ONLY" == false ]]; then
     container_name="llama-stack-test-$DISTRO"
     if docker ps -a --format '{{.Names}}' | grep -q "^${container_name}$"; then
         echo "Dumping container logs before stopping..."
-        docker logs "$container_name" > "docker-${DISTRO}-${INFERENCE_MODE}.log" 2>&1 || true
+        docker logs "$container_name" >"docker-${DISTRO}-${INFERENCE_MODE}.log" 2>&1 || true
         echo "Stopping and removing container: $container_name"
         docker stop "$container_name" 2>/dev/null || true
         docker rm "$container_name" 2>/dev/null || true
@@ -271,16 +279,6 @@ if [[ "$STACK_CONFIG" == *"docker:"* && "$COLLECT_ONLY" == false ]]; then
         --build-arg "LLAMA_STACK_DIR=/workspace"
     )
 
-    # Pass UV index configuration for release branches
-    if [[ -n "${UV_EXTRA_INDEX_URL:-}" ]]; then
-        echo "Adding UV_EXTRA_INDEX_URL to docker build: $UV_EXTRA_INDEX_URL"
-        build_cmd+=(--build-arg "UV_EXTRA_INDEX_URL=$UV_EXTRA_INDEX_URL")
-    fi
-    if [[ -n "${UV_INDEX_STRATEGY:-}" ]]; then
-        echo "Adding UV_INDEX_STRATEGY to docker build: $UV_INDEX_STRATEGY"
-        build_cmd+=(--build-arg "UV_INDEX_STRATEGY=$UV_INDEX_STRATEGY")
-    fi
-
     if ! "${build_cmd[@]}"; then
         echo "❌ Failed to build Docker image"
         exit 1
@@ -294,10 +292,15 @@ if [[ "$STACK_CONFIG" == *"docker:"* && "$COLLECT_ONLY" == false ]]; then
     docker stop "$container_name" 2>/dev/null || true
     docker rm "$container_name" 2>/dev/null || true
 
+    # Configure telemetry collector port shared between host and container
+    COLLECTOR_PORT=4317
+    export LLAMA_STACK_TEST_COLLECTOR_PORT="${COLLECTOR_PORT}"
+
     # Build environment variables for docker run
     DOCKER_ENV_VARS=""
     DOCKER_ENV_VARS="$DOCKER_ENV_VARS -e LLAMA_STACK_TEST_INFERENCE_MODE=$INFERENCE_MODE"
     DOCKER_ENV_VARS="$DOCKER_ENV_VARS -e LLAMA_STACK_TEST_STACK_CONFIG_TYPE=server"
+    DOCKER_ENV_VARS="$DOCKER_ENV_VARS -e OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:${COLLECTOR_PORT}"
 
     # Pass through API keys if they exist
     [ -n "${TOGETHER_API_KEY:-}" ] && DOCKER_ENV_VARS="$DOCKER_ENV_VARS -e TOGETHER_API_KEY=$TOGETHER_API_KEY"
@@ -318,8 +321,20 @@ if [[ "$STACK_CONFIG" == *"docker:"* && "$COLLECT_ONLY" == false ]]; then
     fi
     echo "Using image: $IMAGE_NAME"
 
-    docker run -d --network host --name "$container_name" \
-        -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
+    # On macOS/Darwin, --network host doesn't work as expected due to Docker running in a VM
+    # Use regular port mapping instead
+    NETWORK_MODE=""
+    PORT_MAPPINGS=""
+    if [[ "$(uname)" != "Darwin" ]] && [[ "$(uname)" != *"MINGW"* ]]; then
+        NETWORK_MODE="--network host"
+    else
+        # On non-Linux (macOS, Windows), need explicit port mappings for both app and telemetry
+        PORT_MAPPINGS="-p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT -p $COLLECTOR_PORT:$COLLECTOR_PORT"
+        echo "Using bridge networking with port mapping (non-Linux)"
+    fi
+
+    docker run -d $NETWORK_MODE --name "$container_name" \
+        $PORT_MAPPINGS \
         $DOCKER_ENV_VARS \
         "$IMAGE_NAME" \
         --port $LLAMA_STACK_PORT
@@ -421,17 +436,13 @@ elif [ $exit_code -eq 5 ]; then
 else
     echo "❌ Tests failed"
     echo ""
-    echo "=== Dumping last 100 lines of logs for debugging ==="
-
     # Output server or container logs based on stack config
     if [[ "$STACK_CONFIG" == *"server:"* && -f "server.log" ]]; then
-        echo "--- Last 100 lines of server.log ---"
-        tail -100 server.log
+        echo "--- Server side failures can be located inside server.log (available from artifacts on CI) ---"
     elif [[ "$STACK_CONFIG" == *"docker:"* ]]; then
         docker_log_file="docker-${DISTRO}-${INFERENCE_MODE}.log"
         if [[ -f "$docker_log_file" ]]; then
-            echo "--- Last 100 lines of $docker_log_file ---"
-            tail -100 "$docker_log_file"
+            echo "--- Server side failures can be located inside $docker_log_file (available from artifacts on CI) ---"
         fi
     fi