From ad4caf7b4a20ff752922e541be6b425500051a28 Mon Sep 17 00:00:00 2001
From: Derek Higgins
Date: Thu, 6 Nov 2025 13:39:59 +0000
Subject: [PATCH] ci: Temporarily disable Telemetry during tests

We can re-enable it once the problem is fixed.

Closes: #4089
Signed-off-by: Derek Higgins
---
 scripts/integration-tests.sh                     | 6 ++++--
 tests/integration/telemetry/test_completions.py  | 5 +++++
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/scripts/integration-tests.sh b/scripts/integration-tests.sh
index e21f73f99..0951feb14 100755
--- a/scripts/integration-tests.sh
+++ b/scripts/integration-tests.sh
@@ -231,7 +231,8 @@ if [[ "$STACK_CONFIG" == *"server:"* && "$COLLECT_ONLY" == false ]]; then
     # Use a fixed port for the OTEL collector so the server can connect to it
     COLLECTOR_PORT=4317
     export LLAMA_STACK_TEST_COLLECTOR_PORT="${COLLECTOR_PORT}"
-    export OTEL_EXPORTER_OTLP_ENDPOINT="http://127.0.0.1:${COLLECTOR_PORT}"
+    # Disabled: https://github.com/llamastack/llama-stack/issues/4089
+    #export OTEL_EXPORTER_OTLP_ENDPOINT="http://127.0.0.1:${COLLECTOR_PORT}"
     export OTEL_EXPORTER_OTLP_PROTOCOL="http/protobuf"
     export OTEL_BSP_SCHEDULE_DELAY="200"
     export OTEL_BSP_EXPORT_TIMEOUT="2000"
@@ -337,7 +338,8 @@ if [[ "$STACK_CONFIG" == *"docker:"* && "$COLLECT_ONLY" == false ]]; then
     DOCKER_ENV_VARS=""
     DOCKER_ENV_VARS="$DOCKER_ENV_VARS -e LLAMA_STACK_TEST_INFERENCE_MODE=$INFERENCE_MODE"
     DOCKER_ENV_VARS="$DOCKER_ENV_VARS -e LLAMA_STACK_TEST_STACK_CONFIG_TYPE=server"
-    DOCKER_ENV_VARS="$DOCKER_ENV_VARS -e OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:${COLLECTOR_PORT}"
+    # Disabled: https://github.com/llamastack/llama-stack/issues/4089
+    #DOCKER_ENV_VARS="$DOCKER_ENV_VARS -e OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:${COLLECTOR_PORT}"
     DOCKER_ENV_VARS="$DOCKER_ENV_VARS -e OTEL_METRIC_EXPORT_INTERVAL=200"
     DOCKER_ENV_VARS="$DOCKER_ENV_VARS -e OTEL_BSP_SCHEDULE_DELAY=200"
     DOCKER_ENV_VARS="$DOCKER_ENV_VARS -e OTEL_BSP_EXPORT_TIMEOUT=2000"
diff --git a/tests/integration/telemetry/test_completions.py b/tests/integration/telemetry/test_completions.py
index 2b8835f6c..af073d8bc 100644
--- a/tests/integration/telemetry/test_completions.py
+++ b/tests/integration/telemetry/test_completions.py
@@ -12,9 +12,13 @@ before and after each test, ensuring test isolation.
 
 import json
 
+import pytest
+
 
 def test_streaming_chunk_count(mock_otlp_collector, llama_stack_client, text_model_id):
     """Verify streaming adds chunk_count and __type__=async_generator."""
+
+    pytest.skip("Disabled: See https://github.com/llamastack/llama-stack/issues/4089")
     stream = llama_stack_client.chat.completions.create(
         model=text_model_id,
         messages=[{"role": "user", "content": "Test trace openai 1"}],
@@ -50,6 +54,7 @@ def test_streaming_chunk_count(mock_otlp_collector, llama_stack_client, text_mod
 
 def test_telemetry_format_completeness(mock_otlp_collector, llama_stack_client, text_model_id):
     """Comprehensive validation of telemetry data format including spans and metrics."""
+    pytest.skip("Disabled: See https://github.com/llamastack/llama-stack/issues/4089")
     response = llama_stack_client.chat.completions.create(
         model=text_model_id,
         messages=[{"role": "user", "content": "Test trace openai with temperature 0.7"}],