diff --git a/tests/integration/telemetry/conftest.py b/tests/integration/telemetry/conftest.py
index dfb400ce7..3d292be01 100644
--- a/tests/integration/telemetry/conftest.py
+++ b/tests/integration/telemetry/conftest.py
@@ -15,6 +15,9 @@ from llama_stack.testing.api_recorder import patch_httpx_for_test_id
 from tests.integration.fixtures.common import instantiate_llama_stack_client
 from tests.integration.telemetry.collectors import InMemoryTelemetryManager, OtlpHttpTestCollector
+    def clear_spans(self) -> None:
+        self.span_exporter.clear()
+
 
 
 @pytest.fixture(scope="session")
 def telemetry_test_collector():
diff --git a/tests/integration/telemetry/test_completions.py b/tests/integration/telemetry/test_completions.py
index 49ee4de32..0e6638857 100644
--- a/tests/integration/telemetry/test_completions.py
+++ b/tests/integration/telemetry/test_completions.py
@@ -11,6 +11,7 @@ import json
 
 def test_streaming_chunk_count(mock_otlp_collector, llama_stack_client, text_model_id):
     """Verify streaming adds chunk_count and __type__=async_generator."""
+    mock_otlp_collector.clear()
 
     stream = llama_stack_client.chat.completions.create(
         model=text_model_id,
@@ -40,6 +41,8 @@ def test_streaming_chunk_count(mock_otlp_collector, llama_stack_client, text_mod
 
 def test_telemetry_format_completeness(mock_otlp_collector, llama_stack_client, text_model_id):
     """Comprehensive validation of telemetry data format including spans and metrics."""
+    mock_otlp_collector.clear()
+
     response = llama_stack_client.chat.completions.create(
         model=text_model_id,
         messages=[{"role": "user", "content": "Test trace openai with temperature 0.7"}],
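
For context, a rough sketch of where a `clear_spans()` helper like the one above might live, assuming `InMemoryTelemetryManager` records spans with OpenTelemetry's `InMemorySpanExporter` (an assumption; only the `span_exporter` attribute is visible in this diff). The `mock_otlp_collector` fixture used by the tests is likewise assumed to expose an analogous `clear()`.

```python
# Hypothetical sketch -- not the actual collectors.py implementation.
# Assumes the manager captures finished spans with OpenTelemetry's in-memory exporter.
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter


class InMemoryTelemetryManager:
    def __init__(self) -> None:
        # Export finished spans into memory so tests can inspect them directly.
        self.span_exporter = InMemorySpanExporter()
        provider = TracerProvider()
        provider.add_span_processor(SimpleSpanProcessor(self.span_exporter))
        trace.set_tracer_provider(provider)

    def get_spans(self):
        # Spans captured since startup or since the last clear_spans() call.
        return self.span_exporter.get_finished_spans()

    def clear_spans(self) -> None:
        # Drop previously captured spans so each test asserts only on its own telemetry.
        self.span_exporter.clear()
```

Clearing at the start of each test keeps assertions such as chunk counts from picking up spans emitted by earlier requests against the same session-scoped stack.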