fix(tests): address review comments on telemetry test PR and support metrics

Emilio Garcia 2025-10-14 19:17:50 -04:00
parent 83a5c9ee7b
commit 9198c4d5e2
3 changed files with 84 additions and 48 deletions

File 1 of 3: TelemetryAdapter provider

@@ -79,7 +79,6 @@ class TelemetryAdapter(Telemetry):
         metrics.set_meter_provider(metric_provider)
         self.meter = metrics.get_meter(__name__)
         self._lock = _global_lock
-
 
     async def initialize(self) -> None:
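For reference, a minimal sketch (not part of this diff) of the global meter-provider wiring that the context lines above rely on. The InMemoryMetricReader and the counter name "example_tokens" are illustrative assumptions, not what TelemetryAdapter actually configures:

    # Sketch: install a global MeterProvider, obtain a meter, record one value,
    # and read it back through an in-memory reader (illustrative only).
    from opentelemetry import metrics
    from opentelemetry.sdk.metrics import MeterProvider
    from opentelemetry.sdk.metrics.export import InMemoryMetricReader

    reader = InMemoryMetricReader()
    provider = MeterProvider(metric_readers=[reader])
    metrics.set_meter_provider(provider)  # global provider; "set once" by default

    meter = metrics.get_meter(__name__)
    counter = meter.create_counter("example_tokens", unit="tokens")  # hypothetical name
    counter.add(42)

    data = reader.get_metrics_data()
    point = data.resource_metrics[0].scope_metrics[0].metrics[0].data.data_points[0]
    print(point.value)  # 42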

File 2 of 3: telemetry test fixtures

@@ -13,6 +13,7 @@ cannot access spans from a separate server process.
 from typing import Any
 
+import opentelemetry.metrics as otel_metrics
 import opentelemetry.trace as otel_trace
 import pytest
 from opentelemetry import metrics, trace
@@ -25,40 +26,61 @@ from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
 import llama_stack.providers.inline.telemetry.meta_reference.telemetry as telemetry_module
 
 
-class OtelTestCollector:
-    """In-memory collector for OpenTelemetry traces and metrics."""
-
-    def __init__(self):
-        self.span_exporter = InMemorySpanExporter()
-        self.tracer_provider = TracerProvider()
-        self.tracer_provider.add_span_processor(SimpleSpanProcessor(self.span_exporter))
-        trace.set_tracer_provider(self.tracer_provider)
-        self.metric_reader = InMemoryMetricReader()
-        self.meter_provider = MeterProvider(metric_readers=[self.metric_reader])
-        metrics.set_meter_provider(self.meter_provider)
+@pytest.fixture(scope="session")
+def _setup_test_telemetry():
+    """Session-scoped: Set up test telemetry providers before client initialization."""
+    # Reset OpenTelemetry's internal locks to allow test fixtures to override providers
+    if hasattr(otel_trace, "_TRACER_PROVIDER_SET_ONCE"):
+        otel_trace._TRACER_PROVIDER_SET_ONCE._done = False  # type: ignore
+    if hasattr(otel_metrics, "_METER_PROVIDER_SET_ONCE"):
+        otel_metrics._METER_PROVIDER_SET_ONCE._done = False  # type: ignore
+
+    # Create and set up providers before client initialization
+    span_exporter = InMemorySpanExporter()
+    tracer_provider = TracerProvider()
+    tracer_provider.add_span_processor(SimpleSpanProcessor(span_exporter))
+    trace.set_tracer_provider(tracer_provider)
+    metric_reader = InMemoryMetricReader()
+    meter_provider = MeterProvider(metric_readers=[metric_reader])
+    metrics.set_meter_provider(meter_provider)
+
+    # Set module-level providers so TelemetryAdapter uses them
+    telemetry_module._TRACER_PROVIDER = tracer_provider
+
+    yield tracer_provider, meter_provider, span_exporter, metric_reader
+
+    # Cleanup
+    telemetry_module._TRACER_PROVIDER = None
+    tracer_provider.shutdown()
+    meter_provider.shutdown()
+
+
+class TestCollector:
+    def __init__(self, span_exp, metric_read):
+        assert span_exp and metric_read
+        self.span_exporter = span_exp
+        self.metric_reader = metric_read
 
     def get_spans(self) -> tuple[ReadableSpan, ...]:
         return self.span_exporter.get_finished_spans()
 
     def get_metrics(self) -> Any | None:
-        return self.metric_reader.get_metrics_data()
-
-    def shutdown(self) -> None:
-        self.tracer_provider.shutdown()
-        self.meter_provider.shutdown()
+        metrics = self.metric_reader.get_metrics_data()
+        if metrics and metrics.resource_metrics:
+            return metrics.resource_metrics[0].scope_metrics[0].metrics
+        return None
 
 
 @pytest.fixture
-def mock_otlp_collector():
-    """Function-scoped: Fresh telemetry data view for each test."""
-    if hasattr(otel_trace, "_TRACER_PROVIDER_SET_ONCE"):
-        otel_trace._TRACER_PROVIDER_SET_ONCE._done = False  # type: ignore
-    collector = OtelTestCollector()
-    telemetry_module._TRACER_PROVIDER = collector.tracer_provider
+def mock_otlp_collector(_setup_test_telemetry):
+    """Function-scoped: Access to telemetry data for each test."""
+    # Unpack the providers from the session fixture
+    tracer_provider, meter_provider, span_exporter, metric_reader = _setup_test_telemetry
+    collector = TestCollector(span_exporter, metric_reader)
+
+    # Clear spans between tests
+    span_exporter.clear()
+
     yield collector
-    telemetry_module._TRACER_PROVIDER = None
-    collector.shutdown()
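A hypothetical usage sketch (not part of this diff) of how a test might consume the fixtures above. The test name and the traced span are invented for illustration; the real tests drive llama_stack_client instead of emitting spans by hand:

    # Sketch: emit one span through the globally installed tracer provider and
    # read it back via the function-scoped collector fixture.
    from opentelemetry import trace


    def test_span_is_captured(mock_otlp_collector):
        tracer = trace.get_tracer(__name__)
        with tracer.start_as_current_span("example-operation"):
            pass  # real tests exercise llama_stack_client here

        # SimpleSpanProcessor exports on span end, so the in-memory exporter
        # already holds the finished span at this point.
        spans = mock_otlp_collector.get_spans()
        assert any(span.name == "example-operation" for span in spans)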

File 3 of 3: telemetry integration tests

@@ -41,8 +41,6 @@ def test_streaming_chunk_count(mock_otlp_collector, llama_stack_client, text_model_id):
 def test_telemetry_format_completeness(mock_otlp_collector, llama_stack_client, text_model_id):
     """Comprehensive validation of telemetry data format including spans and metrics."""
-    collector = mock_otlp_collector
-
     response = llama_stack_client.chat.completions.create(
         model=text_model_id,
         messages=[{"role": "user", "content": "Test trace openai with temperature 0.7"}],
@@ -51,31 +49,48 @@ def test_telemetry_format_completeness(mock_otlp_collector, llama_stack_client, text_model_id):
         stream=False,
     )
 
-    assert response
+    assert response.usage.get("prompt_tokens") > 0
+    assert response.usage.get("completion_tokens") > 0
+    assert response.usage.get("total_tokens") > 0
 
     # Verify spans
-    spans = collector.get_spans()
+    spans = mock_otlp_collector.get_spans()
     assert len(spans) == 5
     for span in spans:
-        print(f"Span: {span.attributes}")
-        if span.attributes.get("__autotraced__"):
-            assert span.attributes.get("__class__") and span.attributes.get("__method__")
-            assert span.attributes.get("__type__") in ["async", "sync", "async_generator"]
-            if span.attributes.get("__args__"):
-                args = json.loads(span.attributes.get("__args__"))
-                # The parameter is 'model' in openai_chat_completion, not 'model_id'
-                if "model" in args:
-                    assert args.get("model") == text_model_id
+        attrs = span.attributes
+        assert attrs is not None
+
+        # Root span is created manually by tracing middleware, not by @trace_protocol decorator
+        is_root_span = attrs.get("__root__") is True
+
+        if is_root_span:
+            # Root spans have different attributes
+            assert attrs.get("__location__") in ["library_client", "server"]
+        else:
+            # Non-root spans are created by @trace_protocol decorator
+            assert attrs.get("__autotraced__")
+            assert attrs.get("__class__") and attrs.get("__method__")
+            assert attrs.get("__type__") in ["async", "sync", "async_generator"]
+            args = json.loads(attrs["__args__"])
+            if "model_id" in args:
+                assert args.get("model_id") == text_model_id
+            else:
+                assert args.get("model") == text_model_id
 
-    # Verify token metrics in response
-    # Note: Llama Stack emits token metrics in the response JSON, not via OTel Metrics API
-    usage = response.usage if hasattr(response, "usage") else response.get("usage")
-    assert usage
-    prompt_tokens = usage.get("prompt_tokens") if isinstance(usage, dict) else usage.prompt_tokens
-    completion_tokens = usage.get("completion_tokens") if isinstance(usage, dict) else usage.completion_tokens
-    total_tokens = usage.get("total_tokens") if isinstance(usage, dict) else usage.total_tokens
-    assert prompt_tokens is not None and prompt_tokens > 0
-    assert completion_tokens is not None and completion_tokens > 0
-    assert total_tokens is not None and total_tokens > 0
+    # Verify token usage metrics in response
+    metrics = mock_otlp_collector.get_metrics()
+    print(f"metrics: {metrics}")
+    assert metrics
+    for metric in metrics:
+        assert metric.name in ["completion_tokens", "total_tokens", "prompt_tokens"]
+        assert metric.unit == "tokens"
+        assert metric.data.data_points and len(metric.data.data_points) == 1
+        match metric.name:
+            case "completion_tokens":
+                assert metric.data.data_points[0].value == response.usage.get("completion_tokens")
+            case "total_tokens":
+                assert metric.data.data_points[0].value == response.usage.get("total_tokens")
+            case "prompt_tokens":
+                assert metric.data.data_points[0].value == response.usage.get("prompt_tokens")