test: enable telemetry tests in server mode (#3927)

# What does this PR do?
- Added a server-based test OTLP collector so the telemetry tests run in server mode as well as library-client mode

## Test Plan
CI
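
For context on the collector: in server mode the stack runs in a separate process, so an in-memory exporter in the test process never sees the server's spans (this was the reason for the skip removed below). The replacement is a small local endpoint that receives the server's OTLP export traffic and buffers it for assertions. The sketch below is illustrative only, not the fixture added by this PR (that lives in the other changed files, which this view doesn't show); the class name, port, and OTLP/HTTP-protobuf wire format are assumptions inferred from how the tests call `clear()` and `get_spans(expected_count=...)`.

```python
# Hypothetical sketch of a test OTLP collector -- NOT the code from this PR.
# Assumes OTLP/HTTP protobuf export and the `opentelemetry-proto` package.
import threading
import time
from http.server import BaseHTTPRequestHandler, HTTPServer

from opentelemetry.proto.collector.trace.v1.trace_service_pb2 import (
    ExportTraceServiceRequest,
)


class MockOtlpCollector:  # name mirrors the fixture's observed API; hypothetical
    def __init__(self, port: int = 4318) -> None:
        self._spans: list = []
        self._lock = threading.Lock()
        collector = self

        class Handler(BaseHTTPRequestHandler):
            def do_POST(self):
                if self.path != "/v1/traces":  # OTLP/HTTP trace endpoint
                    self.send_response(404)
                    self.end_headers()
                    return
                body = self.rfile.read(int(self.headers["Content-Length"]))
                request = ExportTraceServiceRequest()
                request.ParseFromString(body)
                with collector._lock:
                    for resource_spans in request.resource_spans:
                        for scope_spans in resource_spans.scope_spans:
                            collector._spans.extend(scope_spans.spans)
                self.send_response(200)
                self.end_headers()

            def log_message(self, *args):  # keep pytest output quiet
                pass

        self._server = HTTPServer(("127.0.0.1", port), Handler)
        threading.Thread(target=self._server.serve_forever, daemon=True).start()

    def clear(self) -> None:
        with self._lock:
            self._spans.clear()

    def get_spans(self, expected_count: int | None = None, timeout: float = 10.0):
        # Exports arrive asynchronously (batched, from another process), so poll
        # until the expected number of spans has landed or the timeout expires.
        deadline = time.time() + timeout
        while expected_count is not None and time.time() < deadline:
            with self._lock:
                if len(self._spans) >= expected_count:
                    break
            time.sleep(0.1)
        with self._lock:
            return list(self._spans)
```

The polling `get_spans` is the piece that makes server mode workable: a batching span processor exports on its own schedule, so the tests below pass an `expected_count` rather than reading whatever happens to have arrived.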
ehhuang 2025-10-28 16:33:48 -07:00 committed by GitHub
parent 1f9d48cd54
commit 1aa8979050
8 changed files with 500 additions and 91 deletions

@@ -4,21 +4,47 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-"""Telemetry tests verifying @trace_protocol decorator format using in-memory exporter."""
+"""Telemetry tests verifying @trace_protocol decorator format across stack modes."""
 
 import json
-import os
-
-import pytest
-
-pytestmark = pytest.mark.skipif(
-    os.environ.get("LLAMA_STACK_TEST_STACK_CONFIG_TYPE") == "server",
-    reason="In-memory telemetry tests only work in library_client mode (server mode runs in separate process)",
-)
+
+
+def _span_attributes(span):
+    attrs = getattr(span, "attributes", None)
+    if attrs is None:
+        return {}
+    # ReadableSpan.attributes acts like a mapping
+    try:
+        return dict(attrs.items())  # type: ignore[attr-defined]
+    except AttributeError:
+        try:
+            return dict(attrs)
+        except TypeError:
+            return attrs
+
+
+def _span_attr(span, key):
+    attrs = _span_attributes(span)
+    return attrs.get(key)
+
+
+def _span_trace_id(span):
+    context = getattr(span, "context", None)
+    if context and getattr(context, "trace_id", None) is not None:
+        return f"{context.trace_id:032x}"
+    return getattr(span, "trace_id", None)
+
+
+def _span_has_message(span, text: str) -> bool:
+    args = _span_attr(span, "__args__")
+    if not args or not isinstance(args, str):
+        return False
+    return text in args
 
 
 def test_streaming_chunk_count(mock_otlp_collector, llama_stack_client, text_model_id):
     """Verify streaming adds chunk_count and __type__=async_generator."""
+    mock_otlp_collector.clear()
     stream = llama_stack_client.chat.completions.create(
         model=text_model_id,
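
A small but load-bearing detail in the new helpers: the OpenTelemetry SDK stores `trace_id` as a 128-bit integer, while spans that traveled over the wire are commonly surfaced in the 32-hex-digit W3C form, so `_span_trace_id` formats the integer with `:032x` to make ids from either source comparable. A standalone illustration (the value is made up):

```python
# An SDK-style 128-bit trace id (an int)...
trace_id = 0x0123456789ABCDEF0123456789ABCDEF
# ...normalized to the 32 lowercase hex digits used by W3C traceparent / OTLP JSON.
assert f"{trace_id:032x}" == "0123456789abcdef0123456789abcdef"
```

`_span_attributes` plays the same role for attribute access: `ReadableSpan.attributes` is a mapping-like `BoundedAttributes`, while other span representations may already be plain dicts, hence the duck-typed conversion.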
@@ -29,23 +55,33 @@ def test_streaming_chunk_count(mock_otlp_collector, llama_stack_client, text_model_id):
     chunks = list(stream)
     assert len(chunks) > 0
 
-    spans = mock_otlp_collector.get_spans()
+    spans = mock_otlp_collector.get_spans(expected_count=5)
     assert len(spans) > 0
 
-    chunk_count = None
-    for span in spans:
-        if span.attributes.get("__type__") == "async_generator":
-            chunk_count = span.attributes.get("chunk_count")
-            if chunk_count:
-                chunk_count = int(chunk_count)
-            break
+    async_generator_span = next(
+        (
+            span
+            for span in reversed(spans)
+            if _span_attr(span, "__type__") == "async_generator"
+            and _span_attr(span, "chunk_count")
+            and _span_has_message(span, "Test trace openai 1")
+        ),
+        None,
+    )
+    assert async_generator_span is not None
+
+    raw_chunk_count = _span_attr(async_generator_span, "chunk_count")
+    assert raw_chunk_count is not None
+    chunk_count = int(raw_chunk_count)
 
     assert chunk_count is not None
     assert chunk_count == len(chunks)
 
 
 def test_telemetry_format_completeness(mock_otlp_collector, llama_stack_client, text_model_id):
     """Comprehensive validation of telemetry data format including spans and metrics."""
+    mock_otlp_collector.clear()
     response = llama_stack_client.chat.completions.create(
         model=text_model_id,
         messages=[{"role": "user", "content": "Test trace openai with temperature 0.7"}],
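
Design note on the rewritten lookup above: with a shared, long-lived collector, spans from neighboring tests can be present, so each test clears the buffer up front, waits via `get_spans(expected_count=...)`, and then scans newest-first (`reversed(spans)`) for the span whose `__args__` contain its unique prompt text ("Test trace openai 1"). Matching on a marker message, rather than on span position or an exact count, is what keeps the tests order-independent.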
@@ -63,30 +99,41 @@ def test_telemetry_format_completeness(mock_otlp_collector, llama_stack_client, text_model_id):
     assert usage.get("total_tokens") and usage["total_tokens"] > 0
 
     # Verify spans
-    spans = mock_otlp_collector.get_spans()
-    # Expected spans: 1 root span + 3 autotraced method calls from routing/inference
-    assert len(spans) == 4, f"Expected 4 spans, got {len(spans)}"
+    spans = mock_otlp_collector.get_spans(expected_count=7)
+    target_span = next(
+        (span for span in reversed(spans) if _span_has_message(span, "Test trace openai with temperature 0.7")),
+        None,
+    )
+    assert target_span is not None
+    trace_id = _span_trace_id(target_span)
+    assert trace_id is not None
+    spans = [span for span in spans if _span_trace_id(span) == trace_id]
+    spans = [span for span in spans if _span_attr(span, "__root__") or _span_attr(span, "__autotraced__")]
+    assert len(spans) >= 4
 
     # Collect all model_ids found in spans
     logged_model_ids = []
     for span in spans:
-        attrs = span.attributes
+        attrs = _span_attributes(span)
         assert attrs is not None
 
         # Root span is created manually by tracing middleware, not by @trace_protocol decorator
        is_root_span = attrs.get("__root__") is True
         if is_root_span:
             # Root spans have different attributes
             assert attrs.get("__location__") in ["library_client", "server"]
-        else:
-            # Non-root spans are created by @trace_protocol decorator
-            assert attrs.get("__autotraced__")
-            assert attrs.get("__class__") and attrs.get("__method__")
-            assert attrs.get("__type__") in ["async", "sync", "async_generator"]
+            continue
 
-            args = json.loads(attrs["__args__"])
+        assert attrs.get("__autotraced__")
+        assert attrs.get("__class__") and attrs.get("__method__")
+        assert attrs.get("__type__") in ["async", "sync", "async_generator"]
+
+        args_field = attrs.get("__args__")
+        if args_field:
+            args = json.loads(args_field)
             if "model_id" in args:
                 logged_model_ids.append(args["model_id"])
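
One piece implied but not shown in these hunks is how the server under test learns where to export. A plausible wiring, reusing the hypothetical `MockOtlpCollector` sketched earlier and the standard OpenTelemetry endpoint variable (the PR's real fixture plumbing may differ):

```python
import os

import pytest


@pytest.fixture(scope="session")
def mock_otlp_collector():
    # Hypothetical: start the local collector, then point the server's
    # OpenTelemetry SDK at it via the standard endpoint variable before
    # the server process is launched.
    collector = MockOtlpCollector(port=4318)  # from the sketch above
    os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "http://127.0.0.1:4318"
    yield collector
```

Also worth noting in the last hunk: the exact span count (`== 4`) became a lower bound (`>= 4`) behind trace-id filtering, and `__args__` is now read through a guard, since the server path can emit extra middleware spans as well as spans that carry no serialized arguments.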