ci: Temporarily disable Telemetry during tests

We can re-enable it once the problem is fixed.
Closes: #4089

Signed-off-by: Derek Higgins <derekh@redhat.com>
Derek Higgins 2025-11-06 13:39:59 +00:00 committed by leseb
parent 03d23db910
commit ad4caf7b4a
2 changed files with 9 additions and 2 deletions


@@ -12,9 +12,13 @@ before and after each test, ensuring test isolation.
 import json
+import pytest


 def test_streaming_chunk_count(mock_otlp_collector, llama_stack_client, text_model_id):
     """Verify streaming adds chunk_count and __type__=async_generator."""
+    pytest.skip("Disabled: See https://github.com/llamastack/llama-stack/issues/4089")
     stream = llama_stack_client.chat.completions.create(
         model=text_model_id,
         messages=[{"role": "user", "content": "Test trace openai 1"}],
@@ -50,6 +54,7 @@ def test_streaming_chunk_count(mock_otlp_collector, llama_stack_client, text_mod

 def test_telemetry_format_completeness(mock_otlp_collector, llama_stack_client, text_model_id):
     """Comprehensive validation of telemetry data format including spans and metrics."""
+    pytest.skip("Disabled: See https://github.com/llamastack/llama-stack/issues/4089")
     response = llama_stack_client.chat.completions.create(
         model=text_model_id,
         messages=[{"role": "user", "content": "Test trace openai with temperature 0.7"}],