From 4c18239914a73ff2a1e975c526a2ff1c177cf226 Mon Sep 17 00:00:00 2001
From: Emilio Garcia
Date: Tue, 11 Nov 2025 10:07:40 -0500
Subject: [PATCH] fix(telemetry): chat completions with metrics was dead code

---
 tests/integration/inference/test_provider_data_routing.py | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/tests/integration/inference/test_provider_data_routing.py b/tests/integration/inference/test_provider_data_routing.py
index e4a0a24b5..fc64f7eb7 100644
--- a/tests/integration/inference/test_provider_data_routing.py
+++ b/tests/integration/inference/test_provider_data_routing.py
@@ -17,7 +17,6 @@ from unittest.mock import AsyncMock, patch
 import pytest
 
 from llama_stack.core.library_client import LlamaStackAsLibraryClient
-from llama_stack.core.telemetry.telemetry import MetricEvent
 from llama_stack_api import (
     Api,
     OpenAIAssistantMessageParam,
@@ -26,9 +25,7 @@ from llama_stack_api import (
     OpenAIChoice,
 )
 
-
-class OpenAIChatCompletionWithMetrics(OpenAIChatCompletion):
-    metrics: list[MetricEvent] | None = None
+from llama_stack.core.library_client import LlamaStackAsLibraryClient
 
 
 def test_unregistered_model_routing_with_provider_data(client_with_models):
@@ -72,7 +69,7 @@ def test_unregistered_model_routing_with_provider_data(client_with_models):
     # The inference router's routing_table.impls_by_provider_id should have anthropic
     # Let's patch the anthropic provider's openai_chat_completion method
     # to avoid making real API calls
-    mock_response = OpenAIChatCompletionWithMetrics(
+    mock_response = OpenAIChatCompletion(
        id="chatcmpl-test-123",
        created=1234567890,
        model="claude-3-5-sonnet-20241022",
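
Note for reviewers: a minimal sketch of the mock setup the test ends up with after
this change. The hunks above only show the first three constructor fields, so the
`choices` payload, the message fields, and the patch target (`anthropic_impl`,
`openai_chat_completion` as an attribute on the provider impl) are illustrative
assumptions, not verbatim test code.

    from unittest.mock import AsyncMock, patch

    from llama_stack_api import (
        OpenAIAssistantMessageParam,
        OpenAIChatCompletion,
        OpenAIChoice,
    )

    # The plain OpenAIChatCompletion type is enough here: the removed
    # OpenAIChatCompletionWithMetrics subclass only added a `metrics`
    # field that nothing in the test ever read, hence "dead code".
    mock_response = OpenAIChatCompletion(
        id="chatcmpl-test-123",
        created=1234567890,
        model="claude-3-5-sonnet-20241022",
        choices=[
            OpenAIChoice(
                index=0,
                finish_reason="stop",
                # Message content is assumed for illustration.
                message=OpenAIAssistantMessageParam(content="mock reply"),
            )
        ],
    )

    def install_mock(anthropic_impl):
        # Swap the provider's inference entry point for an AsyncMock so the
        # test never reaches the real Anthropic API; the returned context
        # manager restores the original method on exit.
        return patch.object(
            anthropic_impl,
            "openai_chat_completion",
            new=AsyncMock(return_value=mock_response),
        )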