fix(telemetry): chat completions with metrics was dead code

Emilio Garcia 2025-11-11 10:07:40 -05:00
parent 16975d0849
commit 4c18239914


@@ -17,7 +17,6 @@ from unittest.mock import AsyncMock, patch

 import pytest
 from llama_stack.core.library_client import LlamaStackAsLibraryClient
-from llama_stack.core.telemetry.telemetry import MetricEvent
 from llama_stack_api import (
     Api,
     OpenAIAssistantMessageParam,
@@ -26,9 +25,7 @@ from llama_stack_api import (
     OpenAIChoice,
 )

-class OpenAIChatCompletionWithMetrics(OpenAIChatCompletion):
-    metrics: list[MetricEvent] | None = None

 from llama_stack.core.library_client import LlamaStackAsLibraryClient

 def test_unregistered_model_routing_with_provider_data(client_with_models):
@@ -72,7 +69,7 @@ def test_unregistered_model_routing_with_provider_data(client_with_models):
     # The inference router's routing_table.impls_by_provider_id should have anthropic
     # Let's patch the anthropic provider's openai_chat_completion method
     # to avoid making real API calls
-    mock_response = OpenAIChatCompletionWithMetrics(
+    mock_response = OpenAIChatCompletion(
         id="chatcmpl-test-123",
         created=1234567890,
         model="claude-3-5-sonnet-20241022",