Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-03 09:53:45 +00:00
fix(telemetry): chat completions with metrics was dead code

parent 16975d0849
commit 4c18239914

1 changed file with 2 additions and 5 deletions
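The dead code named in the commit title is a test-local subclass that bolted a metrics field onto the response model; per the commit message it was dead weight, presumably because nothing ever populated or asserted on that field. A distilled sketch of the removed pattern, assuming OpenAIChatCompletion is exported by llama_stack_api alongside the other OpenAI* names visible in the diff:

# The pattern this commit deletes, reduced to its essentials.
from llama_stack.core.telemetry.telemetry import MetricEvent
from llama_stack_api import OpenAIChatCompletion


class OpenAIChatCompletionWithMetrics(OpenAIChatCompletion):
    # Declared but (per the commit message) never read anywhere in the
    # test, which is what made the subclass dead code.
    metrics: list[MetricEvent] | None = None

With the subclass gone, the mock response can be built from plain OpenAIChatCompletion, which is exactly what the last hunk below does.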
@@ -17,7 +17,6 @@ from unittest.mock import AsyncMock, patch
 import pytest
 
 from llama_stack.core.library_client import LlamaStackAsLibraryClient
-from llama_stack.core.telemetry.telemetry import MetricEvent
 from llama_stack_api import (
     Api,
     OpenAIAssistantMessageParam,
@@ -26,9 +25,7 @@ from llama_stack_api import (
     OpenAIChoice,
 )
 
 
-class OpenAIChatCompletionWithMetrics(OpenAIChatCompletion):
-    metrics: list[MetricEvent] | None = None
 
 
 def test_unregistered_model_routing_with_provider_data(client_with_models):
@@ -72,7 +69,7 @@ def test_unregistered_model_routing_with_provider_data(client_with_models):
     # The inference router's routing_table.impls_by_provider_id should have anthropic
     # Let's patch the anthropic provider's openai_chat_completion method
     # to avoid making real API calls
-    mock_response = OpenAIChatCompletionWithMetrics(
+    mock_response = OpenAIChatCompletion(
         id="chatcmpl-test-123",
         created=1234567890,
         model="claude-3-5-sonnet-20241022",
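The comment lines in the final hunk describe patching the Anthropic provider's openai_chat_completion method so the test never makes a real API call. Below is a minimal sketch of that mocking technique under stated assumptions: FakeAnthropicProvider and the closing assertion are illustrative stand-ins (the real test patches the provider instance it reaches through the router's routing table), and the choices/message field names beyond the id/created/model arguments shown in the diff are assumptions about the OpenAI-compatible models llama_stack_api exposes.

import asyncio
from unittest.mock import AsyncMock, patch

from llama_stack_api import (
    OpenAIAssistantMessageParam,
    OpenAIChatCompletion,
    OpenAIChoice,
)


class FakeAnthropicProvider:
    # Illustrative stand-in for the real provider implementation.
    async def openai_chat_completion(self, **kwargs):
        raise RuntimeError("would hit the real Anthropic API")


# Plain OpenAIChatCompletion now suffices; the removed *WithMetrics
# subclass added only a `metrics` field that nothing read.
mock_response = OpenAIChatCompletion(
    id="chatcmpl-test-123",
    created=1234567890,
    model="claude-3-5-sonnet-20241022",
    choices=[
        OpenAIChoice(
            index=0,
            finish_reason="stop",
            message=OpenAIAssistantMessageParam(content="mocked"),
        )
    ],
)


async def main() -> None:
    provider = FakeAnthropicProvider()
    # AsyncMock replaces the coroutine method and returns the canned
    # response instead of calling out to Anthropic.
    with patch.object(
        provider, "openai_chat_completion", new=AsyncMock(return_value=mock_response)
    ):
        result = await provider.openai_chat_completion(
            model="claude-3-5-sonnet-20241022"
        )
    assert result.id == "chatcmpl-test-123"


asyncio.run(main())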
|||
Loading…
Add table
Add a link
Reference in a new issue