From a6b1588dc612df097d4fecce317547515b281ec6 Mon Sep 17 00:00:00 2001
From: Francisco Arceo
Date: Wed, 10 Sep 2025 12:53:38 -0600
Subject: [PATCH] revert: Fireworks chat completion broken due to telemetry
 (#3402)

Reverts llamastack/llama-stack#3392
---
 llama_stack/core/routers/inference.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llama_stack/core/routers/inference.py b/llama_stack/core/routers/inference.py
index 9593dd5b9..2ed2d0439 100644
--- a/llama_stack/core/routers/inference.py
+++ b/llama_stack/core/routers/inference.py
@@ -423,7 +423,7 @@ class InferenceRouter(Inference):
 
         # response_stream = await provider.openai_completion(**params)
         response = await provider.openai_completion(**params)
-        if self.telemetry and getattr(response, "usage", None):
+        if self.telemetry:
             metrics = self._construct_metrics(
                 prompt_tokens=response.usage.prompt_tokens,
                 completion_tokens=response.usage.completion_tokens,
@@ -529,7 +529,7 @@ class InferenceRouter(Inference):
 
         if self.store:
             asyncio.create_task(self.store.store_chat_completion(response, messages))
-        if self.telemetry and getattr(response, "usage", None):
+        if self.telemetry:
             metrics = self._construct_metrics(
                 prompt_tokens=response.usage.prompt_tokens,
                 completion_tokens=response.usage.completion_tokens,
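
For context, here is a minimal runnable sketch of the behavioral difference
between the two guard variants this patch toggles. It is not the llama-stack
source: `Usage`, `FakeResponse`, and `construct_metrics` are hypothetical
stand-ins for the router's real types and for `_construct_metrics`.

    from dataclasses import dataclass


    @dataclass
    class Usage:
        prompt_tokens: int
        completion_tokens: int


    @dataclass
    class FakeResponse:
        # Some providers may omit usage accounting on a response.
        usage: Usage | None = None


    def construct_metrics(prompt_tokens: int, completion_tokens: int) -> dict:
        # Stand-in for InferenceRouter._construct_metrics.
        return {
            "prompt_tokens": prompt_tokens,
            "completion_tokens": completion_tokens,
            "total_tokens": prompt_tokens + completion_tokens,
        }


    def record_reverted(telemetry_enabled: bool, response: FakeResponse) -> dict | None:
        # Behavior after this revert: only the telemetry flag is checked,
        # so response.usage is assumed to be present.
        if telemetry_enabled:
            return construct_metrics(
                prompt_tokens=response.usage.prompt_tokens,
                completion_tokens=response.usage.completion_tokens,
            )
        return None


    def record_guarded(telemetry_enabled: bool, response: FakeResponse) -> dict | None:
        # Behavior #3392 introduced: skip metrics when usage is missing.
        if telemetry_enabled and getattr(response, "usage", None):
            return construct_metrics(
                prompt_tokens=response.usage.prompt_tokens,
                completion_tokens=response.usage.completion_tokens,
            )
        return None


    if __name__ == "__main__":
        ok = FakeResponse(usage=Usage(prompt_tokens=10, completion_tokens=5))
        print(record_reverted(True, ok))        # metrics dict
        print(record_guarded(True, FakeResponse()))  # None: guard skips it

Under this sketch's assumptions, the guarded variant silently drops metrics
whenever usage is falsy, while the reverted variant emits them unconditionally
and relies on usage always being populated.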