revert: Fireworks chat completion broken due to telemetry (#3402)

Reverts llamastack/llama-stack#3392
This commit is contained in:
Francisco Arceo 2025-09-10 12:53:38 -06:00 committed by GitHub
parent f6bf36343d
commit a6b1588dc6
No known key found for this signature in database
GPG key ID: B5690EEEBB952194

View file

@@ -423,7 +423,7 @@ class InferenceRouter(Inference):
# response_stream = await provider.openai_completion(**params)
response = await provider.openai_completion(**params)
if self.telemetry and getattr(response, "usage", None):
if self.telemetry:
metrics = self._construct_metrics(
prompt_tokens=response.usage.prompt_tokens,
completion_tokens=response.usage.completion_tokens,
@@ -529,7 +529,7 @@ class InferenceRouter(Inference):
if self.store:
asyncio.create_task(self.store.store_chat_completion(response, messages))
if self.telemetry and getattr(response, "usage", None):
if self.telemetry:
metrics = self._construct_metrics(
prompt_tokens=response.usage.prompt_tokens,
completion_tokens=response.usage.completion_tokens,