Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-05 12:21:52 +00:00
fix: Fireworks chat completion broken due to telemetry
parent 28696c3f30
commit 6b36f25531

1 changed file with 2 additions and 2 deletions
@@ -423,7 +423,7 @@ class InferenceRouter(Inference):
         # response_stream = await provider.openai_completion(**params)
         response = await provider.openai_completion(**params)
-        if self.telemetry:
+        if self.telemetry and getattr(response, "usage", None):
             metrics = self._construct_metrics(
                 prompt_tokens=response.usage.prompt_tokens,
                 completion_tokens=response.usage.completion_tokens,
@@ -529,7 +529,7 @@ class InferenceRouter(Inference):
         if self.store:
             asyncio.create_task(self.store.store_chat_completion(response, messages))

-        if self.telemetry:
+        if self.telemetry and getattr(response, "usage", None):
             metrics = self._construct_metrics(
                 prompt_tokens=response.usage.prompt_tokens,
                 completion_tokens=response.usage.completion_tokens,
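
Both hunks apply the same guard: build telemetry metrics only when the provider's response actually carries a usage object, since some providers (here, Fireworks) can return completions without one. As a minimal sketch of the failure mode and the fix, the snippet below uses stand-in types; Usage, Completion, and construct_metrics are hypothetical substitutes for the real llama-stack classes, of which this diff shows only fragments.

from dataclasses import dataclass

@dataclass
class Usage:
    prompt_tokens: int
    completion_tokens: int

@dataclass
class Completion:
    text: str
    usage: Usage | None = None  # some providers may omit usage entirely

def construct_metrics(prompt_tokens: int, completion_tokens: int) -> dict:
    # Hypothetical stand-in for InferenceRouter._construct_metrics.
    return {"prompt_tokens": prompt_tokens, "completion_tokens": completion_tokens}

def record_telemetry(telemetry_enabled: bool, response: Completion) -> dict | None:
    # The fix: check for a truthy usage object before dereferencing it,
    # so a usage-less response no longer raises AttributeError/TypeError.
    if telemetry_enabled and getattr(response, "usage", None):
        return construct_metrics(
            prompt_tokens=response.usage.prompt_tokens,
            completion_tokens=response.usage.completion_tokens,
        )
    return None

# Before the fix, a bare `if self.telemetry:` would reach
# `response.usage.prompt_tokens` on a usage-less response and crash.
assert record_telemetry(True, Completion(text="hi")) is None
assert record_telemetry(True, Completion(text="hi", usage=Usage(3, 5))) == {
    "prompt_tokens": 3,
    "completion_tokens": 5,
}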