Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-04 12:07:34 +00:00
revert: Fireworks chat completion broken due to telemetry (#3402)
Reverts llamastack/llama-stack#3392
This commit is contained in:
parent f6bf36343d
commit a6b1588dc6

1 changed file with 2 additions and 2 deletions
@@ -423,7 +423,7 @@ class InferenceRouter(Inference):
         # response_stream = await provider.openai_completion(**params)

         response = await provider.openai_completion(**params)
-        if self.telemetry and getattr(response, "usage", None):
+        if self.telemetry:
             metrics = self._construct_metrics(
                 prompt_tokens=response.usage.prompt_tokens,
                 completion_tokens=response.usage.completion_tokens,
@@ -529,7 +529,7 @@ class InferenceRouter(Inference):
         if self.store:
             asyncio.create_task(self.store.store_chat_completion(response, messages))

-        if self.telemetry and getattr(response, "usage", None):
+        if self.telemetry:
             metrics = self._construct_metrics(
                 prompt_tokens=response.usage.prompt_tokens,
                 completion_tokens=response.usage.completion_tokens,