mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-10-04 04:04:14 +00:00
fix: Fireworks chat completion broken due to telemetry (#3392)
# What does this PR do? Fixes Fireworks chat completion, which was broken because telemetry expected `response.usage` to be present. Closes https://github.com/llamastack/llama-stack/issues/3391 ## Test Plan 1. `uv run --with llama-stack llama stack build --distro starter --image-type venv --run` Try ``` curl -X POST http://0.0.0.0:8321/v1/openai/v1/chat/completions \ -H "Content-Type: application/json" \ -d '{ "model": "fireworks/accounts/fireworks/models/llama-v3p1-8b-instruct", "messages": [{"role": "user", "content": "Hello!"}] }' ``` ``` {"id":"chatcmpl-ee922a08-0df0-4974-b0d3-b322113e8bc0","choices":[{"message":{"role":"assistant","content":"Hello! How can I assist you today?","name":null,"tool_calls":null},"finish_reason":"stop","index":0,"logprobs":null}],"object":"chat.completion","created":1757456375,"model":"fireworks/accounts/fireworks/models/llama-v3p1-8b-instruct"}% ``` Without the fix, the request fails as described in https://github.com/llamastack/llama-stack/issues/3391 Co-authored-by: Francisco Arceo <arceofrancisco@gmail.com>
This commit is contained in:
parent
c86e45496e
commit
935b8e28de
1 changed file with 2 additions and 2 deletions
|
@ -423,7 +423,7 @@ class InferenceRouter(Inference):
|
||||||
# response_stream = await provider.openai_completion(**params)
|
# response_stream = await provider.openai_completion(**params)
|
||||||
|
|
||||||
response = await provider.openai_completion(**params)
|
response = await provider.openai_completion(**params)
|
||||||
if self.telemetry:
|
if self.telemetry and getattr(response, "usage", None):
|
||||||
metrics = self._construct_metrics(
|
metrics = self._construct_metrics(
|
||||||
prompt_tokens=response.usage.prompt_tokens,
|
prompt_tokens=response.usage.prompt_tokens,
|
||||||
completion_tokens=response.usage.completion_tokens,
|
completion_tokens=response.usage.completion_tokens,
|
||||||
|
@ -529,7 +529,7 @@ class InferenceRouter(Inference):
|
||||||
if self.store:
|
if self.store:
|
||||||
asyncio.create_task(self.store.store_chat_completion(response, messages))
|
asyncio.create_task(self.store.store_chat_completion(response, messages))
|
||||||
|
|
||||||
if self.telemetry:
|
if self.telemetry and getattr(response, "usage", None):
|
||||||
metrics = self._construct_metrics(
|
metrics = self._construct_metrics(
|
||||||
prompt_tokens=response.usage.prompt_tokens,
|
prompt_tokens=response.usage.prompt_tokens,
|
||||||
completion_tokens=response.usage.completion_tokens,
|
completion_tokens=response.usage.completion_tokens,
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue