mirror of https://github.com/meta-llama/llama-stack.git
synced 2025-10-04 12:07:34 +00:00
minor fix
This commit is contained in:
parent f9348a6bdf
commit 4271f7352e
1 changed file with 4 additions and 7 deletions
@@ -1417,10 +1417,9 @@ class OpenAIChatCompletionToLlamaStackMixin:
         if stream:
             return OpenAIChatCompletionToLlamaStackMixin._process_stream_response(self, model, outstanding_responses)

-        response = await OpenAIChatCompletionToLlamaStackMixin._process_non_stream_response(
+        return await OpenAIChatCompletionToLlamaStackMixin._process_non_stream_response(
             self, model, outstanding_responses
         )
-        return response

     async def _process_stream_response(
         self,
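In effect, this hunk replaces the one-use `response` local with a direct `return await`; behavior is unchanged. A minimal sketch of the method body after the change, assuming the enclosing method name and signature (only the body lines shown in the diff come from the commit):

    # Sketch only: the method name and signature are assumptions;
    # the body lines match the post-change diff above.
    async def openai_chat_completion(self, model, outstanding_responses, stream=False):
        if stream:
            # Streaming path: hand back the stream processor's result directly.
            return OpenAIChatCompletionToLlamaStackMixin._process_stream_response(
                self, model, outstanding_responses
            )
        # Non-streaming path: return the awaited result directly,
        # with no intermediate variable.
        return await OpenAIChatCompletionToLlamaStackMixin._process_non_stream_response(
            self, model, outstanding_responses
        )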
@@ -1512,11 +1511,9 @@ class OpenAIChatCompletionToLlamaStackMixin:
             )
             choices.append(choice)

-        usage = None
-        if total_tokens > 0:
-            usage = OpenAIChatCompletionUsage(
-                prompt_tokens=total_prompt_tokens, completion_tokens=total_completion_tokens, total_tokens=total_tokens
-            )
+        usage = OpenAIChatCompletionUsage(
+            prompt_tokens=total_prompt_tokens, completion_tokens=total_completion_tokens, total_tokens=total_tokens
+        )

         return OpenAIChatCompletion(
             id=f"chatcmpl-{uuid.uuid4()}",
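The second hunk drops the `total_tokens > 0` guard, so `usage` is always constructed: a response that consumed zero tokens now reports zeros rather than leaving usage as None. A minimal sketch of the resulting tail of the non-stream path (keyword arguments other than `id` are assumptions about the constructor):

    # Usage is now built unconditionally, so zero-token responses report
    # prompt_tokens=0, completion_tokens=0, total_tokens=0 instead of None.
    usage = OpenAIChatCompletionUsage(
        prompt_tokens=total_prompt_tokens,
        completion_tokens=total_completion_tokens,
        total_tokens=total_tokens,
    )
    return OpenAIChatCompletion(
        id=f"chatcmpl-{uuid.uuid4()}",
        choices=choices,  # assumed: the choices list accumulated above
        usage=usage,      # assumed keyword for attaching the usage object
    )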