Mirror of https://github.com/BerriAI/litellm.git
add finish reason to streamed responses

commit 0f769c5417, parent ca72ebd2df
3 changed files with 27 additions and 4 deletions
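The change has two parts: OpenAI chat/text streams now return the provider's raw chunk, which already carries a finish_reason, while the chunks the wrapper builds for every other model gain a "finish_reason" field. A wrapped chunk then takes roughly this shape (a sketch with illustrative content; the role/content keys follow the completion_obj delta the wrapper already builds):

```python
# Sketch of a chunk yielded by CustomStreamWrapper after this commit
# (the "content" value is illustrative, not from the source):
chunk = {
    "choices": [
        {
            "delta": {"role": "assistant", "content": "partial text"},
            "finish_reason": "stop",
        }
    ]
}
```

The relevant hunk: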
@@ -2198,12 +2198,22 @@ class CustomStreamWrapper:
                 completion_obj["content"] = self.handle_openai_text_completion_chunk(chunk)
             else: # openai chat/azure models
                 chunk = next(self.completion_stream)
-                completion_obj["content"] = self.handle_openai_chat_completion_chunk(chunk)
+                return chunk # open ai returns finish_reason, we should just return the openai chunk
+
+                #completion_obj["content"] = self.handle_openai_chat_completion_chunk(chunk)
 
             # LOGGING
             threading.Thread(target=self.logging_obj.success_handler, args=(completion_obj,)).start()
             # return this for all models
-            return {"choices": [{"delta": completion_obj}]}
+            return {
+                "choices": 
+                    [
+                        {
+                            "delta": completion_obj,
+                            "finish_reason": "stop"
+                        },
+                    ]
+                }
         except Exception as e:
             raise StopIteration
 