forked from phoenix/litellm-mirror
fix(utils.py): streaming
commit dd925d3de3
parent fe9c1e2b45
2 changed files with 19 additions and 11 deletions
@@ -1,13 +1,18 @@
-# #### What this tests ####
-# # This tests the LiteLLM Class
+#### What this tests ####
+# This tests the LiteLLM Class
 
-# import sys, os
-# import traceback
-# import pytest
-# sys.path.insert(
-#     0, os.path.abspath("../..")
-# ) # Adds the parent directory to the system path
-# import litellm
+import sys, os
+import traceback
+import pytest
+sys.path.insert(
+    0, os.path.abspath("../..")
+) # Adds the parent directory to the system path
+import litellm
+
+mr1 = litellm.ModelResponse(stream=True, model="gpt-3.5-turbo")
+mr1.choices[0].finish_reason = "stop"
+mr2 = litellm.ModelResponse(stream=True, model="gpt-3.5-turbo")
+print(mr2.choices[0].finish_reason)
 # litellm.set_verbose = True
 # from litellm import Router
 # import instructor
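For context, the uncommented lines exercise the default finish_reason on a fresh streaming ModelResponse: mutating mr1 must not bleed into mr2, and mr2.choices[0].finish_reason should print None. A minimal pytest-style restatement of the same check (the test name and assertion are illustrative, not part of the commit):

import litellm

def test_streaming_finish_reason_defaults_to_none():
    # Illustrative sketch: setting finish_reason on one streaming response
    # should not leak into a freshly constructed one.
    mr1 = litellm.ModelResponse(stream=True, model="gpt-3.5-turbo")
    mr1.choices[0].finish_reason = "stop"
    mr2 = litellm.ModelResponse(stream=True, model="gpt-3.5-turbo")
    assert mr2.choices[0].finish_reason is None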
@@ -226,7 +226,10 @@ class Usage(OpenAIObject):
 class StreamingChoices(OpenAIObject):
     def __init__(self, finish_reason=None, index=0, delta: Optional[Delta]=None, **params):
         super(StreamingChoices, self).__init__(**params)
-        self.finish_reason = finish_reason
+        if finish_reason:
+            self.finish_reason = finish_reason
+        else:
+            self.finish_reason = None
         self.index = index
         if delta:
             self.delta = delta
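The effect of this change is that StreamingChoices always materializes finish_reason, coercing any falsy value to an explicit None, so consumers can read it unconditionally. A standalone sketch of the normalized constructor (a plain-Python approximation under assumed behavior, not the litellm class itself, shown without the OpenAIObject base class):

from typing import Optional

class StreamingChoicesSketch:
    # Hypothetical stand-in for litellm's StreamingChoices.
    def __init__(self, finish_reason=None, index=0, delta: Optional[dict] = None, **params):
        # Coerce any falsy finish_reason ("" or None) to an explicit None.
        self.finish_reason = finish_reason if finish_reason else None
        self.index = index
        if delta:
            self.delta = delta

choice = StreamingChoicesSketch()
print(choice.finish_reason)  # None until a final chunk sets e.g. "stop"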
@@ -4458,7 +4461,7 @@ class CustomStreamWrapper:
 
     def chunk_creator(self, chunk):
         model_response = ModelResponse(stream=True, model=self.model)
-        print_verbose(f"model_response finish reason 1: {model_response.choices[0].finish_reason}")
+        model_response.choices[0].finish_reason = None
         try:
             # return this for all models
             completion_obj = {"content": ""}
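With chunk_creator now pinning finish_reason to None on every intermediate chunk instead of logging it, stream consumers get a reliable end-of-stream signal. A hedged usage sketch under that assumption (requires a configured API key; the model and prompt are placeholders, and attribute access follows the OpenAI-style response shape):

import litellm

response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Say hi"}],
    stream=True,
)
for chunk in response:
    choice = chunk.choices[0]
    if choice.finish_reason is None:
        # Intermediate chunk: finish_reason stays None until the stream ends.
        print(choice.delta.content or "", end="")
    else:
        print(f"\nfinish_reason: {choice.finish_reason}")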