fix(utils.py): add more logging to identify ci/cd issue

Krrish Dholakia 2024-04-25 19:57:24 -07:00
parent 039ea5497f
commit 850b056df5
2 changed files with 200 additions and 199 deletions

litellm/tests/test_streaming.py

@@ -2516,7 +2516,13 @@ def test_unit_test_custom_stream_wrapper():
     assert freq == 1


-chunks = [
+def test_amazing_unit_test_custom_stream_wrapper_n():
+    """
+    Test if the translated output maps exactly to the received openai input
+    Relevant issue: https://github.com/BerriAI/litellm/issues/3276
+    """
+    chunks = [
     {
         "id": "chatcmpl-9HzZIMCtVq7CbTmdwEZrktiTeoiYe",
         "object": "chat.completion.chunk",
@@ -2711,15 +2717,8 @@ chunks = [
             {"index": 1, "delta": {}, "logprobs": None, "finish_reason": "stop"}
         ],
     },
-]
-
-
-def test_unit_test_custom_stream_wrapper_n():
-    """
-    Test if the translated output maps exactly to the received openai input
-    Relevant issue: https://github.com/BerriAI/litellm/issues/3276
-    """
+    ]
     litellm.set_verbose = False
     chunk_list = []
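The renamed test asserts that CustomStreamWrapper is a faithful pass-through for multi-choice (n>1) streams: each chunk carries a per-choice "index", and reassembling the wrapper's output per index must reproduce exactly what the provider sent. A minimal standalone sketch of that property, using illustrative chunk data and a hypothetical reassemble() helper rather than litellm's own fixtures:

# Sketch of the pass-through property the test checks; the data and
# reassemble() helper are illustrative, not part of litellm.
from collections import defaultdict

chunks = [
    {"choices": [{"index": 0, "delta": {"content": "Hello"}, "finish_reason": None}]},
    {"choices": [{"index": 1, "delta": {"content": "Hi"}, "finish_reason": None}]},
    {
        "choices": [
            {"index": 0, "delta": {}, "finish_reason": "stop"},
            {"index": 1, "delta": {}, "finish_reason": "stop"},
        ]
    },
]

def reassemble(stream):
    # Concatenate delta content per choice index, as a streaming client would.
    texts = defaultdict(str)
    for chunk in stream:
        for choice in chunk["choices"]:
            texts[choice["index"]] += choice["delta"].get("content") or ""
    return dict(texts)

# A faithful translation layer preserves the per-index mapping, so
# reassembling its output equals reassembling the raw provider chunks.
assert reassemble(chunks) == {0: "Hello", 1: "Hi"}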

litellm/utils.py

@@ -10198,9 +10198,11 @@ class CustomStreamWrapper:
                         choice_json.pop(
                             "finish_reason", None
                         )  # for mistral etc. which return a value in their last chunk (not-openai compatible).
+                        print_verbose(f"choice_json: {choice_json}")
                         choices.append(StreamingChoices(**choice_json))
                     except Exception as e:
                         choices.append(StreamingChoices())
+                print_verbose(f"choices in streaming: {choices}")
                 model_response.choices = choices
             else:
                 return
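Both added lines route through print_verbose, litellm's gate for debug output. A simplified standalone stand-in for that pattern, assuming the gate is a module-level flag like litellm.set_verbose (this is not litellm's exact implementation):

# Simplified stand-in for litellm's print_verbose, which lives in utils.py
# and checks litellm.set_verbose.
set_verbose = False  # flipped on (e.g. in the failing CI job) to surface logs

def print_verbose(print_statement):
    # Emit debug output only when verbose mode is enabled, so the added
    # logging costs nothing in normal runs.
    if set_verbose:
        print(print_statement)

choice_json = {"index": 0, "delta": {"content": "hi"}, "logprobs": None}
print_verbose(f"choice_json: {choice_json}")  # silent unless set_verbose is True

With the flag enabled in CI, the two calls show the per-choice payload just before StreamingChoices(**choice_json) is built and the final choices list, which is the visibility the commit needs to localize the flaky behavior.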