forked from phoenix/litellm-mirror
test(test_streaming.py): retry if openai is inconsistent with stream options
This commit is contained in:
parent 20ad1a5189
commit 207924d08f
1 changed file with 33 additions and 25 deletions
@@ -1730,6 +1730,8 @@ def test_openai_stream_options_call():

def test_openai_stream_options_call_text_completion():
    litellm.set_verbose = False
    for idx in range(3):
        try:
            response = litellm.text_completion(
                model="gpt-3.5-turbo-instruct",
                prompt="say GM - we're going to make it ",
@@ -1759,6 +1761,12 @@ def test_openai_stream_options_call_text_completion():

            # assert all non last chunks have usage=None
            assert all(chunk.usage is None for chunk in chunks[:-1])
            break
        except Exception as e:
            if idx < 2:
                pass
            else:
                raise e


def test_openai_text_completion_call():
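For readers skimming the hunks above, here is a minimal sketch of what the retry wrapper does in context. The streaming arguments and the chunk collection sit in the elided middle of the diff, so stream=True, stream_options={"include_usage": True}, and the list comprehension over response are assumptions; the assertion and the retry/except logic come straight from the visible lines.

# Sketch only: reconstructs the test around the hunks shown above.
import litellm


def test_openai_stream_options_call_text_completion():
    litellm.set_verbose = False
    for idx in range(3):  # retry up to 3 times; OpenAI is occasionally inconsistent here
        try:
            response = litellm.text_completion(
                model="gpt-3.5-turbo-instruct",
                prompt="say GM - we're going to make it ",
                stream=True,  # assumption: streaming args are not visible in the hunks
                stream_options={"include_usage": True},  # assumption: mirrors the chat test above
            )
            chunks = [chunk for chunk in response]  # assumption: chunk collection elided from the diff
            # assert all non last chunks have usage=None (only the final chunk carries usage)
            assert all(chunk.usage is None for chunk in chunks[:-1])
            break  # success, stop retrying
        except Exception as e:
            if idx < 2:
                pass  # swallow the failure and retry
            else:
                raise e  # out of retries, surface the error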