forked from phoenix/litellm-mirror

Improve test

commit 2e5278990d
parent 638984b764

1 changed file with 33 additions and 21 deletions
@@ -440,33 +440,45 @@ def test_completion_azure_stream_content_filter_no_delta():
             }
         ]
 
-        stream_iterator = iter(chunks)
+        chunk_list = []
+        for chunk in chunks:
+            new_chunk = litellm.ModelResponse(stream=True, id=chunk["id"])
+            if "choices" in chunk and isinstance(chunk["choices"], list):
+                new_choices = []
+                for choice in chunk["choices"]:
+                    if isinstance(choice, litellm.utils.StreamingChoices):
+                        _new_choice = choice
+                    elif isinstance(choice, dict):
+                        _new_choice = litellm.utils.StreamingChoices(**choice)
+                    new_choices.append(_new_choice)
+                new_chunk.choices = new_choices
+            chunk_list.append(new_chunk)
+
+        completion_stream = ModelResponseListIterator(model_responses=chunk_list)
         litellm.set_verbose = True
 
-        logging_obj = litellm.Logging(
-            model="berri-benchmarking-Llama-2-70b-chat-hf-4",
-            messages=messages,
-            stream=True,
-            litellm_call_id="1234",
-            function_id="function_id",
-            call_type="acompletion",
-            start_time=time.time(),
-        )
         response = litellm.CustomStreamWrapper(
-            completion_stream=stream_iterator,
+            completion_stream=completion_stream,
             model="azure/gpt-4o",
-            custom_llm_provider="azure",
-            logging_obj=logging_obj,
-        )
-        complete_response = ""
-
-        for idx, chunk in enumerate(response):
-            # print
-            chunk, finished = streaming_format_tests(idx, chunk)
-            complete_response += chunk
-            if finished:
+            custom_llm_provider="cached_response",
+            logging_obj=litellm.Logging(
+                messages=[{"role": "user", "content": "Hey"}],
+                stream=True,
+                call_type="completion",
+                start_time=time.time(),
+                litellm_call_id="12345",
+                function_id="1245",
+            ),
+        )
+
+        complete_response = ""
+        for idx, chunk in enumerate(response):
+            # print
+            chunk, finished = streaming_format_tests(idx, chunk)
+            complete_response += chunk
+            if finished:
                 break
         assert len(complete_response) > 0
 
     except Exception as e:
         pytest.fail(f"An exception occurred - {str(e)}")
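The new code routes the chunks through ModelResponseListIterator, a test helper that is not shown in this hunk. As a rough sketch, assuming the helper only needs to replay a fixed list of ModelResponse objects through the sync and async iterator protocols that CustomStreamWrapper consumes, it could look like this (a hypothetical reimplementation, not the repository's actual definition):

    # Sketch of a ModelResponseListIterator-style helper: replays
    # pre-built ModelResponse chunks as if they were a live stream.
    class ModelResponseListIterator:
        def __init__(self, model_responses):
            self.model_responses = model_responses
            self.index = 0

        def __iter__(self):
            return self

        def __next__(self):
            # Hand out the next pre-built chunk, or end the stream.
            if self.index >= len(self.model_responses):
                raise StopIteration
            model_response = self.model_responses[self.index]
            self.index += 1
            return model_response

        def __aiter__(self):
            return self

        async def __anext__(self):
            # Same replay behavior for async consumers.
            if self.index >= len(self.model_responses):
                raise StopAsyncIteration
            model_response = self.model_responses[self.index]
            self.index += 1
            return model_response

Switching custom_llm_provider from "azure" to "cached_response" appears to serve the same goal: the wrapper now receives fully formed ModelResponse objects instead of raw Azure deltas, so the test exercises the content-filter handling without depending on the provider-specific chunk parser.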
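For readers reproducing the setup, the conversion loop in the new code can be exercised on its own. A self-contained sketch, using a hypothetical Azure-style chunk of the shape this test is named for (a choices entry carrying content_filter_results but no delta key), might be:

    import litellm

    # Hypothetical chunk payload; field values are illustrative only.
    chunk = {
        "id": "chatcmpl-test-123",
        "object": "chat.completion.chunk",
        "created": 1715000000,
        "model": "gpt-4o",
        "choices": [
            {
                "index": 0,
                "finish_reason": None,
                # Content filter annotations arrive without a "delta" key,
                # which is the case this test exercises.
                "content_filter_results": {
                    "hate": {"filtered": False, "severity": "safe"},
                },
            }
        ],
    }

    # Same conversion the diff performs: dict chunk -> typed ModelResponse.
    new_chunk = litellm.ModelResponse(stream=True, id=chunk["id"])
    new_chunk.choices = [
        litellm.utils.StreamingChoices(**choice) for choice in chunk["choices"]
    ]

The list-comprehension form assumes every entry is a plain dict; the diff's loop additionally passes through entries that are already StreamingChoices instances.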