mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-27 11:43:54 +00:00
test(test_completion.py): commenting out flaky test
This commit is contained in:
parent
3d7fab6c0c
commit
6fd50177e8
1 changed files with 29 additions and 29 deletions
|
@ -867,35 +867,35 @@ def test_completion_replicate_vicuna():
|
|||
except Exception as e:
|
||||
pytest.fail(f"Error occurred: {e}")
|
||||
# test_completion_replicate_vicuna()
|
||||
|
||||
def test_completion_replicate_llama2_stream():
|
||||
litellm.set_verbose=False
|
||||
model_name = "replicate/meta/llama-2-7b-chat:13c3cdee13ee059ab779f0291d29054dab00a47dad8261375654de5540165fb0"
|
||||
try:
|
||||
response = completion(
|
||||
model=model_name,
|
||||
messages=[
|
||||
{
|
||||
"role": "user",
|
||||
"content": "what is yc write 1 paragraph",
|
||||
}
|
||||
],
|
||||
stream=True,
|
||||
max_tokens=20,
|
||||
num_retries=3
|
||||
)
|
||||
print(f"response: {response}")
|
||||
# Add any assertions here to check the response
|
||||
complete_response = ""
|
||||
for i, chunk in enumerate(response):
|
||||
complete_response += chunk.choices[0].delta["content"]
|
||||
# if i == 0:
|
||||
# assert len(chunk.choices[0].delta["content"]) > 2
|
||||
# print(chunk)
|
||||
assert len(complete_response) > 5
|
||||
print(f"complete_response: {complete_response}")
|
||||
except Exception as e:
|
||||
pytest.fail(f"Error occurred: {e}")
|
||||
# commenting out - flaky test
|
||||
# def test_completion_replicate_llama2_stream():
|
||||
# litellm.set_verbose=False
|
||||
# model_name = "replicate/meta/llama-2-7b-chat:13c3cdee13ee059ab779f0291d29054dab00a47dad8261375654de5540165fb0"
|
||||
# try:
|
||||
# response = completion(
|
||||
# model=model_name,
|
||||
# messages=[
|
||||
# {
|
||||
# "role": "user",
|
||||
# "content": "what is yc write 1 paragraph",
|
||||
# }
|
||||
# ],
|
||||
# stream=True,
|
||||
# max_tokens=20,
|
||||
# num_retries=3
|
||||
# )
|
||||
# print(f"response: {response}")
|
||||
# # Add any assertions here to check the response
|
||||
# complete_response = ""
|
||||
# for i, chunk in enumerate(response):
|
||||
# complete_response += chunk.choices[0].delta["content"]
|
||||
# # if i == 0:
|
||||
# # assert len(chunk.choices[0].delta["content"]) > 2
|
||||
# # print(chunk)
|
||||
# assert len(complete_response) > 5
|
||||
# print(f"complete_response: {complete_response}")
|
||||
# except Exception as e:
|
||||
# pytest.fail(f"Error occurred: {e}")
|
||||
# test_completion_replicate_llama2_stream()
|
||||
|
||||
def test_replicate_custom_prompt_dict():
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue