Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-27 11:43:54 +00:00)
test(test_completion_with_retries.py): cleanup tests
parent 5ade263079
commit 8a401076de
2 changed files with 2 additions and 47 deletions
@@ -41,7 +41,7 @@ def test_completion_custom_provider_model_name():
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
 
-# completion with num retries
+# completion with num retries + impact on exception mapping
 def test_completion_with_num_retries():
     try:
         response = completion(model="j2-ultra", messages=[{"messages": "vibe", "bad": "message"}], num_retries=2)
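The comment rename above matches what the test actually checks: litellm's completion() accepts a num_retries argument, and the test asserts that the error surfaced after the retries are exhausted is still a properly mapped exception. A minimal sketch of that call shape, using a placeholder prompt instead of the test's deliberately malformed messages:

from litellm import completion

# Sketch only: the prompt is an illustrative placeholder, not the repo's fixture.
response = completion(
    model="j2-ultra",
    messages=[{"role": "user", "content": "Hey, how's it going?"}],
    num_retries=2,  # re-attempt the request up to 2 times before raising
)
print(response["choices"][0]["message"]["content"])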
@@ -49,48 +49,3 @@ def test_completion_with_num_retries():
         pass
     except Exception as e:
         pytest.fail(f"Unmapped exception occurred")
-
-test_completion_with_num_retries()
-# bad call
-# def test_completion_custom_provider_model_name():
-#     try:
-#         response = completion_with_retries(
-#             model="bad-model",
-#             messages=messages,
-#             logger_fn=logger_fn,
-#         )
-#         # Add any assertions here to check the response
-#         print(response)
-#     except Exception as e:
-#         pytest.fail(f"Error occurred: {e}")
-
-# impact on exception mapping
-def test_context_window():
-    sample_text = "how does a court case get to the Supreme Court?" * 5000
-    messages = [{"content": sample_text, "role": "user"}]
-    try:
-        model = "chatgpt-test"
-        response = completion_with_retries(
-            model=model,
-            messages=messages,
-            custom_llm_provider="azure",
-            logger_fn=logger_fn,
-        )
-        print(f"response: {response}")
-    except InvalidRequestError as e:
-        print(f"InvalidRequestError: {e.llm_provider}")
-        return
-    except OpenAIError as e:
-        print(f"OpenAIError: {e.llm_provider}")
-        return
-    except Exception as e:
-        print("Uncaught Error in test_context_window")
-        print(f"Error Type: {type(e).__name__}")
-        print(f"Uncaught Exception - {e}")
-        pytest.fail(f"Error occurred: {e}")
-        return
-
-
-# test_context_window()
-
-# test_completion_custom_provider_model_name()
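For reference, the deleted test_context_window leaned on completion_with_retries, the helper litellm exposes for wrapping completion() in a retry loop. A hedged sketch of that helper on its own, with a placeholder model and prompt (the messages and logger_fn fixtures from the deleted code are not reproduced here):

from litellm import completion_with_retries

# Sketch only: the model name is a placeholder; completion_with_retries
# forwards its arguments to completion() and re-runs the call on failure.
try:
    response = completion_with_retries(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response)
except Exception as e:
    # once retries are exhausted, the mapped exception propagates
    print(f"call failed after retries: {e}")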
@@ -44,7 +44,7 @@ def test_context_window(model):
     with pytest.raises(ContextWindowExceededError):
         completion(model=model, messages=messages)
 
-# test_context_window(model="command-nightly")
+test_context_window(model="command-nightly")
 # Test 2: InvalidAuth Errors
 @pytest.mark.parametrize("model", models)
 def invalid_auth(model): # set the model key to an invalid key, depending on the model
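The kept context in this hunk shows the pattern the now-enabled test_context_window(model="command-nightly") call runs through: each parametrized model gets an oversized prompt and must raise ContextWindowExceededError. A self-contained sketch of that pattern, assuming ContextWindowExceededError is importable from litellm's top level and using an illustrative single-entry model list:

import pytest
from litellm import completion, ContextWindowExceededError

models = ["command-nightly"]  # assumption: stands in for the file's models list

@pytest.mark.parametrize("model", models)
def test_context_window(model):
    # repeat the question 5000 times, far past any model's context window
    sample_text = "how does a court case get to the Supreme Court?" * 5000
    messages = [{"content": sample_text, "role": "user"}]
    with pytest.raises(ContextWindowExceededError):
        completion(model=model, messages=messages)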