Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 10:44:24 +00:00)

Commit f946f61b4c: improvements to exception mapping
Parent commit: cd6b5b229f
5 changed files with 73 additions and 6 deletions
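The diff below touches litellm's exception-mapping tests: provider-specific failures are surfaced as litellm's own exception classes so callers can handle them uniformly. As a rough sketch of that calling pattern (not part of this commit; it uses only the classes imported in the test file below, the helper name ask is made up, and valid API keys are assumed to be configured):

from litellm import completion, InvalidRequestError, ContextWindowExceededError

def ask(model, messages):
    # litellm re-raises provider errors as its own exception types,
    # so the same handlers work across providers.
    try:
        return completion(model=model, messages=messages)
    except ContextWindowExceededError:
        # prompt too long for the model's context window
        raise
    except InvalidRequestError:
        # malformed request, e.g. a bad parameter value
        raise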
@@ -12,7 +12,7 @@ from litellm import (
    embedding,
    completion,
    # AuthenticationError,
    # InvalidRequestError,
    InvalidRequestError,
    ContextWindowExceededError,
    # RateLimitError,
    # ServiceUnavailableError,
@@ -34,8 +34,7 @@ litellm.vertex_location = "us-central1"

# Approach: Run each model through the test -> assert if the correct error (always the same one) is triggered

models = ["gpt-3.5-turbo"]
test_model = "claude-instant-1"
models = ["command-nightly"]

# Test 1: Context Window Errors
@pytest.mark.parametrize("model", models)
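The hunk above shows only the approach comment and the parametrize decorator; the Test 1 body sits outside the hunk. A plausible sketch of what the comment describes, with a hypothetical test name and a deliberately oversized prompt, expecting the mapped ContextWindowExceededError:

import pytest
from litellm import completion, ContextWindowExceededError

models = ["command-nightly"]

# Test 1: Context Window Errors (sketch; the real test body is not in this hunk)
@pytest.mark.parametrize("model", models)
def test_context_window_sketch(model):
    # Repeat a sentence until the prompt is far larger than the model's context window.
    sample_text = "how does a court case get to the Supreme Court? " * 5000
    messages = [{"content": sample_text, "role": "user"}]
    with pytest.raises(ContextWindowExceededError):
        completion(model=model, messages=messages)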
@@ -73,6 +72,9 @@ def invalid_auth(model):  # set the model key to an invalid key, depending on th
    elif model in litellm.openrouter_models:
        temporary_key = os.environ["OPENROUTER_API_KEY"]
        os.environ["OPENROUTER_API_KEY"] = "bad-key"
    elif model in litellm.aleph_alpha_models:
        temporary_key = os.environ["ALEPH_ALPHA_API_KEY"]
        os.environ["ALEPH_ALPHA_API_KEY"] = "bad-key"
    elif (
        model
        == "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
@@ -115,8 +117,18 @@ def invalid_auth(model):  # set the model key to an invalid key, depending on th
        os.environ["AI21_API_KEY"] = temporary_key
    elif ("togethercomputer" in model):
        os.environ["TOGETHERAI_API_KEY"] = temporary_key
    elif model in litellm.aleph_alpha_models:
        os.environ["ALEPH_ALPHA_API_KEY"] = temporary_key
    return


# Test 3: Invalid Request Error
@pytest.mark.parametrize("model", models)
def test_invalid_request_error(model):
    messages = [{"content": "hey, how's it going?", "role": "user"}]

    with pytest.raises(InvalidRequestError):
        completion(model=model, messages=messages, max_tokens="hello world")


# Test 3: Rate Limit Errors
# def test_model_call(model):
#     try:
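The last two hunks are the two halves of invalid_auth(): before the call the provider's key is swapped for "bad-key", and afterwards the saved key is restored so later tests keep working credentials. A condensed sketch of that save/swap/restore pattern (the helper name is made up, and importing AuthenticationError is an assumption, since it is commented out in this file's import block):

import os
import pytest
from litellm import completion, AuthenticationError  # AuthenticationError import is an assumption

def call_with_bad_key(model, env_var):
    # Stash the real key, substitute an invalid one, expect an auth failure,
    # and always restore the key so subsequent tests are unaffected.
    temporary_key = os.environ[env_var]
    os.environ[env_var] = "bad-key"
    try:
        with pytest.raises(AuthenticationError):
            completion(model=model, messages=[{"role": "user", "content": "hey"}])
    finally:
        os.environ[env_var] = temporary_key

# e.g. call_with_bad_key("<some openrouter model>", "OPENROUTER_API_KEY")  # model name left unspecified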