adding coverage for openrouter in exception mapping

Krrish Dholakia 2023-08-29 15:30:24 -07:00
parent 436e8eadb2
commit 5bcf612724
6 changed files with 41 additions and 15 deletions


@@ -1,4 +1,4 @@
-# from openai.error import AuthenticationError, InvalidRequestError, RateLimitError, OpenAIError
+from openai.error import AuthenticationError, InvalidRequestError, RateLimitError, OpenAIError
 import os
 import sys
 import traceback
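
The test now asserts against the exception classes exported by openai.error rather than litellm's re-exports: litellm maps provider-specific failures onto OpenAI's exception types, so one set of except clauses covers every backend. A minimal sketch of that pattern, illustrative and not part of the diff:

    from openai.error import AuthenticationError
    import litellm

    try:
        # a bad Anthropic key should still surface as an OpenAI-style error
        litellm.completion(
            model="claude-instant-1",
            messages=[{"role": "user", "content": "hi"}],
        )
    except AuthenticationError as e:
        print(f"provider error mapped to openai.error type: {e}")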
@@ -10,15 +10,17 @@ import litellm
 from litellm import (
     embedding,
     completion,
-    AuthenticationError,
-    InvalidRequestError,
+    # AuthenticationError,
+    # InvalidRequestError,
     ContextWindowExceededError,
-    RateLimitError,
-    ServiceUnavailableError,
-    OpenAIError,
+    # RateLimitError,
+    # ServiceUnavailableError,
+    # OpenAIError,
 )
 from concurrent.futures import ThreadPoolExecutor
 import pytest
+litellm.vertex_project = "pathrise-convert-1606954137718"
+litellm.vertex_location = "us-central1"
 litellm.failure_callback = ["sentry"]
 # litellm.set_verbose = True
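
The two new module-level settings point litellm's Vertex AI calls at a specific GCP project and region. Assuming litellm's usual pattern, they are read implicitly by any Vertex model call; a sketch with a hypothetical project id:

    import litellm

    litellm.vertex_project = "my-gcp-project"   # hypothetical project id
    litellm.vertex_location = "us-central1"

    # the settings above are picked up when a Vertex AI model is invoked
    response = litellm.completion(
        model="chat-bison",
        messages=[{"role": "user", "content": "hi"}],
    )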
@@ -33,8 +35,8 @@ litellm.failure_callback = ["sentry"]
 # Approach: Run each model through the test -> assert if the correct error (always the same one) is triggered
 # models = ["gpt-3.5-turbo", "chatgpt-test", "claude-instant-1", "command-nightly"]
-test_model = "j2-light"
-models = ["j2-light"]
+test_model = "claude-instant-1"
+models = ["claude-instant-1"]
 def logging_fn(model_call_dict):
@@ -48,7 +50,7 @@ def logging_fn(model_call_dict):
 # Test 1: Context Window Errors
 @pytest.mark.parametrize("model", models)
 def test_context_window(model):
-    sample_text = "how does a court case get to the Supreme Court?" * 5000
+    sample_text = "how does a court case get to the Supreme Court?" * 1000000
     messages = [{"content": sample_text, "role": "user"}]
     try:
         print(f"model: {model}")
@@ -84,14 +86,12 @@ def invalid_auth(model): # set the model key to an invalid key, depending on the model
     messages = [{"content": "Hello, how are you?", "role": "user"}]
     temporary_key = None
     try:
-        custom_llm_provider = None
         if model == "gpt-3.5-turbo":
             temporary_key = os.environ["OPENAI_API_KEY"]
             os.environ["OPENAI_API_KEY"] = "bad-key"
         elif model == "chatgpt-test":
             temporary_key = os.environ["AZURE_API_KEY"]
             os.environ["AZURE_API_KEY"] = "bad-key"
-            custom_llm_provider = "azure"
         elif model == "claude-instant-1":
             temporary_key = os.environ["ANTHROPIC_API_KEY"]
             os.environ["ANTHROPIC_API_KEY"] = "bad-key"
@@ -104,6 +104,9 @@ def invalid_auth(model): # set the model key to an invalid key, depending on the model
         elif "togethercomputer" in model:
             temporary_key = os.environ["TOGETHERAI_API_KEY"]
             os.environ["TOGETHERAI_API_KEY"] = "84060c79880fc49df126d3e87b53f8a463ff6e1c6d27fe64207cde25cdfcd1f24a"
+        elif model in litellm.openrouter_models:
+            temporary_key = os.environ["OPENROUTER_API_KEY"]
+            os.environ["OPENROUTER_API_KEY"] = "bad-key"
         elif (
             model
             == "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
@@ -112,7 +115,7 @@ def invalid_auth(model): # set the model key to an invalid key, depending on the model
             os.environ["REPLICATE_API_KEY"] = "bad-key"
         print(f"model: {model}")
         response = completion(
-            model=model, messages=messages, custom_llm_provider=custom_llm_provider
+            model=model, messages=messages
         )
         print(f"response: {response}")
     except AuthenticationError as e:
@@ -123,6 +126,7 @@ def invalid_auth(model): # set the model key to an invalid key, depending on the model
         print(f"OpenAIError Caught Exception - {e}")
     except Exception as e:
         print(type(e))
+        print(type(AuthenticationError))
         print(e.__class__.__name__)
         print(f"Uncaught Exception - {e}")
         pytest.fail(f"Error occurred: {e}")
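
The hunks end before the cleanup, but temporary_key implies a restore step after the call so that later tests see real credentials again; presumably something along these lines, not shown in this diff:

    # restore the real key the test swapped out (sketch, not from the diff)
    if temporary_key is not None:
        if model == "gpt-3.5-turbo":
            os.environ["OPENAI_API_KEY"] = temporary_key
        elif model in litellm.openrouter_models:
            os.environ["OPENROUTER_API_KEY"] = temporary_key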