forked from phoenix/litellm-mirror
Merge pull request #1765 from BerriAI/litellm_show_correct_provider_in_exception
[Feat] Show correct provider in exceptions - for Mistral API, PerplexityAPI
commit 01a69ea5a8
2 changed files with 81 additions and 25 deletions
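In effect, exceptions raised by litellm for OpenAI-compatible providers now carry the real provider's name in their message instead of a hardcoded "OpenAIException" prefix, and llm_provider is set to the actual provider. A minimal sketch of the caller-visible behaviour, mirroring the Perplexity test added below (the key value here is illustrative and deliberately invalid):

import os

import openai
from litellm import completion

os.environ["PERPLEXITYAI_API_KEY"] = "bad-key"  # deliberately invalid
try:
    completion(
        model="perplexity/mistral-7b-instruct",
        messages=[{"role": "user", "content": "hello"}],
    )
except openai.AuthenticationError as e:
    # With this change the message starts with "PerplexityException - ..."
    # and the exception's llm_provider is "perplexity" rather than "openai".
    print(e)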
@@ -444,6 +444,52 @@ def test_content_policy_violation_error_streaming():
     asyncio.run(test_get_error())


+def test_completion_perplexity_exception():
+    try:
+        import openai
+
+        print("perplexity test\n\n")
+        litellm.set_verbose = True
+        ## Test azure call
+        old_azure_key = os.environ["PERPLEXITYAI_API_KEY"]
+        os.environ["PERPLEXITYAI_API_KEY"] = "good morning"
+        response = completion(
+            model="perplexity/mistral-7b-instruct",
+            messages=[{"role": "user", "content": "hello"}],
+        )
+        os.environ["PERPLEXITYAI_API_KEY"] = old_azure_key
+        pytest.fail("Request should have failed - bad api key")
+    except openai.AuthenticationError as e:
+        os.environ["PERPLEXITYAI_API_KEY"] = old_azure_key
+        print("exception: ", e)
+        assert "PerplexityException" in str(e)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
+
+def test_completion_openai_api_key_exception():
+    try:
+        import openai
+
+        print("gpt-3.5 test\n\n")
+        litellm.set_verbose = True
+        ## Test azure call
+        old_azure_key = os.environ["OPENAI_API_KEY"]
+        os.environ["OPENAI_API_KEY"] = "good morning"
+        response = completion(
+            model="gpt-3.5-turbo",
+            messages=[{"role": "user", "content": "hello"}],
+        )
+        os.environ["OPENAI_API_KEY"] = old_azure_key
+        pytest.fail("Request should have failed - bad api key")
+    except openai.AuthenticationError as e:
+        os.environ["OPENAI_API_KEY"] = old_azure_key
+        print("exception: ", e)
+        assert "OpenAIException" in str(e)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
+
 # tesy_async_acompletion()

 # # test_invalid_request_error(model="command-nightly")
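The PR title also mentions the Mistral API, which the tests above do not cover. A hypothetical analogue (not part of this diff), assuming "mistral" is listed in litellm.openai_compatible_providers so the same mapping applies, and relying on the same module-level imports as the tests above (os, pytest, litellm, completion); the model name is an illustrative choice:

def test_completion_mistral_exception():
    try:
        import openai

        litellm.set_verbose = True
        old_key = os.environ.get("MISTRAL_API_KEY", "")
        os.environ["MISTRAL_API_KEY"] = "bad-key"  # deliberately invalid
        completion(
            model="mistral/mistral-tiny",  # hypothetical model choice
            messages=[{"role": "user", "content": "hello"}],
        )
        os.environ["MISTRAL_API_KEY"] = old_key
        pytest.fail("Request should have failed - bad api key")
    except openai.AuthenticationError as e:
        os.environ["MISTRAL_API_KEY"] = old_key
        # The provider name should now appear in the message
        assert "MistralException" in str(e)
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")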
@@ -5906,14 +5906,24 @@ def exception_type(
             or custom_llm_provider == "custom_openai"
             or custom_llm_provider in litellm.openai_compatible_providers
         ):
+            # custom_llm_provider is openai, make it OpenAI
+            if custom_llm_provider == "openai":
+                exception_provider = "OpenAI" + "Exception"
+            else:
+                exception_provider = (
+                    custom_llm_provider[0].upper()
+                    + custom_llm_provider[1:]
+                    + "Exception"
+                )
+
             if (
                 "This model's maximum context length is" in error_str
                 or "Request too large" in error_str
             ):
                 exception_mapping_worked = True
                 raise ContextWindowExceededError(
-                    message=f"OpenAIException - {original_exception.message}",
-                    llm_provider="openai",
+                    message=f"{exception_provider} - {original_exception.message}",
+                    llm_provider=custom_llm_provider,
                     model=model,
                     response=original_exception.response,
                 )
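Pulled out as a standalone sketch, the naming rule introduced in the hunk above (the helper name is illustrative, not part of the diff):

def provider_exception_prefix(custom_llm_provider: str) -> str:
    # "openai" keeps its conventional capitalisation; every other provider
    # just gets its first letter upper-cased.
    if custom_llm_provider == "openai":
        return "OpenAI" + "Exception"
    return custom_llm_provider[0].upper() + custom_llm_provider[1:] + "Exception"


print(provider_exception_prefix("perplexity"))  # PerplexityException
print(provider_exception_prefix("mistral"))     # MistralException
print(provider_exception_prefix("openai"))      # OpenAIException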
@@ -5923,8 +5933,8 @@ def exception_type(
             ):
                 exception_mapping_worked = True
                 raise NotFoundError(
-                    message=f"OpenAIException - {original_exception.message}",
-                    llm_provider="openai",
+                    message=f"{exception_provider} - {original_exception.message}",
+                    llm_provider=custom_llm_provider,
                     model=model,
                     response=original_exception.response,
                 )
@@ -5934,8 +5944,8 @@ def exception_type(
             ):
                 exception_mapping_worked = True
                 raise ContentPolicyViolationError(
-                    message=f"OpenAIException - {original_exception.message}",
-                    llm_provider="openai",
+                    message=f"{exception_provider} - {original_exception.message}",
+                    llm_provider=custom_llm_provider,
                     model=model,
                     response=original_exception.response,
                 )
@@ -5945,8 +5955,8 @@ def exception_type(
             ):
                 exception_mapping_worked = True
                 raise BadRequestError(
-                    message=f"OpenAIException - {original_exception.message}",
-                    llm_provider="openai",
+                    message=f"{exception_provider} - {original_exception.message}",
+                    llm_provider=custom_llm_provider,
                     model=model,
                     response=original_exception.response,
                 )
@@ -5955,63 +5965,63 @@ def exception_type(
             if original_exception.status_code == 401:
                 exception_mapping_worked = True
                 raise AuthenticationError(
-                    message=f"OpenAIException - {original_exception.message}",
-                    llm_provider="openai",
+                    message=f"{exception_provider} - {original_exception.message}",
+                    llm_provider=custom_llm_provider,
                     model=model,
                     response=original_exception.response,
                 )
             elif original_exception.status_code == 404:
                 exception_mapping_worked = True
                 raise NotFoundError(
-                    message=f"OpenAIException - {original_exception.message}",
+                    message=f"{exception_provider} - {original_exception.message}",
                     model=model,
-                    llm_provider="openai",
+                    llm_provider=custom_llm_provider,
                     response=original_exception.response,
                 )
             elif original_exception.status_code == 408:
                 exception_mapping_worked = True
                 raise Timeout(
-                    message=f"OpenAIException - {original_exception.message}",
+                    message=f"{exception_provider} - {original_exception.message}",
                     model=model,
-                    llm_provider="openai",
+                    llm_provider=custom_llm_provider,
                 )
             elif original_exception.status_code == 422:
                 exception_mapping_worked = True
                 raise BadRequestError(
-                    message=f"OpenAIException - {original_exception.message}",
+                    message=f"{exception_provider} - {original_exception.message}",
                     model=model,
-                    llm_provider="openai",
+                    llm_provider=custom_llm_provider,
                     response=original_exception.response,
                 )
             elif original_exception.status_code == 429:
                 exception_mapping_worked = True
                 raise RateLimitError(
-                    message=f"OpenAIException - {original_exception.message}",
+                    message=f"{exception_provider} - {original_exception.message}",
                     model=model,
-                    llm_provider="openai",
+                    llm_provider=custom_llm_provider,
                     response=original_exception.response,
                 )
             elif original_exception.status_code == 503:
                 exception_mapping_worked = True
                 raise ServiceUnavailableError(
-                    message=f"OpenAIException - {original_exception.message}",
+                    message=f"{exception_provider} - {original_exception.message}",
                     model=model,
-                    llm_provider="openai",
+                    llm_provider=custom_llm_provider,
                     response=original_exception.response,
                 )
             elif original_exception.status_code == 504:  # gateway timeout error
                 exception_mapping_worked = True
                 raise Timeout(
-                    message=f"OpenAIException - {original_exception.message}",
+                    message=f"{exception_provider} - {original_exception.message}",
                     model=model,
-                    llm_provider="openai",
+                    llm_provider=custom_llm_provider,
                 )
             else:
                 exception_mapping_worked = True
                 raise APIError(
                     status_code=original_exception.status_code,
-                    message=f"OpenAIException - {original_exception.message}",
-                    llm_provider="openai",
+                    message=f"{exception_provider} - {original_exception.message}",
+                    llm_provider=custom_llm_provider,
                     model=model,
                     request=original_exception.request,
                 )
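For reference, the status-code handling in the hunk above is unchanged by this PR apart from the message prefix and llm_provider. Summarised as a sketch (the dict name is illustrative, and the exception classes are assumed importable from litellm.exceptions, matching the names raised above):

from litellm.exceptions import (
    APIError,
    AuthenticationError,
    BadRequestError,
    NotFoundError,
    RateLimitError,
    ServiceUnavailableError,
    Timeout,
)

# HTTP status code -> litellm exception raised by exception_type()
STATUS_TO_EXCEPTION = {
    401: AuthenticationError,
    404: NotFoundError,
    408: Timeout,
    422: BadRequestError,
    429: RateLimitError,
    503: ServiceUnavailableError,
    504: Timeout,  # gateway timeout
}
# Any other status code falls through to APIError, which also carries the
# original status_code.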
@@ -7015,7 +7025,7 @@ def exception_type(
             ):  # deal with edge-case invalid request error bug in openai-python sdk
                 exception_mapping_worked = True
                 raise BadRequestError(
-                    message=f"OpenAIException: This can happen due to missing AZURE_API_VERSION: {str(original_exception)}",
+                    message=f"{exception_provider}: This can happen due to missing AZURE_API_VERSION: {str(original_exception)}",
                     model=model,
                     llm_provider=custom_llm_provider,
                     response=original_exception.response,