forked from phoenix/litellm-mirror
(feat) mistral - add exception mapping
Commit c0cc78b943 (parent a033016a63)
3 changed files with 37 additions and 3 deletions
@@ -163,6 +163,14 @@ openai_compatible_endpoints: List = [
     "api.mistral.ai/v1"
 ]
 
+# this is maintained for Exception Mapping
+openai_compatible_providers: List = [
+    "anyscale",
+    "mistral",
+    "deepinfra",
+    "perplexity"
+]
+
 
 # well supported replicate llms
 replicate_models: List = [
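For orientation, a minimal sketch of how the new list is meant to be consumed: membership in litellm.openai_compatible_providers is what routes a provider's errors through the OpenAI-style exception mapping (this snippet only checks membership and is not part of the commit itself).

import litellm

# "mistral" is now listed alongside anyscale, deepinfra and perplexity, so its
# errors fall into the same OpenAI-compatible mapping branch (see the
# exception_type hunk further down).
provider = "mistral"
print(provider in litellm.openai_compatible_providers)  # expected: True with this change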
@@ -197,7 +197,7 @@ def test_completion_azure_exception():
         print("good job got the correct error for azure when key not set")
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
-test_completion_azure_exception()
+# test_completion_azure_exception()
 
 async def asynctest_completion_azure_exception():
     try:
@@ -227,7 +227,6 @@ async def asynctest_completion_azure_exception():
         print("Got wrong exception")
         print("exception", e)
         pytest.fail(f"Error occurred: {e}")
-
 # import asyncio
 # asyncio.run(
 #     asynctest_completion_azure_exception()
@@ -261,6 +260,33 @@ def test_completion_openai_exception():
         pytest.fail(f"Error occurred: {e}")
 # test_completion_openai_exception()
 
+def test_completion_mistral_exception():
+    # test that mistral/mistral-tiny raises openai.AuthenticationError on a bad key
+    try:
+        import openai
+        print("Testing mistral ai exception mapping")
+        litellm.set_verbose = False
+        ## Test Mistral call with an invalid key
+        old_api_key = os.environ["MISTRAL_API_KEY"]
+        os.environ["MISTRAL_API_KEY"] = "good morning"
+        response = completion(
+            model="mistral/mistral-tiny",
+            messages=[
+                {
+                    "role": "user",
+                    "content": "hello"
+                }
+            ],
+        )
+        print(f"response: {response}")
+        print(response)
+    except openai.AuthenticationError as e:
+        os.environ["MISTRAL_API_KEY"] = old_api_key
+        print("good job got the correct error for mistral when key not set")
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+# test_completion_mistral_exception()
+
 
 
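Outside of pytest, the same mapping can be exercised with a short script. This is a sketch under the assumption that a deliberately invalid MISTRAL_API_KEY makes the provider return an auth failure, which litellm is expected to re-raise as openai.AuthenticationError, mirroring the test above.

import os
import openai
from litellm import completion

os.environ["MISTRAL_API_KEY"] = "bad-key"  # intentionally invalid

try:
    completion(
        model="mistral/mistral-tiny",
        messages=[{"role": "user", "content": "hello"}],
    )
except openai.AuthenticationError as e:
    # With the mapping in place, the provider error surfaces as an OpenAI exception type.
    print(f"mapped correctly: {type(e).__name__}: {e}")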
@@ -4163,7 +4163,7 @@ def exception_type(
                 llm_provider=custom_llm_provider
             )
 
-        if custom_llm_provider == "openai" or custom_llm_provider == "text-completion-openai" or custom_llm_provider == "custom_openai":
+        if custom_llm_provider == "openai" or custom_llm_provider == "text-completion-openai" or custom_llm_provider == "custom_openai" or custom_llm_provider in litellm.openai_compatible_providers:
             if "This model's maximum context length is" in error_str or "Request too large" in error_str:
                 exception_mapping_worked = True
                 raise ContextWindowExceededError(
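To make the effect of the widened condition concrete, here is an illustrative-only sketch (not litellm's actual exception_type code) of the branch it gates; only the two error strings visible in the hunk are used, and the helper name is hypothetical.

def hits_context_window_branch(custom_llm_provider: str, error_str: str,
                               openai_compatible_providers: list) -> bool:
    # Hypothetical helper: mirrors the condition in the hunk above, nothing more.
    openai_like = (
        custom_llm_provider in ("openai", "text-completion-openai", "custom_openai")
        or custom_llm_provider in openai_compatible_providers
    )
    return openai_like and (
        "This model's maximum context length is" in error_str
        or "Request too large" in error_str
    )

# A Mistral error now reaches the same mapping branch as an OpenAI error:
print(hits_context_window_branch(
    "mistral",
    "This model's maximum context length is 32768 tokens.",
    ["anyscale", "mistral", "deepinfra", "perplexity"],
))  # True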