diff --git a/litellm/proxy/tests/test_proxy_exception_mapping.py b/litellm/proxy/tests/test_proxy_exception_mapping.py
deleted file mode 100644
index 7fb5bedbe..000000000
--- a/litellm/proxy/tests/test_proxy_exception_mapping.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import openai
-client = openai.OpenAI(
-    api_key="anything",
-    # base_url="http://0.0.0.0:8000",
-)
-
-try:
-    # request sent to model set on litellm proxy, `litellm --model`
-    response = client.chat.completions.create(model="gpt-3.5-turbo", messages = [
-        {
-            "role": "user",
-            "content": "this is a test request, write a short poem"
-        },
-    ])
-
-    print(response)
-# except openai.APITimeoutError:
-#     print("Got openai Timeout Exception. Good job. The proxy mapped to OpenAI exceptions")
-except Exception as e:
-    print("\n the proxy did not map to OpenAI exception. Instead got", e)
-    print(e.type) # type: ignore
-    print(e.message) # type: ignore
-    print(e.code) # type: ignore
\ No newline at end of file
diff --git a/litellm/tests/test_configs/test_bad_config.yaml b/litellm/tests/test_configs/test_bad_config.yaml
new file mode 100644
index 000000000..aaa689306
--- /dev/null
+++ b/litellm/tests/test_configs/test_bad_config.yaml
@@ -0,0 +1,5 @@
+model_list:
+  - model_name: gpt-3.5-turbo
+    litellm_params:
+      api_key: bad-key
+      model: gpt-3.5-turbo
\ No newline at end of file
diff --git a/litellm/tests/test_proxy_exception_mapping.py b/litellm/tests/test_proxy_exception_mapping.py
new file mode 100644
index 000000000..a4cda1964
--- /dev/null
+++ b/litellm/tests/test_proxy_exception_mapping.py
@@ -0,0 +1,45 @@
+# test that the proxy actually does exception mapping to the OpenAI format
+
+import sys, os
+from dotenv import load_dotenv
+
+load_dotenv()
+import os, io, asyncio
+sys.path.insert(
+    0, os.path.abspath("../..")
+)  # Adds the parent directory to the system path
+import pytest
+import litellm, openai
+from fastapi.testclient import TestClient
+from fastapi import FastAPI
+from litellm.proxy.proxy_server import router, save_worker_config, initialize  # Replace with the actual module where your FastAPI router is defined
+
+@pytest.fixture
+def client():
+    filepath = os.path.dirname(os.path.abspath(__file__))
+    config_fp = f"{filepath}/test_configs/test_bad_config.yaml"
+    initialize(config=config_fp)
+    app = FastAPI()
+    app.include_router(router)  # Include your router in the test app
+    return TestClient(app)
+
+def test_chat_completion_exception(client):
+    try:
+        base_url = client.base_url
+        print("Base url of client= ", base_url)
+
+        openai_client = openai.OpenAI(
+            api_key="anything",
+            base_url="http://0.0.0.0:8000",
+        )
+
+        response = openai_client.chat.completions.create(model="gpt-3.5-turbo", messages = [
+            {
+                "role": "user",
+                "content": "this is a test request, write a short poem"
+            },
+        ])
+    except openai.AuthenticationError:
+        print("Got openai Auth Exception. Good job. The proxy mapped to OpenAI exceptions")
+    except Exception as e:
+        pytest.fail(f"LiteLLM Proxy test failed. Exception {str(e)}")
\ No newline at end of file
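
Review note: as written, test_chat_completion_exception builds an in-process TestClient but then points the OpenAI SDK at http://0.0.0.0:8000, so the assertion only exercises a proxy that happens to be running on that port, not the fixture. A minimal sketch of driving the same check through the TestClient instead; the /chat/completions route name and the 401 status code are assumptions about the proxy's router, not confirmed by this diff:

    # hypothetical in-process variant -- not part of the diff above;
    # reuses the `client` fixture defined in test_proxy_exception_mapping.py
    def test_chat_completion_exception_inprocess(client):
        response = client.post(
            "/chat/completions",  # assumed route exposed by the proxy router
            json={
                "model": "gpt-3.5-turbo",
                "messages": [
                    {"role": "user", "content": "this is a test request, write a short poem"}
                ],
            },
        )
        # the bad api_key in test_bad_config.yaml should surface as an
        # OpenAI-style authentication error (401), not a raw 500
        assert response.status_code == 401

Such a variant would drop the dependency on a separately launched litellm process, at the cost of assuming the router's path layout.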