diff --git a/litellm/tests/test_proxy_cli.py b/litellm/tests/test_proxy_cli.py
new file mode 100644
index 000000000..41c9ac604
--- /dev/null
+++ b/litellm/tests/test_proxy_cli.py
@@ -0,0 +1,48 @@
+import subprocess
+import time
+import openai
+import pytest
+from dotenv import load_dotenv
+
+load_dotenv()
+
+## This tests the litellm proxy CLI: it starts a proxy server and makes a basic chat completion request to gpt-3.5-turbo.
+## Do not comment this test out.
+
+def test_basic_proxy_cli_command():
+
+    # Command that starts the proxy on a fixed port
+    command = "python3 ../proxy/proxy_cli.py --model gpt-3.5-turbo --port 51670"
+    print("Running command to start proxy")
+
+    # Start the proxy server in a background subprocess
+    process = subprocess.Popen(command, shell=True)
+
+    # Allow some time for the proxy server to start (adjust as needed)
+    time.sleep(5)
+
+    # Point the openai client at the local proxy
+    client = openai.OpenAI(
+        api_key="Your API Key",  # Replace with your actual API key
+        base_url="http://0.0.0.0:51670"
+    )
+
+    try:
+        response = client.chat.completions.create(model="gpt-3.5-turbo", messages=[
+            {
+                "role": "user",
+                "content": "this is a test request, write a short poem"
+            }
+        ])
+        print(response)
+        response_str = response.choices[0].message.content
+        assert len(response_str) > 10
+    except Exception as e:
+        print("Got exception")
+        print(e)
+        process.terminate()  # Shut down the proxy before failing the test
+        pytest.fail(f"Basic test, proxy cli failed: {e}")
+
+    # Terminate the subprocess to close down the server
+    process.terminate()
+test_basic_proxy_cli_command()
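
One caveat with the fixed `time.sleep(...)` is that the test races the proxy's startup: on a slow machine the request can fire before the server is listening. As a possible hardening (not part of this PR), here is a minimal sketch of a readiness poll; the helper name `wait_for_proxy`, the 30-second timeout, and the 0.25s retry interval are all assumptions:

```python
import socket
import time

def wait_for_proxy(host: str = "127.0.0.1", port: int = 51670, timeout: float = 30.0) -> bool:
    """Poll until a TCP connection to the proxy succeeds or the timeout elapses.

    Hypothetical helper, not part of the PR; port 51670 mirrors the test above.
    """
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            # A successful connect means the server socket is accepting traffic.
            with socket.create_connection((host, port), timeout=1):
                return True
        except OSError:
            time.sleep(0.25)  # server not up yet; back off briefly and retry
    return False
```

With something like this, the `time.sleep(5)` in the test could become `assert wait_for_proxy(), "proxy never started"`, which fails fast with a clear message instead of surfacing as a connection error inside the OpenAI call.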