import subprocess
import time
import openai
import pytest
from dotenv import load_dotenv
import os

load_dotenv()
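# (load_dotenv() fills os.environ from a local .env; the proxy subprocess
# started below inherits that environment, which is presumably where it
# picks up its provider key, e.g. OPENAI_API_KEY.)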

## This tests the litellm proxy CLI: it starts a proxy server and makes a basic chat completion request to gpt-3.5-turbo
## Do not comment this test out
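## The test can be run with pytest or directly as a script (see the call at the bottom of the file)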


def test_basic_proxy_cli_command():
    # Command to run
    print("current working dir", os.getcwd())

    command = "python3 litellm/proxy/proxy_cli.py --model gpt-3.5-turbo --port 51670 --debug"
    print("Running command to start proxy")

    # Start the subprocess asynchronously
    process = subprocess.Popen(command, shell=True)

    # Allow some time for the proxy server to start (adjust as needed)
    time.sleep(1)
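    # Note: a fixed one-second sleep can be flaky on slower machines. A
    # minimal readiness-poll sketch (assuming only that the proxy listens on
    # the port, nothing about its HTTP routes) would be:
    #
    #   import socket
    #   deadline = time.time() + 30
    #   while time.time() < deadline:
    #       try:
    #           socket.create_connection(("127.0.0.1", 51670), timeout=1).close()
    #           break
    #       except OSError:
    #           time.sleep(0.5)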

    # Make a request using the openai package
    client = openai.OpenAI(
        api_key="Your API Key",  # Replace with your actual API key
        base_url="http://0.0.0.0:51670"
    )
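    # (This key is sent to the proxy, not to OpenAI; unless the proxy is
    # configured to require auth, it is presumably ignored and the proxy's
    # own provider credentials are used instead.)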

    try:
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {
                    "role": "user",
                    "content": "this is a test request, write a short poem"
                }
            ]
        )
        print(response)
        response_str = response.choices[0].message.content
        assert len(response_str) > 10
    except Exception as e:
        print("Got exception")
        print(e)
        process.terminate()  # Terminate the subprocess to close down the server
        # pytest.fail's second positional argument is pytrace (a bool), so the
        # exception belongs in the message string instead
        pytest.fail(f"Basic test, proxy cli failed: {e}")

    # Terminate the subprocess to close down the server
    process.terminate()


test_basic_proxy_cli_command()