mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 10:44:24 +00:00
(test) add proxy cli testing
This commit is contained in:
parent
fcc250b312
commit
b283dd2a07
1 changed files with 48 additions and 0 deletions
48
litellm/tests/test_proxy_cli.py
Normal file
48
litellm/tests/test_proxy_cli.py
Normal file
|
@ -0,0 +1,48 @@
|
|||
import subprocess
|
||||
import time
|
||||
import openai
|
||||
import pytest
|
||||
from dotenv import load_dotenv
|
||||
|
||||
load_dotenv()
|
||||
|
||||
## This tests the litellm proxy cli, it creates a proxy server and makes a basic chat completion request to gpt-3.5-turbo
|
||||
## Do not comment this test out
|
||||
|
||||
def test_basic_proxy_cli_command():
    """Start the litellm proxy via its CLI and make one chat completion.

    Spawns ``proxy_cli.py`` as a subprocess serving ``gpt-3.5-turbo`` on
    port 51670, waits briefly for startup, then sends a basic request
    through the OpenAI client pointed at the local proxy.  The subprocess
    is always terminated, even when the request or an assertion fails.
    """
    # Launch as an argv list (no shell=True): with a shell string,
    # process.terminate() would kill the intermediate shell rather than
    # the proxy itself, leaking the server process.
    command = [
        "python3",
        "../proxy/proxy_cli.py",
        "--model", "gpt-3.5-turbo",
        "--port", "51670",
    ]
    print("Running command to start proxy")

    # Start the subprocess asynchronously
    process = subprocess.Popen(command)

    try:
        # Allow some time for the proxy server to start (adjust as needed)
        time.sleep(1)

        # Make a request using the openai package against the local proxy
        client = openai.OpenAI(
            api_key="Your API Key",  # Replace with your actual API key
            base_url="http://0.0.0.0:51670"
        )

        try:
            response = client.chat.completions.create(model="gpt-3.5-turbo", messages=[
                {
                    "role": "user",
                    "content": "this is a test request, write a short poem"
                }
            ])
            print(response)
            response_str = response.choices[0].message.content
            assert len(response_str) > 10
        except Exception as e:
            print("Got exception")
            print(e)
            # pytest.fail's second positional parameter is `pytrace` (a
            # bool); passing the exception there raises a TypeError.  Fold
            # the exception into the failure message instead.
            pytest.fail(f"Basic test, proxy cli failed: {e}")
    finally:
        # Terminate the subprocess to close down the server in all cases,
        # including unexpected exceptions before the inner try.
        process.terminate()
|
||||
if __name__ == "__main__":
    # Guarded so pytest does not execute the test a second time at import/
    # collection; direct `python test_proxy_cli.py` still runs it.
    test_basic_proxy_cli_command()
|
Loading…
Add table
Add a link
Reference in a new issue