mirror of https://github.com/BerriAI/litellm.git
synced 2025-04-24 10:14:26 +00:00
(test) proxy_server /chat/completions

parent 7d221fe863
commit 8a72487dcf

1 changed file with 48 additions and 0 deletions
litellm/tests/test_proxy_server.py  +48  (new file)
@@ -0,0 +1,48 @@
+import sys, os
+import traceback
+from dotenv import load_dotenv
+
+load_dotenv()
+import os, io
+
+# this file is to test litellm/proxy
+
+sys.path.insert(
+    0, os.path.abspath("../..")
+)  # Adds the parent directory to the system path
+import pytest
+import litellm
+from litellm import embedding, completion, completion_cost, Timeout
+from litellm import RateLimitError
+
+# test /chat/completions request to the proxy
+from fastapi.testclient import TestClient
+from fastapi import FastAPI
+from litellm.proxy.proxy_server import router  # Replace with the actual module where your FastAPI router is defined
+app = FastAPI()
+app.include_router(router)  # Include your router in the test app
+client = TestClient(app)
+def test_chat_completion():
+    try:
+        # Your test data
+        test_data = {
+            "model": "gpt-3.5-turbo",
+            "messages": [
+                {
+                    "role": "user",
+                    "content": "hi"
+                },
+            ],
+            "max_tokens": 10,
+        }
+        print("testing proxy server")
+        response = client.post("/v1/chat/completions", json=test_data)
+
+        assert response.status_code == 200
+        result = response.json()
+        print(f"Received response: {result}")
+    except Exception as e:
+        pytest.fail(f"LiteLLM Proxy test failed. Exception: {e}")
+
+# Run the test
+test_chat_completion()
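For reference, a minimal sketch of running just this file through pytest from the repository root (assuming pytest and the proxy dependencies are installed; the -s flag surfaces the test's print() output, which pytest otherwise captures):

    import pytest

    # Collect and run only this test file; returns an exit code (0 on success).
    pytest.main(["litellm/tests/test_proxy_server.py", "-s"])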