add test for proxy startup

Ishaan Jaff 2024-11-20 17:20:59 -08:00
parent e124753317
commit 048c137f86
4 changed files with 102 additions and 26 deletions

View file

@@ -1039,6 +1039,47 @@ jobs:
            ls
            python -m pytest -vv tests/otel_tests -x --junitxml=test-results/junit.xml --durations=5
          no_output_timeout: 120m
      # Clean up first container
      - run:
          name: Stop and remove first container
          command: |
            docker stop my-app
            docker rm my-app
      # Second Docker Container Run with Different Config
      - run:
          name: Run Second Docker container
          command: |
            docker run -d \
              -p 4000:4000 \
              -e DATABASE_URL=$PROXY_DATABASE_URL \
              -e REDIS_HOST=$REDIS_HOST \
              -e REDIS_PASSWORD=$REDIS_PASSWORD \
              -e REDIS_PORT=$REDIS_PORT \
              -e LITELLM_MASTER_KEY="sk-1234" \
              -e OPENAI_API_KEY=$OPENAI_API_KEY \
              -e LITELLM_LICENSE="bad-license" \
              --name my-app-3 \
              -v $(pwd)/litellm/proxy/example_config_yaml/enterprise_config.yaml:/app/config.yaml \
              my-app:latest \
              --config /app/config.yaml \
              --port 4000 \
              --detailed_debug
      - run:
          name: Start outputting logs for second container
          command: docker logs -f my-app-3
          background: true
      - run:
          name: Wait for second app to be ready
          command: dockerize -wait http://localhost:4000 -timeout 5m
      - run:
          name: Run second round of tests
          command: |
            python -m pytest -vv tests/basic_proxy_startup_tests -x --junitxml=test-results/junit-2.xml --durations=5
          no_output_timeout: 120m
      # Store test results
      - store_test_results:
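
The wait step above relies on dockerize inside the CI image. When reproducing this flow locally, a small poller like the one below (a sketch only, not part of the commit, assuming the proxy is published on localhost:4000 as in the docker run above and exposes the /health/liveness endpoint used by the new test) can stand in for `dockerize -wait`:

# wait_for_proxy.py - local stand-in for the `dockerize -wait` step (sketch, not part of the commit)
import time
import urllib.request


def wait_for_proxy(url: str = "http://localhost:4000/health/liveness", timeout: float = 300.0) -> None:
    """Poll the proxy's liveness endpoint until it answers 200 or the timeout expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with urllib.request.urlopen(url, timeout=5) as resp:
                if resp.status == 200:
                    return
        except OSError:
            pass  # proxy still starting up; retry shortly
        time.sleep(2)
    raise TimeoutError(f"proxy at {url} did not become ready within {timeout}s")


if __name__ == "__main__":
    wait_for_proxy()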

View file

@@ -0,0 +1,17 @@
model_list:
  - model_name: gpt-4
    litellm_params:
      model: openai/fake
      api_key: fake-key
      api_base: https://exampleopenaiendpoint-production.up.railway.app/
      tags: ["teamA"]
    model_info:
      id: "team-a-model"

litellm_settings:
  cache: true
  callbacks: ["prometheus"]

router_settings:
  enable_tag_filtering: True # 👈 Key Change
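
With tags: ["teamA"] on the deployment and enable_tag_filtering turned on, requests can be steered to that deployment by tag. The snippet below is a rough sketch of such a call, assuming tags are accepted via the request body's metadata field; the exact field name should be verified against LiteLLM's tag-based routing documentation.

# Sketch: calling the proxy with a tag so routing prefers the "teamA" deployment.
# The metadata["tags"] field is an assumption; confirm against the tag-routing docs.
import asyncio

import aiohttp


async def call_with_tag() -> dict:
    url = "http://0.0.0.0:4000/chat/completions"
    headers = {"Authorization": "Bearer sk-1234", "Content-Type": "application/json"}
    data = {
        "model": "gpt-4",
        "messages": [{"role": "user", "content": "Hello!"}],
        "metadata": {"tags": ["teamA"]},  # assumed tag-routing field
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(url, headers=headers, json=data) as response:
            response.raise_for_status()
            return await response.json()


if __name__ == "__main__":
    print(asyncio.run(call_with_tag()))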

View file

@@ -0,0 +1,44 @@
import pytest
import aiohttp
from typing import Optional


@pytest.mark.asyncio
async def test_health_and_chat_completion():
    """
    Test health endpoints and chat completion:
    1. Check /health/readiness
    2. Check /health/liveness
    3. Make a chat completion call
    """
    async with aiohttp.ClientSession() as session:
        # Test readiness endpoint
        async with session.get("http://0.0.0.0:4000/health/readiness") as response:
            assert response.status == 200
            readiness_response = await response.json()
            assert readiness_response["status"] == "OK"

        # Test liveness endpoint
        async with session.get("http://0.0.0.0:4000/health/liveness") as response:
            assert response.status == 200
            liveness_response = await response.json()
            assert liveness_response["status"] == "OK"

        # Make a chat completion call
        url = "http://0.0.0.0:4000/chat/completions"
        headers = {
            "Authorization": "Bearer sk-1234",
            "Content-Type": "application/json",
        }
        data = {
            "model": "gpt-4",
            "messages": [
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": "Hello!"},
            ],
        }
        async with session.post(url, headers=headers, json=data) as response:
            assert response.status == 200
            completion_response = await response.json()
            assert "choices" in completion_response

View file

@@ -1,26 +0,0 @@
model_list:
  - model_name: Azure OpenAI GPT-4 Canada
    litellm_params:
      model: azure/chatgpt-v-2
      api_base: os.environ/AZURE_API_BASE
      api_key: os.environ/AZURE_API_KEY
      api_version: "2023-07-01-preview"
    model_info:
      mode: chat
      input_cost_per_token: 0.0002
      id: gm
  - model_name: azure-embedding-model
    litellm_params:
      model: azure/azure-embedding-model
      api_base: os.environ/AZURE_API_BASE
      api_key: os.environ/AZURE_API_KEY
      api_version: "2023-07-01-preview"
    model_info:
      mode: embedding
      input_cost_per_token: 0.002
      id: hello
litellm_settings:
  drop_params: True
  set_verbose: True
  callbacks: ["prometheus", "gcs_bucket"]