forked from phoenix/litellm-mirror
test(test_users.py): add testing for global proxy spend tracking
This commit is contained in:
parent
574208f005
commit
624da17698
1 changed file with 62 additions and 1 deletion
|
@ -4,6 +4,7 @@ import pytest
|
|||
import asyncio
|
||||
import aiohttp
|
||||
import time
|
||||
from openai import AsyncOpenAI
|
||||
|
||||
|
||||
async def new_user(session, i, user_id=None, budget=None, budget_duration=None):
|
||||
|
@ -105,7 +106,7 @@ async def test_user_update():
|
|||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_users_with_budgets():
|
||||
async def test_users_budgets_reset():
|
||||
"""
|
||||
- Create key with budget and 5s duration
|
||||
- Get 'reset_at' value
|
||||
|
@ -128,3 +129,63 @@ async def test_users_with_budgets():
|
|||
)
|
||||
reset_at_new_value = user_info["user_info"]["budget_reset_at"]
|
||||
assert reset_at_init_value != reset_at_new_value
|
||||
|
||||
|
||||
async def chat_completion(session, key, model="gpt-4"):
    """
    Make a single non-streaming chat completion call through the local proxy.

    Args:
        session: aiohttp client session (unused here; kept for interface
            parity with the other helpers in this test file).
        key: API key ("sk-..." virtual key) used to authenticate to the proxy.
        model: model name to route the request to.

    Returns:
        The ChatCompletion response object, so callers can assert on it.
        (Fix: the original computed the response but never returned it.)
    """
    client = AsyncOpenAI(api_key=key, base_url="http://0.0.0.0:4000")
    messages = [
        {"role": "system", "content": "You are a helpful assistant"},
        # time.time() presumably makes each prompt unique so caching cannot
        # short-circuit the spend-tracking path under test — TODO confirm.
        {"role": "user", "content": f"Hello! {time.time()}"},
    ]

    data = {
        "model": model,
        "messages": messages,
    }
    response = await client.chat.completions.create(**data)
    return response
|
||||
|
||||
|
||||
async def chat_completion_streaming(session, key, model="gpt-4"):
    """
    Issue a streaming chat completion through the local proxy and fully
    drain the stream, so the proxy records spend for the streamed call.

    Args:
        session: aiohttp client session (unused here; kept for interface
            parity with the other helpers in this test file).
        key: API key ("sk-..." virtual key) used to authenticate to the proxy.
        model: model name to route the request to.
    """
    client = AsyncOpenAI(api_key=key, base_url="http://0.0.0.0:4000")

    request = {
        "model": model,
        "messages": [
            {"role": "system", "content": "You are a helpful assistant"},
            {"role": "user", "content": f"Hello! {time.time()}"},
        ],
        "stream": True,
    }
    stream = await client.chat.completions.create(**request)
    # Consume every chunk; the content itself is irrelevant to the test.
    async for _ in stream:
        pass
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_global_proxy_budget_update():
    """
    Verify the proxy's global spend counter increases after both call styles.

    - Get proxy current spend (tracked under the "litellm-proxy-budget" user)
    - Make chat completion call (normal)
    - Assert spend increased
    - Make chat completion call (streaming)
    - Assert spend increased
    """
    get_user = "litellm-proxy-budget"  # fix: was an f-string with no placeholders
    async with aiohttp.ClientSession() as session:
        user_info = await get_user_info(
            session=session, get_user=get_user, call_user="sk-1234"
        )
        original_spend = user_info["user_info"]["spend"]

        await chat_completion(session=session, key="sk-1234")
        await asyncio.sleep(5)  # let db update
        user_info = await get_user_info(
            session=session, get_user=get_user, call_user="sk-1234"
        )
        new_spend = user_info["user_info"]["spend"]
        print(f"new_spend: {new_spend}; original_spend: {original_spend}")
        assert new_spend > original_spend

        await chat_completion_streaming(session=session, key="sk-1234")
        await asyncio.sleep(5)  # let db update
        user_info = await get_user_info(
            session=session, get_user=get_user, call_user="sk-1234"
        )
        new_new_spend = user_info["user_info"]["spend"]
        # fix: log the values actually being compared (the original re-printed
        # new_spend vs original_spend, hiding the streaming-call result)
        print(f"new_new_spend: {new_new_spend}; new_spend: {new_spend}")
        assert new_new_spend > new_spend
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue