forked from phoenix/litellm-mirror

test(test_key_generate_prisma.py): add unit testing for global proxy budget

parent 30a8071bf1 · commit f148094d18

3 changed files with 182 additions and 9 deletions
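In brief: the proxy now tracks a shared spend row, named by `litellm_proxy_budget_name`, alongside each caller's own user row, and rejects requests once either row crosses its `max_budget`. The snippet below is only a minimal sketch of that enforcement idea for orientation; `check_budgets`, the dict field names, and the exception text are illustrative assumptions, not the proxy's actual code.

    # Hypothetical sketch of the global-proxy-budget idea (not litellm's implementation).
    litellm_proxy_budget_name = "litellm-proxy-budget"  # shared row holding total proxy spend

    def check_budgets(user_rows: list[dict]) -> None:
        """Reject the request if any fetched row (the key's user or the
        global proxy-budget row) has spent more than its max_budget."""
        for row in user_rows:
            max_budget = row.get("max_budget")
            if max_budget is not None and row.get("spend", 0) > max_budget:
                raise Exception(
                    f"ExceededBudget: {row.get('user_id')} spend={row.get('spend')} > max_budget={max_budget}"
                )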
@@ -197,6 +197,7 @@ use_queue = False
 health_check_interval = None
 health_check_results = {}
 queue: List = []
+litellm_proxy_budget_name = "litellm-proxy-budget"
 ### INITIALIZE GLOBAL LOGGING OBJECT ###
 proxy_logging_obj = ProxyLogging(user_api_key_cache=user_api_key_cache)
 ### REDIS QUEUE ###
@@ -374,7 +375,7 @@ async def user_api_key_auth(
         if valid_token.user_id is not None:
             if prisma_client is not None:
                 user_id_information = await prisma_client.get_data(
-                    user_id_list=[valid_token.user_id, "litellm-proxy-budget"],
+                    user_id_list=[valid_token.user_id, litellm_proxy_budget_name],
                     table_name="user",
                     query_type="find_all",
                 )
@@ -672,7 +673,7 @@ async def update_database(
        - Update that user's row
        - Update litellm-proxy-budget row (global proxy spend)
        """
-        user_ids = [user_id, "litellm-proxy-budget"]
+        user_ids = [user_id, litellm_proxy_budget_name]
         data_list = []
         for id in user_ids:
             if id is None:
@@ -685,6 +686,7 @@ async def update_database(
             )
             if existing_spend_obj is None:
                 existing_spend = 0
+                existing_spend = LiteLLM_UserTable(user_id=id, spend=0)
             else:
                 existing_spend = existing_spend_obj.spend
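For context on the two hunks above: update_database now writes each request's cost to both the caller's user row and the shared proxy-budget row. The sketch below only illustrates that loop; the single-id `prisma_client.get_data(...)` call shape and the `response_cost` variable are assumptions for illustration, not lines copied from the function.

    # Hypothetical sketch of the per-request spend update (assumed call shapes).
    user_ids = [user_id, litellm_proxy_budget_name]   # caller's row + global proxy row
    for id in user_ids:
        if id is None:  # e.g. a key created without a user_id
            continue
        existing_spend_obj = await prisma_client.get_data(user_id=id)  # assumed signature
        if existing_spend_obj is None:
            existing_spend = 0  # row doesn't exist yet (first use of the budget row)
        else:
            existing_spend = existing_spend_obj.spend
        new_spend = existing_spend + response_cost  # response_cost: cost of this request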
@@ -1624,7 +1626,7 @@ async def startup_event():
     ):
         # add proxy budget to db in the user table
         await generate_key_helper_fn(
-            user_id="litellm-proxy-budget",
+            user_id=litellm_proxy_budget_name,
             duration=None,
             models=[],
             aliases={},
@@ -454,8 +454,7 @@ class PrismaClient:
                 # Execute the raw query
                 # The asterisk before `user_id_list` unpacks the list into separate arguments
                 response = await self.db.query_raw(sql_query)
                 return response
-            elif query_type == "find_all":
-
+            elif table_name == "user" and query_type == "find_all":
                 response = await self.db.litellm_usertable.find_many(  # type: ignore
                     order={"spend": "desc"},
                 )
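The narrowed `elif` above scopes the spend-ordered find_many to the user table only, so a `find_all` against another table can no longer fall into this branch. As used by the user_api_key_auth hunk earlier in this commit, the call looks like this (names taken from that hunk, nothing new introduced):

    # Fetch the key's own user row together with the global proxy-budget row in one call.
    user_id_information = await prisma_client.get_data(
        user_id_list=[valid_token.user_id, litellm_proxy_budget_name],
        table_name="user",
        query_type="find_all",
    )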
@@ -24,7 +24,7 @@ from fastapi import Request
 from datetime import datetime

 load_dotenv()
-import os, io
+import os, io, time

 # this file is to test litellm/proxy

@@ -83,6 +83,7 @@ def prisma_client():

    # Reset litellm.proxy.proxy_server.prisma_client to None
    litellm.proxy.proxy_server.custom_db_client = None
+    litellm.proxy.proxy_server.litellm_proxy_budget_name = "litellm-proxy-budget"

    return prisma_client

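The fixture reset above appears to matter because the two tests added below point the proxy at a uniquely named budget row; restoring the default name in the fixture keeps that override from leaking into other tests. The override pattern, exactly as used in the new tests, is:

    # Each test uses a timestamped budget row so reruns don't collide on shared DB state.
    litellm_proxy_budget_name = f"litellm-proxy-budget-{time.time()}"
    setattr(
        litellm.proxy.proxy_server,
        "litellm_proxy_budget_name",
        litellm_proxy_budget_name,
    )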
@@ -282,6 +283,90 @@ def test_call_with_user_over_budget(prisma_client):
         print(vars(e))


+def test_call_with_proxy_over_budget(prisma_client):
+    # 5.1 Make a call with the global proxy over budget, expect it to fail
+    setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client)
+    setattr(litellm.proxy.proxy_server, "master_key", "sk-1234")
+    litellm_proxy_budget_name = f"litellm-proxy-budget-{time.time()}"
+    setattr(
+        litellm.proxy.proxy_server,
+        "litellm_proxy_budget_name",
+        litellm_proxy_budget_name,
+    )
+    try:
+
+        async def test():
+            await litellm.proxy.proxy_server.prisma_client.connect()
+            ## CREATE PROXY + USER BUDGET ##
+            request = NewUserRequest(
+                max_budget=0.00001, user_id=litellm_proxy_budget_name
+            )
+            await new_user(request)
+            request = NewUserRequest()
+            key = await new_user(request)
+            print(key)
+
+            generated_key = key.key
+            user_id = key.user_id
+            bearer_token = "Bearer " + generated_key
+
+            request = Request(scope={"type": "http"})
+            request._url = URL(url="/chat/completions")
+
+            # use generated key to auth in
+            result = await user_api_key_auth(request=request, api_key=bearer_token)
+            print("result from user auth with new key", result)
+
+            # update spend using track_cost callback, make 2nd request, it should fail
+            from litellm.proxy.proxy_server import track_cost_callback
+            from litellm import ModelResponse, Choices, Message, Usage
+
+            resp = ModelResponse(
+                id="chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac",
+                choices=[
+                    Choices(
+                        finish_reason=None,
+                        index=0,
+                        message=Message(
+                            content=" Sure! Here is a short poem about the sky:\n\nA canvas of blue, a",
+                            role="assistant",
+                        ),
+                    )
+                ],
+                model="gpt-35-turbo",  # azure always has model written like this
+                usage=Usage(prompt_tokens=210, completion_tokens=200, total_tokens=410),
+            )
+            await track_cost_callback(
+                kwargs={
+                    "stream": False,
+                    "litellm_params": {
+                        "metadata": {
+                            "user_api_key": generated_key,
+                            "user_api_key_user_id": user_id,
+                        }
+                    },
+                    "response_cost": 0.00002,
+                },
+                completion_response=resp,
+                start_time=datetime.now(),
+                end_time=datetime.now(),
+            )
+
+            # use generated key to auth in again; this time the proxy budget is exceeded
+            result = await user_api_key_auth(request=request, api_key=bearer_token)
+            print("result from user auth with new key", result)
+            pytest.fail("This should have failed! The key crossed its budget.")
+
+        asyncio.run(test())
+    except Exception as e:
+        if hasattr(e, "message"):
+            error_detail = e.message
+        else:
+            error_detail = traceback.format_exc()
+        assert "Authentication Error, ExceededBudget:" in error_detail
+        print(vars(e))
+
+
 def test_call_with_user_over_budget_stream(prisma_client):
     # 6. Make a call with a key over budget, expect to fail
     setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client)
@@ -358,6 +443,93 @@ def test_call_with_user_over_budget_stream(prisma_client):
         print(vars(e))


+def test_call_with_proxy_over_budget_stream(prisma_client):
+    # 6.1 Make a streaming call with the global proxy over budget, expect it to fail
+    setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client)
+    setattr(litellm.proxy.proxy_server, "master_key", "sk-1234")
+    litellm_proxy_budget_name = f"litellm-proxy-budget-{time.time()}"
+    setattr(
+        litellm.proxy.proxy_server,
+        "litellm_proxy_budget_name",
+        litellm_proxy_budget_name,
+    )
+    from litellm._logging import verbose_proxy_logger
+    import logging
+
+    litellm.set_verbose = True
+    verbose_proxy_logger.setLevel(logging.DEBUG)
+    try:
+
+        async def test():
+            await litellm.proxy.proxy_server.prisma_client.connect()
+            ## CREATE PROXY + USER BUDGET ##
+            request = NewUserRequest(
+                max_budget=0.00001, user_id=litellm_proxy_budget_name
+            )
+            await new_user(request)
+            request = NewUserRequest()
+            key = await new_user(request)
+            print(key)
+
+            generated_key = key.key
+            user_id = key.user_id
+            bearer_token = "Bearer " + generated_key
+
+            request = Request(scope={"type": "http"})
+            request._url = URL(url="/chat/completions")
+
+            # use generated key to auth in
+            result = await user_api_key_auth(request=request, api_key=bearer_token)
+            print("result from user auth with new key", result)
+
+            # update spend using track_cost callback, make 2nd request, it should fail
+            from litellm.proxy.proxy_server import track_cost_callback
+            from litellm import ModelResponse, Choices, Message, Usage
+
+            resp = ModelResponse(
+                id="chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac",
+                choices=[
+                    Choices(
+                        finish_reason=None,
+                        index=0,
+                        message=Message(
+                            content=" Sure! Here is a short poem about the sky:\n\nA canvas of blue, a",
+                            role="assistant",
+                        ),
+                    )
+                ],
+                model="gpt-35-turbo",  # azure always has model written like this
+                usage=Usage(prompt_tokens=210, completion_tokens=200, total_tokens=410),
+            )
+            await track_cost_callback(
+                kwargs={
+                    "stream": True,
+                    "complete_streaming_response": resp,
+                    "litellm_params": {
+                        "metadata": {
+                            "user_api_key": generated_key,
+                            "user_api_key_user_id": user_id,
+                        }
+                    },
+                    "response_cost": 0.00002,
+                },
+                completion_response=ModelResponse(),
+                start_time=datetime.now(),
+                end_time=datetime.now(),
+            )
+
+            # use generated key to auth in again; this time the proxy budget is exceeded
+            result = await user_api_key_auth(request=request, api_key=bearer_token)
+            print("result from user auth with new key", result)
+            pytest.fail("This should have failed! The key crossed its budget.")
+
+        asyncio.run(test())
+    except Exception as e:
+        error_detail = e.message
+        assert "Authentication Error, ExceededBudget:" in error_detail
+        print(vars(e))
+
+
 def test_generate_and_call_with_valid_key_never_expires(prisma_client):
     # 7. Make a call with a key that never expires, expect to pass
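The streaming variant differs from the non-streaming test only in how the cost callback receives the response: the aggregated response is passed as "complete_streaming_response" with "stream": True, while completion_response is an empty ModelResponse(), apparently serving only as a placeholder. Reduced to the changed keys (names copied from the test above, nothing new introduced):

    # Streaming case: the cost callback is given the aggregated stream response.
    kwargs = {
        "stream": True,
        "complete_streaming_response": resp,  # aggregated ModelResponse for the stream
        # ... same metadata and "response_cost" as in the non-streaming test ...
    }
    completion_response = ModelResponse()  # placeholder here; the spend comes from the kwargs above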