forked from phoenix/litellm-mirror
(test) budgets per key
parent 7ed3141c7d
commit de1502658d

1 changed file with 152 additions and 4 deletions
@@ -3,13 +3,15 @@
 # 2. Make a call with invalid key, expect it to fail
 # 3. Make a call to a key with invalid model - expect to fail
 # 4. Make a call to a key with valid model - expect to pass
-# 5. Make a call with key over budget, expect to fail
-# 6. Make a streaming chat/completions call with key over budget, expect to fail
+# 5. Make a call with user over budget, expect to fail
+# 6. Make a streaming chat/completions call with user over budget, expect to fail
 # 7. Make a call with a key that never expires, expect to pass
 # 8. Make a call with an expired key, expect to fail
 # 9. Delete a Key
 # 10. Generate a key, call key/info. Assert info returned is the same as generated key info
 # 11. Generate a Key, call key/info, call key/update, call key/info
+# 12. Make a call with key over budget, expect to fail
+# 14. Make a streaming chat/completions call with key over budget, expect to fail


 # function to call to generate key - async def new_user(data: NewUserRequest):
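The scenario list now separates the user-level budget checks (5, 6) from the new key-level budget checks (12, 14). For orientation, here is a minimal sketch of the per-key flow the new tests exercise, condensed from the diff below. The proxy-internal names (GenerateKeyRequest, generate_key_fn, user_api_key_auth) are taken from this commit's tests; the fastapi/starlette import paths and the database setup are assumptions, not part of the diff:

    import asyncio

    from fastapi import Request                      # assumed import path
    from starlette.datastructures import URL         # assumed import path
    from litellm.proxy._types import GenerateKeyRequest
    from litellm.proxy.proxy_server import generate_key_fn, user_api_key_auth


    async def per_key_budget_flow():
        # generate a key whose budget is effectively zero
        key = await generate_key_fn(GenerateKeyRequest(max_budget=0.00001))

        request = Request(scope={"type": "http"})
        request._url = URL(url="/chat/completions")

        # the first auth succeeds; once spend is tracked against the key,
        # a later auth is expected to raise an ExceededBudget error
        await user_api_key_auth(request=request, api_key="Bearer " + key.key)


    asyncio.run(per_key_budget_flow())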
@@ -38,6 +40,7 @@ from litellm.proxy.proxy_server import (
     delete_key_fn,
     info_key_fn,
     update_key_fn,
+    generate_key_fn,
 )
 from litellm.proxy.utils import PrismaClient, ProxyLogging
 from litellm._logging import verbose_proxy_logger

@@ -46,6 +49,7 @@ verbose_proxy_logger.setLevel(level=logging.DEBUG)

 from litellm.proxy._types import (
     NewUserRequest,
+    GenerateKeyRequest,
     DynamoDBArgs,
     DeleteKeyRequest,
     UpdateKeyRequest,
@@ -203,7 +207,7 @@ def test_call_with_valid_model(prisma_client):
         pytest.fail(f"An exception occurred - {str(e)}")


-def test_call_with_key_over_budget(prisma_client):
+def test_call_with_user_over_budget(prisma_client):
     # 5. Make a call with a key over budget, expect to fail
     setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client)
     setattr(litellm.proxy.proxy_server, "master_key", "sk-1234")
@@ -272,7 +276,7 @@ def test_call_with_key_over_budget(prisma_client):
         print(vars(e))


-def test_call_with_key_over_budget_stream(prisma_client):
+def test_call_with_user_over_budget_stream(prisma_client):
     # 6. Make a call with a key over budget, expect to fail
     setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client)
     setattr(litellm.proxy.proxy_server, "master_key", "sk-1234")
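With the old names freed up by the renames above, the final hunk adds dedicated per-key budget tests. Conceptually they verify one condition: once the spend tracked for a key reaches its max_budget, authentication with that key must be rejected. A rough illustration of that condition, as an assumed helper for explanation only and not litellm's actual implementation:

    from typing import Optional


    def is_key_over_budget(spend: float, max_budget: Optional[float]) -> bool:
        # Illustrative only: a key with no max_budget is never rejected;
        # otherwise reject once accumulated spend reaches the cap.
        return max_budget is not None and spend >= max_budget


    # a key created with max_budget=0.00001 is over budget after a single tracked call
    assert is_key_over_budget(spend=0.00421, max_budget=0.00001)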
@@ -593,3 +597,147 @@ def test_generate_and_update_key(prisma_client):
         print("Got Exception", e)
         print(e.detail)
         pytest.fail(f"An exception occurred - {str(e)}")
+
+
+def test_call_with_key_over_budget(prisma_client):
+    # 12. Make a call with a key over budget, expect to fail
+    setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client)
+    setattr(litellm.proxy.proxy_server, "master_key", "sk-1234")
+    try:
+
+        async def test():
+            await litellm.proxy.proxy_server.prisma_client.connect()
+            request = GenerateKeyRequest(max_budget=0.00001)
+            key = await generate_key_fn(request)
+            print(key)
+
+            generated_key = key.key
+            user_id = key.user_id
+            bearer_token = "Bearer " + generated_key
+
+            request = Request(scope={"type": "http"})
+            request._url = URL(url="/chat/completions")
+
+            # use generated key to auth in
+            result = await user_api_key_auth(request=request, api_key=bearer_token)
+            print("result from user auth with new key", result)
+
+            # update spend using track_cost callback, make 2nd request, it should fail
+            from litellm.proxy.proxy_server import track_cost_callback
+            from litellm import ModelResponse, Choices, Message, Usage
+
+            resp = ModelResponse(
+                id="chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac",
+                choices=[
+                    Choices(
+                        finish_reason=None,
+                        index=0,
+                        message=Message(
+                            content=" Sure! Here is a short poem about the sky:\n\nA canvas of blue, a",
+                            role="assistant",
+                        ),
+                    )
+                ],
+                model="gpt-35-turbo",  # azure always has model written like this
+                usage=Usage(prompt_tokens=210, completion_tokens=200, total_tokens=410),
+            )
+            await track_cost_callback(
+                kwargs={
+                    "stream": False,
+                    "litellm_params": {
+                        "metadata": {
+                            "user_api_key": generated_key,
+                            "user_api_key_user_id": user_id,
+                        }
+                    },
+                },
+                completion_response=resp,
+                start_time=datetime.now(),
+                end_time=datetime.now(),
+            )
+
+            # use generated key to auth in
+            result = await user_api_key_auth(request=request, api_key=bearer_token)
+            print("result from user auth with new key", result)
+            pytest.fail(f"This should have failed! The key crossed its budget")
+
+        asyncio.run(test())
+    except Exception as e:
+        error_detail = e.detail
+        assert "Authentication Error, ExceededBudget:" in error_detail
+        print(vars(e))
+
+
+def test_call_with_key_over_budget_stream(prisma_client):
+    # 14. Make a call with a key over budget, expect to fail
+    setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client)
+    setattr(litellm.proxy.proxy_server, "master_key", "sk-1234")
+    from litellm._logging import verbose_proxy_logger
+    import logging
+
+    litellm.set_verbose = True
+    verbose_proxy_logger.setLevel(logging.DEBUG)
+    try:
+
+        async def test():
+            await litellm.proxy.proxy_server.prisma_client.connect()
+            request = NewUserRequest(max_budget=0.00001)
+            key = await new_user(request)
+            print(key)
+
+            generated_key = key.key
+            user_id = key.user_id
+            bearer_token = "Bearer " + generated_key
+
+            request = Request(scope={"type": "http"})
+            request._url = URL(url="/chat/completions")
+
+            # use generated key to auth in
+            result = await user_api_key_auth(request=request, api_key=bearer_token)
+            print("result from user auth with new key", result)
+
+            # update spend using track_cost callback, make 2nd request, it should fail
+            from litellm.proxy.proxy_server import track_cost_callback
+            from litellm import ModelResponse, Choices, Message, Usage
+
+            resp = ModelResponse(
+                id="chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac",
+                choices=[
+                    Choices(
+                        finish_reason=None,
+                        index=0,
+                        message=Message(
+                            content=" Sure! Here is a short poem about the sky:\n\nA canvas of blue, a",
+                            role="assistant",
+                        ),
+                    )
+                ],
+                model="gpt-35-turbo",  # azure always has model written like this
+                usage=Usage(prompt_tokens=210, completion_tokens=200, total_tokens=410),
+            )
+            await track_cost_callback(
+                kwargs={
+                    "stream": True,
+                    "complete_streaming_response": resp,
+                    "litellm_params": {
+                        "metadata": {
+                            "user_api_key": generated_key,
+                            "user_api_key_user_id": user_id,
+                        }
+                    },
+                },
+                completion_response=ModelResponse(),
+                start_time=datetime.now(),
+                end_time=datetime.now(),
+            )
+
+            # use generated key to auth in
+            result = await user_api_key_auth(request=request, api_key=bearer_token)
+            print("result from user auth with new key", result)
+            pytest.fail(f"This should have failed! The key crossed its budget")
+
+        asyncio.run(test())
+    except Exception as e:
+        error_detail = e.detail
+        assert "Authentication Error, ExceededBudget:" in error_detail
+        print(vars(e))
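Both new tests drive the proxy functions in-process rather than over HTTP, but they still require a reachable Prisma-backed database, since prisma_client.connect() is awaited before any key is generated. The streaming variant differs mainly in passing the response through kwargs["complete_streaming_response"] with "stream": True. A hedged way to run just the budget tests, assuming the module is named test_key_generate_prisma.py (the file name is not shown in this diff):

    import pytest

    # select only the over-budget tests from this module; adjust the path/name as needed
    pytest.main(["-x", "-k", "over_budget", "test_key_generate_prisma.py"])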