Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-27 03:34:10 +00:00

(test) streaming spend logs test

commit c94c23df69
parent e73d5bf38b

1 changed file with 64 additions and 64 deletions
@@ -775,7 +775,8 @@ def test_call_with_key_over_budget(prisma_client):
         print(vars(e))


-def test_call_with_key_over_budget_stream(prisma_client):
+@pytest.mark.asyncio()
+async def test_call_with_key_over_budget_stream(prisma_client):
     # 14. Make a call with a key over budget, expect to fail
     setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client)
     setattr(litellm.proxy.proxy_server, "master_key", "sk-1234")
@@ -785,82 +786,81 @@ def test_call_with_key_over_budget_stream(prisma_client):
     litellm.set_verbose = True
     verbose_proxy_logger.setLevel(logging.DEBUG)
     try:
-
-        async def test():
-            await litellm.proxy.proxy_server.prisma_client.connect()
-            request = GenerateKeyRequest(max_budget=0.00001)
-            key = await generate_key_fn(request)
-            print(key)
+        await litellm.proxy.proxy_server.prisma_client.connect()
+        request = GenerateKeyRequest(max_budget=0.00001)
+        key = await generate_key_fn(request)
+        print(key)

-            generated_key = key.key
-            user_id = key.user_id
-            bearer_token = "Bearer " + generated_key
+        generated_key = key.key
+        user_id = key.user_id
+        bearer_token = "Bearer " + generated_key

-            request = Request(scope={"type": "http"})
-            request._url = URL(url="/chat/completions")
+        request = Request(scope={"type": "http"})
+        request._url = URL(url="/chat/completions")

-            # use generated key to auth in
-            result = await user_api_key_auth(request=request, api_key=bearer_token)
-            print("result from user auth with new key", result)
+        # use generated key to auth in
+        result = await user_api_key_auth(request=request, api_key=bearer_token)
+        print("result from user auth with new key", result)

-            # update spend using track_cost callback, make 2nd request, it should fail
-            from litellm.proxy.proxy_server import track_cost_callback
-            from litellm import ModelResponse, Choices, Message, Usage
-            import time
+        # update spend using track_cost callback, make 2nd request, it should fail
+        from litellm.proxy.proxy_server import track_cost_callback
+        from litellm import ModelResponse, Choices, Message, Usage
+        import time

-            request_id = f"chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac{time.time()}"
-            resp = ModelResponse(
-                id=request_id,
-                choices=[
-                    Choices(
-                        finish_reason=None,
-                        index=0,
-                        message=Message(
-                            content=" Sure! Here is a short poem about the sky:\n\nA canvas of blue, a",
-                            role="assistant",
-                        ),
-                    )
-                ],
-                model="gpt-35-turbo",  # azure always has model written like this
-                usage=Usage(prompt_tokens=210, completion_tokens=200, total_tokens=410),
-            )
-            await track_cost_callback(
-                kwargs={
-                    "call_type": "acompletion",
-                    "model": "sagemaker-chatgpt-v-2",
-                    "stream": True,
-                    "complete_streaming_response": resp,
-                    "litellm_params": {
-                        "metadata": {
-                            "user_api_key": generated_key,
-                            "user_api_key_user_id": user_id,
-                        }
-                    },
-                    "response_cost": 0.00005,
-                },
-                completion_response=ModelResponse(),
-                start_time=datetime.now(),
-                end_time=datetime.now(),
-            )
+        request_id = f"chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac{time.time()}"
+        resp = ModelResponse(
+            id=request_id,
+            choices=[
+                Choices(
+                    finish_reason=None,
+                    index=0,
+                    message=Message(
+                        content=" Sure! Here is a short poem about the sky:\n\nA canvas of blue, a",
+                        role="assistant",
+                    ),
+                )
+            ],
+            model="gpt-35-turbo",  # azure always has model written like this
+            usage=Usage(prompt_tokens=210, completion_tokens=200, total_tokens=410),
+        )
+        await track_cost_callback(
+            kwargs={
+                "call_type": "acompletion",
+                "model": "sagemaker-chatgpt-v-2",
+                "stream": True,
+                "complete_streaming_response": resp,
+                "litellm_params": {
+                    "metadata": {
+                        "user_api_key": generated_key,
+                        "user_api_key_user_id": user_id,
+                    }
+                },
+                "response_cost": 0.00005,
+            },
+            completion_response=resp,
+            start_time=datetime.now(),
+            end_time=datetime.now(),
+        )

-            # test spend_log was written and we can read it
-            spend_logs = await view_spend_logs(request_id=request_id)
+        # test spend_log was written and we can read it
+        spend_logs = await view_spend_logs(request_id=request_id)

-            print("read spend logs", spend_logs)
-            assert len(spend_logs) == 1
+        print("read spend logs", spend_logs)
+        assert len(spend_logs) == 1

-            spend_log = spend_logs[0]
+        spend_log = spend_logs[0]

-            assert spend_log.request_id == request_id
-            assert spend_log.spend == float("5e-05")
-            assert spend_log.model == "sagemaker-chatgpt-v-2"
+        assert spend_log.request_id == request_id
+        assert spend_log.spend == float("5e-05")
+        assert spend_log.model == "sagemaker-chatgpt-v-2"

-            # use generated key to auth in
-            result = await user_api_key_auth(request=request, api_key=bearer_token)
-            print("result from user auth with new key", result)
-            pytest.fail(f"This should have failed!. They key crossed it's budget")
+        # use generated key to auth in
+        result = await user_api_key_auth(request=request, api_key=bearer_token)
+        print("result from user auth with new key", result)
+        pytest.fail(f"This should have failed!. They key crossed it's budget")

     except Exception as e:
+        print("Got Exception", e)
         error_detail = e.message
         assert "Authentication Error, ExceededTokenBudget:" in error_detail
         print(vars(e))
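
Aside from passing the streamed response object to the cost callback (completion_response=resp instead of a fresh ModelResponse()), the structural change above is a pytest-asyncio conversion: the test body no longer sits in a nested async def test() coroutine inside a synchronous test function, but in a test that is itself async and marked with @pytest.mark.asyncio(). A minimal sketch of the two styles, using a hypothetical fetch_value() coroutine rather than anything from this repo:

import asyncio

import pytest


async def fetch_value():
    # hypothetical awaitable standing in for the proxy calls exercised in the diff
    await asyncio.sleep(0)
    return 42


# old style: a synchronous test drives its own event loop around a nested coroutine
def test_fetch_value_sync_wrapper():
    async def _run():
        assert await fetch_value() == 42

    asyncio.run(_run())


# new style: pytest-asyncio executes the coroutine test directly
@pytest.mark.asyncio()
async def test_fetch_value_native_async():
    assert await fetch_value() == 42

With the native-async form, a single test such as the one in this diff can be selected with pytest -k test_call_with_key_over_budget_stream, assuming pytest-asyncio is installed and enabled.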