forked from phoenix/litellm-mirror
Merge pull request #1591 from BerriAI/litellm_test_spend_logging
[Test] Proxy - writing spend logs table
commit c356dfa08f
2 changed files with 39 additions and 3 deletions
@@ -892,6 +892,10 @@ def get_logging_payload(kwargs, response_obj, start_time, end_time):
     from pydantic import Json
     import uuid
 
+    verbose_proxy_logger.debug(
+        f"SpendTable: get_logging_payload - kwargs: {kwargs}\n\n"
+    )
+
     if kwargs == None:
         kwargs = {}
     # standardize this function to be used across, s3, dynamoDB, langfuse logging
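To actually see the new "SpendTable: get_logging_payload - kwargs: ..." line while running the proxy locally, a minimal sketch (not part of this commit, and assuming verbose_proxy_logger behaves like a standard logging.Logger) is:

    import logging

    from litellm._logging import verbose_proxy_logger

    # Assumption: verbose_proxy_logger is a plain logging.Logger. Attach a
    # handler and lower the level so debug() calls such as the one added
    # above are emitted to stderr.
    verbose_proxy_logger.addHandler(logging.StreamHandler())
    verbose_proxy_logger.setLevel(logging.DEBUG)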
@@ -42,6 +42,7 @@ from litellm.proxy.proxy_server import (
     info_key_fn,
     update_key_fn,
     generate_key_fn,
+    view_spend_logs,
 )
 from litellm.proxy.utils import PrismaClient, ProxyLogging
 from litellm._logging import verbose_proxy_logger
@@ -713,9 +714,12 @@ def test_call_with_key_over_budget(prisma_client):
             # update spend using track_cost callback, make 2nd request, it should fail
             from litellm.proxy.proxy_server import track_cost_callback
             from litellm import ModelResponse, Choices, Message, Usage
+            import time
+
+            request_id = f"chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac{time.time()}"
 
             resp = ModelResponse(
-                id="chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac",
+                id=request_id,
                 choices=[
                     Choices(
                         finish_reason=None,
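The point of the request_id change above is that each test run now tags its ModelResponse with a unique id (the fixed chatcmpl prefix plus time.time()), so the spend-log read-back added later in the test can only match rows written by this run. A small illustrative sketch of the same idea (hypothetical helper, not part of this commit):

    import time
    import uuid

    def unique_request_id(prefix: str = "chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac") -> str:
        # time.time() is what the test appends; a uuid4 suffix is an equally
        # valid way to keep request ids from colliding across runs.
        return f"{prefix}{time.time()}-{uuid.uuid4()}"

    print(unique_request_id())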
@@ -731,6 +735,7 @@ def test_call_with_key_over_budget(prisma_client):
             )
             await track_cost_callback(
                 kwargs={
+                    "model": "chatgpt-v-2",
                     "stream": False,
                     "litellm_params": {
                         "metadata": {
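With the added "model" key, the kwargs payload the test feeds to track_cost_callback looks roughly like the sketch below. Only keys visible in this diff are included; entries elided by the hunk boundaries are noted in comments, the metadata value is a placeholder, and the 2e-05 cost is inferred from the spend assertion added later in the diff.

    example_kwargs = {
        "model": "chatgpt-v-2",
        "stream": False,
        "litellm_params": {
            "metadata": {
                # remaining metadata entries fall outside the visible hunks
                "user_api_key_user_id": "<user_id placeholder>",
            }
        },
        # inferred: the test later asserts spend_log.spend == float("2e-05")
        "response_cost": 0.00002,
    }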
@@ -745,6 +750,18 @@ def test_call_with_key_over_budget(prisma_client):
                 end_time=datetime.now(),
             )
+
+            # test spend_log was written and we can read it
+            spend_logs = await view_spend_logs(request_id=request_id)
+
+            print("read spend logs", spend_logs)
+            assert len(spend_logs) == 1
+
+            spend_log = spend_logs[0]
+
+            assert spend_log.request_id == request_id
+            assert spend_log.spend == float("2e-05")
+            assert spend_log.model == "chatgpt-v-2"
 
             # use generated key to auth in
             result = await user_api_key_auth(request=request, api_key=bearer_token)
             print("result from user auth with new key", result)
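The streaming test below repeats this same read-back pattern, so a shared helper could factor it out. The sketch is hypothetical and not part of this commit; it only assumes what the assertions above already imply, namely that view_spend_logs(request_id=...) returns a list of rows exposing request_id, spend and model.

    async def assert_spend_logged(view_spend_logs, request_id, expected_spend, expected_model):
        # Read back the spend log row written via track_cost_callback and
        # check the fields these tests assert on.
        spend_logs = await view_spend_logs(request_id=request_id)
        assert len(spend_logs) == 1

        spend_log = spend_logs[0]
        assert spend_log.request_id == request_id
        assert spend_log.spend == expected_spend
        assert spend_log.model == expected_model
        return spend_log

Usage in the non-streaming test would then be a single line: await assert_spend_logged(view_spend_logs, request_id, float("2e-05"), "chatgpt-v-2").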
@@ -788,9 +805,11 @@ def test_call_with_key_over_budget_stream(prisma_client):
             # update spend using track_cost callback, make 2nd request, it should fail
             from litellm.proxy.proxy_server import track_cost_callback
             from litellm import ModelResponse, Choices, Message, Usage
+            import time
 
+            request_id = f"chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac{time.time()}"
             resp = ModelResponse(
-                id="chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac",
+                id=request_id,
                 choices=[
                     Choices(
                         finish_reason=None,
@@ -806,6 +825,7 @@ def test_call_with_key_over_budget_stream(prisma_client):
             )
             await track_cost_callback(
                 kwargs={
+                    "model": "sagemaker-chatgpt-v-2",
                     "stream": True,
                     "complete_streaming_response": resp,
                     "litellm_params": {
@@ -814,13 +834,25 @@ def test_call_with_key_over_budget_stream(prisma_client):
                             "user_api_key_user_id": user_id,
                         }
                     },
-                    "response_cost": 0.00002,
+                    "response_cost": 0.00005,
                 },
                 completion_response=ModelResponse(),
                 start_time=datetime.now(),
                 end_time=datetime.now(),
             )
+
+            # test spend_log was written and we can read it
+            spend_logs = await view_spend_logs(request_id=request_id)
+
+            print("read spend logs", spend_logs)
+            assert len(spend_logs) == 1
+
+            spend_log = spend_logs[0]
+
+            assert spend_log.request_id == request_id
+            assert spend_log.spend == float("5e-05")
+            assert spend_log.model == "sagemaker-chatgpt-v-2"
 
             # use generated key to auth in
             result = await user_api_key_auth(request=request, api_key=bearer_token)
             print("result from user auth with new key", result)
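A reading note on the asserted spend values: they are just the scientific-notation spellings of the response_cost amounts fed to track_cost_callback in the two tests (0.00005 is visible in the hunk above; 0.00002 for the non-streaming test is implied by its float("2e-05") assertion).

    # The asserted spend equals the response_cost passed to the callback.
    assert float("2e-05") == 0.00002  # non-streaming test
    assert float("5e-05") == 0.00005  # streaming test, after the change above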