mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 02:34:29 +00:00)
(feat) add cache_key in spend_log
commit 2130a61b6e
parent d694993703

5 changed files with 12 additions and 2 deletions
@@ -343,8 +343,7 @@ class LiteLLM_SpendLogs(LiteLLMBase):
     endTime: Union[str, datetime, None]
     user: Optional[str] = ""
     modelParameters: Optional[Json] = {}
-    messages: Optional[Json] = []
-    response: Optional[Json] = {}
     usage: Optional[Json] = {}
     metadata: Optional[Json] = {}
     cache_hit: Optional[str] = "False"
+    cache_key: Optional[str] = None
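For orientation, a minimal sketch of how the Pydantic model reads after this hunk: messages and response are dropped, and cache_key is added with a None default. Upstream the class inherits from LiteLLMBase; a plain pydantic BaseModel stands in for it here, and only the fields visible in the hunk are shown.

from datetime import datetime
from typing import Optional, Union

from pydantic import BaseModel, Json


# Sketch only: upstream inherits from LiteLLMBase, assumed to be a
# pydantic BaseModel subclass; bare Json annotations are taken verbatim
# from the hunk above.
class LiteLLM_SpendLogs(BaseModel):
    endTime: Union[str, datetime, None]
    user: Optional[str] = ""
    modelParameters: Optional[Json] = {}
    usage: Optional[Json] = {}
    metadata: Optional[Json] = {}
    cache_hit: Optional[str] = "False"
    cache_key: Optional[str] = None  # new in this commit; None when no key was computed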
@@ -58,4 +58,5 @@ model LiteLLM_SpendLogs {
   usage Json @default("{}")
   metadata Json @default("{}")
   cache_hit String @default("")
+  cache_key String @default("")
 }
@@ -995,6 +995,10 @@ def get_logging_payload(kwargs, response_obj, start_time, end_time):
     if api_key is not None and isinstance(api_key, str) and api_key.startswith("sk-"):
         # hash the api_key
         api_key = hash_token(api_key)
+    from litellm.caching import Cache
+
+    c = Cache()
+    cache_key = c.get_cache_key(**kwargs)

     if "headers" in metadata and "authorization" in metadata["headers"]:
         metadata["headers"].pop(
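The key stored in the spend log is the same deterministic key the caching layer derives from the request kwargs. A minimal sketch of that call in isolation, using only the litellm.caching.Cache API the hunk itself imports (the example kwargs are illustrative):

from litellm.caching import Cache

# Cache() with no arguments defaults to the in-memory backend; only key
# derivation is needed here, not an actual cache store.
c = Cache()
kwargs = {
    "model": "chatgpt-v-2",
    "messages": [{"role": "user", "content": "hello"}],
}
# Identical kwargs yield an identical key, which is what lets a spend-log
# row be joined back to its cache entry.
cache_key = c.get_cache_key(**kwargs)
print(cache_key)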
@@ -1013,6 +1017,7 @@ def get_logging_payload(kwargs, response_obj, start_time, end_time):
         "modelParameters": optional_params,
         "usage": usage,
         "metadata": metadata,
+        "cache_key": cache_key,
     }

     json_fields = [
@@ -763,6 +763,10 @@ def test_call_with_key_over_budget(prisma_client):
         assert spend_log.request_id == request_id
         assert spend_log.spend == float("2e-05")
         assert spend_log.model == "chatgpt-v-2"
+        assert (
+            spend_log.cache_key
+            == "a61ae14fe4a8b8014a61e6ae01a100c8bc6770ac37c293242afed954bc69207d"
+        )

         # use generated key to auth in
         result = await user_api_key_auth(request=request, api_key=bearer_token)
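The expected value in the new assert is a 64-character hex string, i.e. the size of a sha-256 digest; treating that as the key format is an assumption about how get_cache_key() encodes keys in this version. A small standalone check of just the format:

import re


def looks_like_cache_key(value: str) -> bool:
    # 64 lowercase hex chars: the shape of a sha-256 digest.
    return re.fullmatch(r"[0-9a-f]{64}", value) is not None


assert looks_like_cache_key(
    "a61ae14fe4a8b8014a61e6ae01a100c8bc6770ac37c293242afed954bc69207d"
)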
@@ -61,4 +61,5 @@ model LiteLLM_SpendLogs {
   usage Json @default("{}")
   metadata Json @default("{}")
   cache_hit String @default("")
+  cache_key String @default("")
 }
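With the column in place, spend-log rows can be grouped by cache_key, for example to see which requests were served by the same cache entry. A minimal sketch assuming prisma-client-py with a client generated from this schema; the lowercased model accessor name is an assumption of that client's naming convention:

import asyncio

from prisma import Prisma


async def spend_logs_for_cache_key(key: str):
    db = Prisma()
    await db.connect()
    try:
        # Every spend-log row whose request hashed to this cache key.
        return await db.litellm_spendlogs.find_many(where={"cache_key": key})
    finally:
        await db.disconnect()


# Hypothetical usage with a key taken from an existing row:
# rows = asyncio.run(spend_logs_for_cache_key("<cache_key hex>"))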