From 2130a61b6eabbb22f1013e71e724ff2ebedd97e8 Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Wed, 24 Jan 2024 17:56:00 -0800
Subject: [PATCH] (feat) add cache_key in spend_log

---
 litellm/proxy/_types.py                   | 3 +--
 litellm/proxy/schema.prisma               | 1 +
 litellm/proxy/utils.py                    | 5 +++++
 litellm/tests/test_key_generate_prisma.py | 4 ++++
 schema.prisma                             | 1 +
 5 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/litellm/proxy/_types.py b/litellm/proxy/_types.py
index 8a059c507..670cefcf2 100644
--- a/litellm/proxy/_types.py
+++ b/litellm/proxy/_types.py
@@ -343,8 +343,7 @@ class LiteLLM_SpendLogs(LiteLLMBase):
     endTime: Union[str, datetime, None]
     user: Optional[str] = ""
     modelParameters: Optional[Json] = {}
-    messages: Optional[Json] = []
-    response: Optional[Json] = {}
     usage: Optional[Json] = {}
     metadata: Optional[Json] = {}
     cache_hit: Optional[str] = "False"
+    cache_key: Optional[str] = None
diff --git a/litellm/proxy/schema.prisma b/litellm/proxy/schema.prisma
index 441c3515f..f06d42ba5 100644
--- a/litellm/proxy/schema.prisma
+++ b/litellm/proxy/schema.prisma
@@ -58,4 +58,5 @@ model LiteLLM_SpendLogs {
   usage      Json     @default("{}")
   metadata   Json     @default("{}")
   cache_hit  String   @default("")
+  cache_key  String   @default("")
 }
\ No newline at end of file
diff --git a/litellm/proxy/utils.py b/litellm/proxy/utils.py
index 15f230a6a..d49ace138 100644
--- a/litellm/proxy/utils.py
+++ b/litellm/proxy/utils.py
@@ -995,6 +995,10 @@ def get_logging_payload(kwargs, response_obj, start_time, end_time):
     if api_key is not None and isinstance(api_key, str) and api_key.startswith("sk-"):
         # hash the api_key
         api_key = hash_token(api_key)
+    from litellm.caching import Cache
+
+    c = Cache()
+    cache_key = c.get_cache_key(**kwargs)
 
     if "headers" in metadata and "authorization" in metadata["headers"]:
         metadata["headers"].pop(
@@ -1013,6 +1017,7 @@
         "modelParameters": optional_params,
         "usage": usage,
         "metadata": metadata,
+        "cache_key": cache_key,
     }
 
     json_fields = [
diff --git a/litellm/tests/test_key_generate_prisma.py b/litellm/tests/test_key_generate_prisma.py
index 49f091cd6..f7f1d0a91 100644
--- a/litellm/tests/test_key_generate_prisma.py
+++ b/litellm/tests/test_key_generate_prisma.py
@@ -763,6 +763,10 @@ def test_call_with_key_over_budget(prisma_client):
         assert spend_log.request_id == request_id
         assert spend_log.spend == float("2e-05")
         assert spend_log.model == "chatgpt-v-2"
+        assert (
+            spend_log.cache_key
+            == "a61ae14fe4a8b8014a61e6ae01a100c8bc6770ac37c293242afed954bc69207d"
+        )
 
         # use generated key to auth in
         result = await user_api_key_auth(request=request, api_key=bearer_token)
diff --git a/schema.prisma b/schema.prisma
index 0dd11eb64..72d14e13b 100644
--- a/schema.prisma
+++ b/schema.prisma
@@ -61,4 +61,5 @@ model LiteLLM_SpendLogs {
   usage      Json     @default("{}")
   metadata   Json     @default("{}")
   cache_hit  String   @default("")
+  cache_key  String   @default("")
 }
\ No newline at end of file