diff --git a/litellm/proxy/_types.py b/litellm/proxy/_types.py
index 13c086285..a3d1b4815 100644
--- a/litellm/proxy/_types.py
+++ b/litellm/proxy/_types.py
@@ -346,11 +346,12 @@ class LiteLLM_SpendLogs(LiteLLMBase):
     model: Optional[str] = ""
     call_type: str
     spend: Optional[float] = 0.0
+    total_tokens: Optional[float] = 0.0
+    prompt_tokens: Optional[float] = 0.0
+    completion_tokens: Optional[float] = 0.0
     startTime: Union[str, datetime, None]
     endTime: Union[str, datetime, None]
     user: Optional[str] = ""
-    modelParameters: Optional[Json] = {}
-    usage: Optional[Json] = {}
     metadata: Optional[Json] = {}
     cache_hit: Optional[str] = "False"
     cache_key: Optional[str] = None
diff --git a/litellm/proxy/schema.prisma b/litellm/proxy/schema.prisma
index f06d42ba5..2d8b0e662 100644
--- a/litellm/proxy/schema.prisma
+++ b/litellm/proxy/schema.prisma
@@ -50,12 +50,13 @@ model LiteLLM_SpendLogs {
   call_type String
   api_key String @default ("")
   spend Float @default(0.0)
+  total_tokens Float @default(0.0)
+  prompt_tokens Float @default(0.0)
+  completion_tokens Float @default(0.0)
   startTime DateTime // Assuming start_time is a DateTime field
   endTime DateTime // Assuming end_time is a DateTime field
   model String @default("")
   user String @default("")
-  modelParameters Json @default("{}")// Assuming optional_params is a JSON field
-  usage Json @default("{}")
   metadata Json @default("{}")
   cache_hit String @default("")
   cache_key String @default("")
diff --git a/litellm/proxy/utils.py b/litellm/proxy/utils.py
index 713f117ca..375f39338 100644
--- a/litellm/proxy/utils.py
+++ b/litellm/proxy/utils.py
@@ -1059,10 +1059,11 @@ def get_logging_payload(kwargs, response_obj, start_time, end_time):
     metadata = (
         litellm_params.get("metadata", {}) or {}
     )  # if litellm_params['metadata'] == None
-    optional_params = kwargs.get("optional_params", {})
     call_type = kwargs.get("call_type", "litellm.completion")
     cache_hit = kwargs.get("cache_hit", False)
     usage = response_obj["usage"]
+    if type(usage) == litellm.Usage:
+        usage = dict(usage)
     id = response_obj.get("id", str(uuid.uuid4()))
     api_key = metadata.get("user_api_key", "")
     if api_key is not None and isinstance(api_key, str) and api_key.startswith("sk-"):
@@ -1090,10 +1091,11 @@ def get_logging_payload(kwargs, response_obj, start_time, end_time):
         "endTime": end_time,
         "model": kwargs.get("model", ""),
         "user": kwargs.get("user", ""),
-        "modelParameters": optional_params,
-        "usage": usage,
         "metadata": metadata,
         "cache_key": cache_key,
+        "total_tokens": usage.get("total_tokens", 0),
+        "prompt_tokens": usage.get("prompt_tokens", 0),
+        "completion_tokens": usage.get("completion_tokens", 0),
     }

     json_fields = [
@@ -1118,8 +1120,6 @@ def get_logging_payload(kwargs, response_obj, start_time, end_time):
             payload[param] = payload[param].model_dump_json()
         if type(payload[param]) == litellm.EmbeddingResponse:
             payload[param] = payload[param].model_dump_json()
-        elif type(payload[param]) == litellm.Usage:
-            payload[param] = payload[param].model_dump_json()
         else:
             payload[param] = json.dumps(payload[param])

diff --git a/schema.prisma b/schema.prisma
index 72d14e13b..103186aae 100644
--- a/schema.prisma
+++ b/schema.prisma
@@ -53,12 +53,13 @@ model LiteLLM_SpendLogs {
   call_type String
   api_key String @default ("")
   spend Float @default(0.0)
+  total_tokens Float @default(0.0)
+  prompt_tokens Float @default(0.0)
+  completion_tokens Float @default(0.0)
   startTime DateTime // Assuming start_time is a DateTime field
   endTime DateTime // Assuming end_time is a DateTime field
   model String @default("")
   user String @default("")
-  modelParameters Json @default("{}")// Assuming optional_params is a JSON field
-  usage Json @default("{}")
   metadata Json @default("{}")
   cache_hit String @default("")
   cache_key String @default("")