forked from phoenix/litellm-mirror
(feat) SpendLogs show total_tokens, prompt_tokens, completion_tokens
This commit is contained in:
parent b9fc2c3735
commit 64f1301033

4 changed files with 14 additions and 6 deletions
@@ -346,10 +346,12 @@ class LiteLLM_SpendLogs(LiteLLMBase):
     model: Optional[str] = ""
     call_type: str
     spend: Optional[float] = 0.0
+    total_tokens: Optional[float] = 0.0
+    prompt_tokens: Optional[float] = 0.0
+    completion_tokens: Optional[float] = 0.0
     startTime: Union[str, datetime, None]
     endTime: Union[str, datetime, None]
     user: Optional[str] = ""
-    usage: Optional[Json] = {}
     metadata: Optional[Json] = {}
     cache_hit: Optional[str] = "False"
     cache_key: Optional[str] = None
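Read out of the diff, the model change is: three float token fields replace the JSON `usage` blob. A minimal standalone sketch of the resulting shape (the `SpendLogRow` name and example values are illustrative; the Json-typed fields are left out to keep it self-contained):

from datetime import datetime
from typing import Optional, Union

from pydantic import BaseModel


class SpendLogRow(BaseModel):
    # Mirrors the LiteLLM_SpendLogs fields touched by this commit;
    # Json-typed fields (usage/metadata) are omitted for self-containment.
    call_type: str
    spend: Optional[float] = 0.0
    total_tokens: Optional[float] = 0.0       # new: flattened out of `usage`
    prompt_tokens: Optional[float] = 0.0      # new
    completion_tokens: Optional[float] = 0.0  # new
    startTime: Union[str, datetime, None] = None
    endTime: Union[str, datetime, None] = None


row = SpendLogRow(call_type="litellm.completion",
                  total_tokens=42, prompt_tokens=30, completion_tokens=12)
print(row.total_tokens)  # 42.0 -- ints coerce to float per the annotation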
@@ -50,11 +50,13 @@ model LiteLLM_SpendLogs {
   call_type String
   api_key String @default ("")
   spend Float @default(0.0)
+  total_tokens Float @default(0.0)
+  prompt_tokens Float @default(0.0)
+  completion_tokens Float @default(0.0)
   startTime DateTime // Assuming start_time is a DateTime field
   endTime DateTime // Assuming end_time is a DateTime field
   model String @default("")
   user String @default("")
-  usage Json @default("{}")
   metadata Json @default("{}")
   cache_hit String @default("")
   cache_key String @default("")
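Because each new column carries `@default(0.0)`, rows written before the schema change read back as 0.0 rather than NULL. The payoff of flattening tokens out of the `usage` JSON is that they aggregate directly in SQL; a rough sketch with sqlite3 standing in for the real database (table and column names mirror the schema above, the data is made up):

import sqlite3

con = sqlite3.connect(":memory:")
# Stand-in for the Prisma-managed table; only the columns used below.
con.execute(
    "CREATE TABLE LiteLLM_SpendLogs ("
    " model TEXT DEFAULT '',"
    " spend REAL DEFAULT 0.0,"
    " total_tokens REAL DEFAULT 0.0,"
    " prompt_tokens REAL DEFAULT 0.0,"
    " completion_tokens REAL DEFAULT 0.0)"
)
con.executemany(
    "INSERT INTO LiteLLM_SpendLogs VALUES (?, ?, ?, ?, ?)",
    [("gpt-3.5-turbo", 0.002, 1200, 900, 300), ("gpt-4", 0.03, 800, 500, 300)],
)
# Tokens as real columns (not a JSON blob) roll up without JSON parsing.
for row in con.execute(
    "SELECT model, SUM(total_tokens), SUM(prompt_tokens), SUM(completion_tokens)"
    " FROM LiteLLM_SpendLogs GROUP BY model"
):
    print(row)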
@@ -1062,6 +1062,8 @@ def get_logging_payload(kwargs, response_obj, start_time, end_time):
     call_type = kwargs.get("call_type", "litellm.completion")
     cache_hit = kwargs.get("cache_hit", False)
     usage = response_obj["usage"]
+    if type(usage) == litellm.Usage:
+        usage = dict(usage)
     id = response_obj.get("id", str(uuid.uuid4()))
     api_key = metadata.get("user_api_key", "")
     if api_key is not None and isinstance(api_key, str) and api_key.startswith("sk-"):
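The added guard normalizes `usage` to a plain dict so the `usage.get(...)` calls later in the payload work whether the response carried a `litellm.Usage` object or a bare dict. A self-contained stand-in, assuming `litellm.Usage` behaves like a pydantic model (which iterates as field/value pairs, so `dict()` works on it):

from pydantic import BaseModel


class Usage(BaseModel):
    # Stand-in for litellm.Usage (assumed to be pydantic-based).
    prompt_tokens: int = 0
    completion_tokens: int = 0
    total_tokens: int = 0


usage = Usage(prompt_tokens=30, completion_tokens=12, total_tokens=42)
if type(usage) == Usage:      # same normalization as the hunk above
    usage = dict(usage)       # pydantic models iterate as (field, value) pairs
print(usage.get("total_tokens", 0))  # 42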
@@ -1089,9 +1091,11 @@ def get_logging_payload(kwargs, response_obj, start_time, end_time):
         "endTime": end_time,
         "model": kwargs.get("model", ""),
         "user": kwargs.get("user", ""),
-        "usage": usage,
         "metadata": metadata,
         "cache_key": cache_key,
+        "total_tokens": usage.get("total_tokens", 0),
+        "prompt_tokens": usage.get("prompt_tokens", 0),
+        "completion_tokens": usage.get("completion_tokens", 0),
     }

     json_fields = [
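The `.get(..., 0)` defaults matter because usage dicts vary by call type; embedding responses, for example, typically report no `completion_tokens` (a general observation about provider usage payloads, not something this diff asserts). Sketch:

# Usage dicts vary by call type; missing keys fall back to 0 so the
# spend-log columns are always populated.
chat_usage = {"prompt_tokens": 30, "completion_tokens": 12, "total_tokens": 42}
embed_usage = {"prompt_tokens": 8, "total_tokens": 8}  # no completion_tokens

for usage in (chat_usage, embed_usage):
    print({
        "total_tokens": usage.get("total_tokens", 0),
        "prompt_tokens": usage.get("prompt_tokens", 0),
        "completion_tokens": usage.get("completion_tokens", 0),
    })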
@@ -1116,8 +1120,6 @@ def get_logging_payload(kwargs, response_obj, start_time, end_time):
            payload[param] = payload[param].model_dump_json()
        if type(payload[param]) == litellm.EmbeddingResponse:
            payload[param] = payload[param].model_dump_json()
-        elif type(payload[param]) == litellm.Usage:
-            payload[param] = payload[param].model_dump_json()
        else:
            payload[param] = json.dumps(payload[param])

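With `usage` converted to a dict up front and dropped from the payload entirely, no `litellm.Usage` instance can reach this serialization loop, so the `elif` branch was dead code. What survives for the remaining JSON fields is the generic fallback; in isolation (the row data here is hypothetical):

import json

payload = {"metadata": {"user_api_key": "sk-..."}}  # hypothetical row
json_fields = ["metadata"]  # `usage` no longer appears here

for param in json_fields:
    # Model/embedding responses keep their model_dump_json() branches;
    # plain values take the generic json.dumps fallback.
    payload[param] = json.dumps(payload[param])
print(payload["metadata"])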
@@ -53,11 +53,13 @@ model LiteLLM_SpendLogs {
   call_type String
   api_key String @default ("")
   spend Float @default(0.0)
+  total_tokens Float @default(0.0)
+  prompt_tokens Float @default(0.0)
+  completion_tokens Float @default(0.0)
   startTime DateTime // Assuming start_time is a DateTime field
   endTime DateTime // Assuming end_time is a DateTime field
   model String @default("")
   user String @default("")
-  usage Json @default("{}")
   metadata Json @default("{}")
   cache_hit String @default("")
   cache_key String @default("")