Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 18:54:30 +00:00
fix(types/utils.py): support passing prompt cache usage stats in usage object

Passes deepseek prompt caching values through to the end user.
parent cd073d5ad3
commit 0a30ba9674

3 changed files with 40 additions and 9 deletions
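Alongside the pass-through shown in the hunk below, the usage object itself has to carry the new counters as optional fields. A minimal sketch of such a model, assuming a pydantic-style base class; the class name UsageSketch is hypothetical and this is not the actual types/utils.py code:

from typing import Optional

from pydantic import BaseModel


class UsageSketch(BaseModel):
    # Standard OpenAI-style token counters.
    prompt_tokens: int = 0
    completion_tokens: int = 0
    total_tokens: int = 0
    # Deepseek prompt-cache counters; left as None when the provider does
    # not report them, so existing consumers are unaffected.
    prompt_cache_hit_tokens: Optional[int] = None
    prompt_cache_miss_tokens: Optional[int] = None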
@@ -5825,6 +5825,8 @@ def convert_to_model_response_object(
                 model_response_object.usage.completion_tokens = response_object["usage"].get("completion_tokens", 0)  # type: ignore
                 model_response_object.usage.prompt_tokens = response_object["usage"].get("prompt_tokens", 0)  # type: ignore
                 model_response_object.usage.total_tokens = response_object["usage"].get("total_tokens", 0)  # type: ignore
+                model_response_object.usage.prompt_cache_hit_tokens = response_object["usage"].get("prompt_cache_hit_tokens", None)  # type: ignore
+                model_response_object.usage.prompt_cache_miss_tokens = response_object["usage"].get("prompt_cache_miss_tokens", None)  # type: ignore

             if "created" in response_object:
                 model_response_object.created = response_object["created"] or int(
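Once populated, the counters are readable straight off the response's usage object. A hedged usage sketch, assuming a deepseek model that reports prompt caching stats; the model string and prompt are placeholders:

import litellm

response = litellm.completion(
    model="deepseek/deepseek-chat",
    messages=[{"role": "user", "content": "Hello"}],
)

usage = response.usage
print(usage.prompt_tokens, usage.completion_tokens, usage.total_tokens)

# The new fields are only set when the provider returns them, so guard
# the read with getattr.
cache_hits = getattr(usage, "prompt_cache_hit_tokens", None)
cache_misses = getattr(usage, "prompt_cache_miss_tokens", None)
if cache_hits is not None:
    print(f"prompt cache hit tokens: {cache_hits}, miss tokens: {cache_misses}")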