Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 03:04:13 +00:00)
Add pyright to ci/cd + Fix remaining type-checking errors (#6082)
* fix: fix type-checking errors
* fix: fix additional type-checking errors
* fix: additional type-checking error fixes
* fix: fix additional type-checking errors
* fix: additional type-check fixes
* fix: fix all type-checking errors + add pyright to ci/cd
* fix: fix incorrect import
* ci(config.yml): use mypy on ci/cd
* fix: fix type-checking errors in utils.py
* fix: fix all type-checking errors on main.py
* fix: fix mypy linting errors
* fix(anthropic/cost_calculator.py): fix linting errors
* fix: fix mypy linting errors
* fix: fix linting errors
Parent: f7ce1173f3
Commit: fac3b2ee42

65 changed files with 619 additions and 522 deletions
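The squashed messages above are mostly repeated rounds of satisfying pyright. For readers unfamiliar with the class of error involved, here is a minimal illustrative sketch of the most common pattern (hypothetical names, not code from this diff): pyright rejects attribute access on a possibly-`None` value until an explicit guard narrows the type.

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class PromptTokensDetails:
    cached_tokens: Optional[int] = None


@dataclass
class Usage:
    prompt_tokens_details: Optional[PromptTokensDetails] = None


def cached_tokens(usage: Optional[Usage]) -> int:
    # Without these guards pyright reports that "prompt_tokens_details"
    # is not a known attribute of "None". The explicit None checks narrow
    # the Optional types and satisfy the checker.
    if usage is None or usage.prompt_tokens_details is None:
        return 0
    return usage.prompt_tokens_details.cached_tokens or 0
```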
````
@@ -10,12 +10,40 @@ import litellm
import pytest


def _usage_format_tests(usage: litellm.Usage):
    """
    OpenAI prompt caching
    - prompt_tokens = sum of non-cache hit tokens + cache-hit tokens
    - total_tokens = prompt_tokens + completion_tokens

    Example
    ```
    "usage": {
        "prompt_tokens": 2006,
        "completion_tokens": 300,
        "total_tokens": 2306,
        "prompt_tokens_details": {
            "cached_tokens": 1920
        },
        "completion_tokens_details": {
            "reasoning_tokens": 0
        }
        # ANTHROPIC_ONLY #
        "cache_creation_input_tokens": 0
    }
    ```
    """
    assert usage.total_tokens == usage.prompt_tokens + usage.completion_tokens

    assert usage.prompt_tokens > usage.prompt_tokens_details.cached_tokens


@pytest.mark.parametrize(
    "model",
    [
        "anthropic/claude-3-5-sonnet-20240620",
        "openai/gpt-4o",
        "deepseek/deepseek-chat",
        # "openai/gpt-4o",
        # "deepseek/deepseek-chat",
    ],
)
def test_prompt_caching_model(model):
````
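The two assertions encode the invariants from the docstring: the totals must add up, and cached tokens are counted inside `prompt_tokens` rather than on top of them. A quick stand-in check, using the docstring's numbers (`SimpleNamespace` serves here as a duck-typed substitute for `litellm.Usage`):

```python
from types import SimpleNamespace

# Duck-typed stand-in for litellm.Usage, using the docstring's example:
# 1920 of the 2006 prompt tokens were served from cache.
usage = SimpleNamespace(
    prompt_tokens=2006,
    completion_tokens=300,
    total_tokens=2306,
    prompt_tokens_details=SimpleNamespace(cached_tokens=1920),
)

_usage_format_tests(usage)  # passes: 2306 == 2006 + 300 and 2006 > 1920
```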
```
@@ -66,9 +94,13 @@ def test_prompt_caching_model(model):
        max_tokens=10,
    )

    _usage_format_tests(response.usage)

    print("response=", response)
    print("response.usage=", response.usage)

    _usage_format_tests(response.usage)

    assert "prompt_tokens_details" in response.usage
    assert response.usage.prompt_tokens_details.cached_tokens > 0
```
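The test body elided between the two hunks makes the actual completion calls. A hedged sketch of what such a call can look like (the message contents are assumed, following Anthropic's documented `cache_control` convention as supported by litellm, and are not copied from this diff):

```python
import litellm

# A large, reusable system prompt marked cacheable, followed by a short user
# turn. A second identical call would then report cached_tokens > 0 in
# response.usage.prompt_tokens_details.
response = litellm.completion(
    model="anthropic/claude-3-5-sonnet-20240620",
    messages=[
        {
            "role": "system",
            "content": [
                {
                    "type": "text",
                    "text": "Long, reusable instructions... " * 200,
                    "cache_control": {"type": "ephemeral"},
                }
            ],
        },
        {"role": "user", "content": "Summarize the instructions."},
    ],
    max_tokens=10,
)
print("response.usage=", response.usage)
```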