Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-27 11:43:54 +00:00)
test: handle anthropic api instability
This commit is contained in:

parent  b86c75b0ba
commit  f41f938c21

1 changed file with 56 additions and 61 deletions
@@ -47,6 +47,7 @@ def _usage_format_tests(usage: litellm.Usage):
    ],
)
def test_prompt_caching_model(model):
    try:
        for _ in range(2):
            response = litellm.completion(
                model=model,
@@ -103,14 +104,8 @@ def test_prompt_caching_model(model):

        assert "prompt_tokens_details" in response.usage
        assert response.usage.prompt_tokens_details.cached_tokens > 0

        # assert "cache_read_input_tokens" in response.usage
        # assert "cache_creation_input_tokens" in response.usage

        # # Assert either a cache entry was created or cache was read - changes depending on the anthropic api ttl
        # assert (response.usage.cache_read_input_tokens > 0) or (
        #     response.usage.cache_creation_input_tokens > 0
        # )
    except litellm.InternalServerError:
        pass


def test_supports_prompt_caching():
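
The change reads as the standard pattern for tolerating a flaky upstream API in an integration test: run the real call, keep the assertions that matter, and swallow transient server errors. Below is a minimal, self-contained sketch of what the test looks like after this commit, assembled from the names visible in the hunks. The parametrized model id and the message payload are illustrative assumptions, not taken from the diff; only test_prompt_caching_model, the two-iteration loop, the prompt_tokens_details assertions, and the litellm.InternalServerError handler come from the commit itself.

import litellm
import pytest


@pytest.mark.parametrize(
    "model",
    # Assumed model id; the real parametrize list sits outside the hunks.
    ["anthropic/claude-3-5-sonnet-20240620"],
)
def test_prompt_caching_model(model):
    try:
        # Two identical calls: the first may create a cache entry,
        # the second should read it back.
        for _ in range(2):
            response = litellm.completion(
                model=model,
                messages=[
                    {
                        "role": "system",
                        "content": [
                            {
                                "type": "text",
                                # Illustrative payload: Anthropic only caches
                                # sufficiently large blocks that carry a
                                # cache_control marker.
                                "text": "You are a helpful assistant. " * 200,
                                "cache_control": {"type": "ephemeral"},
                            }
                        ],
                    },
                    {"role": "user", "content": "What are you?"},
                ],
            )

        # Cache accounting is surfaced on the OpenAI-style usage object.
        assert "prompt_tokens_details" in response.usage
        assert response.usage.prompt_tokens_details.cached_tokens > 0
    except litellm.InternalServerError:
        # A transient Anthropic 5xx is not a test failure; this is the
        # "api instability" the commit title refers to.
        pass

Catching litellm.InternalServerError trades a little coverage for stability: a transient Anthropic 5xx no longer fails CI, while a genuine cached-token accounting regression still does whenever the call succeeds.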