mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-24 18:24:20 +00:00
test(test_streaming.py): set cache to none
parent 2c32f4a588
commit 697497cdfa

2 changed files with 4 additions and 1 deletion
@@ -14,7 +14,7 @@ import litellm
 from litellm import embedding, completion, completion_cost
 from litellm import RateLimitError
 litellm.num_retries = 3
-
+litellm.cache = None
 user_message = "Write a short poem about the sky"
 messages = [{"content": user_message, "role": "user"}]
 
@@ -24,6 +24,7 @@ def logger_fn(user_model_dict):
 
 def test_completion_custom_provider_model_name():
     try:
+        litellm.cache = None
         response = completion(
             model="together_ai/togethercomputer/llama-2-70b-chat",
             messages=messages,
@@ -41,6 +42,7 @@ def test_completion_custom_provider_model_name():
 
 def test_completion_claude():
     litellm.set_verbose = False
+    litellm.cache = None
     litellm.AnthropicConfig(max_tokens_to_sample=200, metadata={"user_id": "1224"})
     try:
         # test without max tokens
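For context, the pattern this commit applies is resetting litellm's module-level cache between tests: once litellm.cache has been assigned a cache object anywhere in the suite, later completion() calls may be served a stored response instead of hitting the provider, so each test sets litellm.cache = None before making its request. Below is a minimal sketch of that pattern; the test name and model are hypothetical and not taken from this commit.

import litellm
from litellm import completion

def test_fresh_completion():
    # An earlier test may have enabled caching globally, e.g. something like
    # litellm.cache = litellm.Cache(), which is then shared by all calls.
    # Reset it so this test always makes a fresh request.
    litellm.cache = None
    response = completion(
        model="gpt-3.5-turbo",  # hypothetical model for illustration
        messages=[{"content": "Write a short poem about the sky", "role": "user"}],
    )
    assert response.choices[0].message.content is not None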