forked from phoenix/litellm-mirror
(test) caching, swap out unstable model
parent eb3cab85fb
commit 336c8af01e
1 changed file with 3 additions and 2 deletions
@@ -127,9 +127,10 @@ def test_caching_with_models_v2():
     ]
     litellm.cache = Cache()
     print("test2 for caching")
+    litellm.set_verbose = True
     response1 = completion(model="gpt-3.5-turbo", messages=messages, caching=True)
     response2 = completion(model="gpt-3.5-turbo", messages=messages, caching=True)
-    response3 = completion(model="command-nightly", messages=messages, caching=True)
+    response3 = completion(model="azure/chatgpt-v-2", messages=messages, caching=True)
     print(f"response1: {response1}")
     print(f"response2: {response2}")
     print(f"response3: {response3}")
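For context, test_caching_with_models_v2 exercises litellm's completion-level cache; the hunk above swaps the flaky command-nightly model for azure/chatgpt-v-2 and turns on verbose logging. Below is a minimal, self-contained sketch of the pattern being tested, assuming litellm's in-memory Cache, valid provider credentials in the environment, and an illustrative content-equality check that is not the test file's exact assertion:

import litellm
from litellm import completion
from litellm.caching import Cache

# Route completion() calls through an in-memory cache; entries are keyed
# on the call's parameters (model, messages, temperature, ...).
litellm.cache = Cache()
litellm.set_verbose = True  # log cache hits and misses while debugging

messages = [{"role": "user", "content": "write a one-line poem"}]

# Identical parameters: the second call should be served from the cache.
response1 = completion(model="gpt-3.5-turbo", messages=messages, caching=True)
response2 = completion(model="gpt-3.5-turbo", messages=messages, caching=True)

# A different model yields a different cache key, so this is a fresh call.
response3 = completion(model="azure/chatgpt-v-2", messages=messages, caching=True)

# Illustrative check (assumption): a cache hit returns the stored response,
# so the two gpt-3.5-turbo answers come back identical.
assert (
    response1["choices"][0]["message"]["content"]
    == response2["choices"][0]["message"]["content"]
)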
@@ -286,7 +287,7 @@ def test_redis_cache_completion():
     response3 = completion(
         model="gpt-3.5-turbo", messages=messages, caching=True, temperature=0.5
     )
-    response4 = completion(model="command-nightly", messages=messages, caching=True)
+    response4 = completion(model="azure/chatgpt-v-2", messages=messages, caching=True)
 
     print("\nresponse 1", response1)
     print("\nresponse 2", response2)
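test_redis_cache_completion runs the same pattern against a Redis-backed cache, and this hunk again replaces command-nightly with the more stable azure/chatgpt-v-2. A minimal sketch, assuming litellm's Cache(type="redis", ...) constructor and REDIS_HOST, REDIS_PORT, and REDIS_PASSWORD set in the environment:

import os

import litellm
from litellm import completion
from litellm.caching import Cache

# Same completion-level cache, but entries live in Redis, so they survive
# process restarts and can be shared across workers.
litellm.cache = Cache(
    type="redis",
    host=os.environ["REDIS_HOST"],
    port=os.environ["REDIS_PORT"],
    password=os.environ["REDIS_PASSWORD"],
)

messages = [{"role": "user", "content": "write a one-line poem"}]

response1 = completion(model="gpt-3.5-turbo", messages=messages, caching=True)
# temperature is part of the cache key, so this call is not a cache hit.
response3 = completion(
    model="gpt-3.5-turbo", messages=messages, caching=True, temperature=0.5
)
response4 = completion(model="azure/chatgpt-v-2", messages=messages, caching=True)

print("\nresponse 1", response1)
print("\nresponse 3", response3)
print("\nresponse 4", response4)

The model swap matters in both tests because an unstable model makes the uncached calls fail intermittently, which looks like a caching bug rather than a provider outage.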