Fix `should_use_cache`

This commit is contained in:
Ishaan Jaff 2024-08-24 09:37:41 -07:00
parent 0d03b807b9
commit cad77c5969
2 changed files with 16 additions and 10 deletions

View file

@ -1941,6 +1941,8 @@ async def test_cache_default_off_acompletion():
)
print(f"Response3: {response3}")
await asyncio.sleep(2)
response4 = await litellm.acompletion(
model="gpt-3.5-turbo",
messages=[