mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-27 03:34:10 +00:00
(test) batch writing to cache
This commit is contained in:
parent
412a56eea4
commit
a0601723e5
1 changed file with 42 additions and 0 deletions
|
@ -386,6 +386,48 @@ async def test_redis_cache_basic():
|
|||
assert stored_val["id"] == response1.id
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_redis_batch_cache_write():
    """
    Verify batched writes to the Redis cache.

    Configures a Redis-backed cache with a flush size of 2, issues two
    completions to fill the write batch, waits for the batch to flush,
    then repeats the first request and asserts it is served from cache.
    """
    import uuid

    litellm.set_verbose = True

    # Unique prompt per run so earlier test runs can't pollute the cache.
    messages = [
        {"role": "user", "content": f"write a one sentence poem about: {uuid.uuid4()}"},
    ]

    litellm.cache = Cache(
        type="redis",
        host=os.environ["REDIS_HOST"],
        port=os.environ["REDIS_PORT"],
        password=os.environ["REDIS_PASSWORD"],
        redis_flush_size=2,
    )

    # First call: queued in the batch buffer, not yet flushed to Redis.
    first_response = await litellm.acompletion(
        model="gpt-3.5-turbo",
        messages=messages,
    )

    # Second call (mocked): fills the batch to its flush size of 2.
    second_response = await litellm.acompletion(
        model="anthropic/claude-3-opus-20240229",
        messages=messages,
        mock_response="good morning from this test",
    )

    # we hit the flush size, this will now send to redis
    await asyncio.sleep(2)

    # Same model + messages as the first call — should be a cache hit.
    cached_response = await litellm.acompletion(
        model="gpt-3.5-turbo",
        messages=messages,
    )

    assert first_response.id == cached_response.id
|
||||
|
||||
|
||||
def test_redis_cache_completion():
|
||||
litellm.set_verbose = False
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue