From 45130c2d4c5bb1fcd4d03203c41ccebcf998e22f Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Thu, 21 Nov 2024 12:41:09 -0800
Subject: [PATCH] fix tests using in_memory_llm_clients_cache

---
 tests/image_gen_tests/test_image_generation.py |  9 +++++----
 tests/local_testing/test_alangfuse.py          | 13 ++++++++++---
 2 files changed, 15 insertions(+), 7 deletions(-)

diff --git a/tests/image_gen_tests/test_image_generation.py b/tests/image_gen_tests/test_image_generation.py
index 692a0e4e9..6605b3e3d 100644
--- a/tests/image_gen_tests/test_image_generation.py
+++ b/tests/image_gen_tests/test_image_generation.py
@@ -8,6 +8,7 @@ import traceback
 
 from dotenv import load_dotenv
 from openai.types.image import Image
+from litellm.caching import InMemoryCache
 
 logging.basicConfig(level=logging.DEBUG)
 load_dotenv()
@@ -107,7 +108,7 @@ class TestVertexImageGeneration(BaseImageGenTest):
         # comment this when running locally
         load_vertex_ai_credentials()
 
-        litellm.in_memory_llm_clients_cache = {}
+        litellm.in_memory_llm_clients_cache = InMemoryCache()
         return {
             "model": "vertex_ai/imagegeneration@006",
             "vertex_ai_project": "adroit-crow-413218",
@@ -118,13 +119,13 @@ class TestVertexImageGeneration(BaseImageGenTest):
 
 class TestBedrockSd3(BaseImageGenTest):
     def get_base_image_generation_call_args(self) -> dict:
-        litellm.in_memory_llm_clients_cache = {}
+        litellm.in_memory_llm_clients_cache = InMemoryCache()
         return {"model": "bedrock/stability.sd3-large-v1:0"}
 
 
 class TestBedrockSd1(BaseImageGenTest):
     def get_base_image_generation_call_args(self) -> dict:
-        litellm.in_memory_llm_clients_cache = {}
+        litellm.in_memory_llm_clients_cache = InMemoryCache()
         return {"model": "bedrock/stability.sd3-large-v1:0"}
 
 
@@ -181,7 +182,7 @@ def test_image_generation_azure_dall_e_3():
 @pytest.mark.asyncio
 async def test_aimage_generation_bedrock_with_optional_params():
     try:
-        litellm.in_memory_llm_clients_cache = {}
+        litellm.in_memory_llm_clients_cache = InMemoryCache()
         response = await litellm.aimage_generation(
             prompt="A cute baby sea otter",
             model="bedrock/stability.stable-diffusion-xl-v1",
diff --git a/tests/local_testing/test_alangfuse.py b/tests/local_testing/test_alangfuse.py
index 8c69f567b..78c9805da 100644
--- a/tests/local_testing/test_alangfuse.py
+++ b/tests/local_testing/test_alangfuse.py
@@ -7,6 +7,8 @@ import sys
 from typing import Any
 from unittest.mock import MagicMock, patch
 
+from litellm.caching import InMemoryCache
+
 logging.basicConfig(level=logging.DEBUG)
 
 sys.path.insert(0, os.path.abspath("../.."))
@@ -29,15 +31,20 @@ def langfuse_client():
         f"{os.environ['LANGFUSE_PUBLIC_KEY']}-{os.environ['LANGFUSE_SECRET_KEY']}"
     )
     # use a in memory langfuse client for testing, RAM util on ci/cd gets too high when we init many langfuse clients
-    if _langfuse_cache_key in litellm.in_memory_llm_clients_cache:
-        langfuse_client = litellm.in_memory_llm_clients_cache[_langfuse_cache_key]
+
+    _cached_client = litellm.in_memory_llm_clients_cache.get_cache(_langfuse_cache_key)
+    if _cached_client:
+        langfuse_client = _cached_client
     else:
         langfuse_client = langfuse.Langfuse(
            public_key=os.environ["LANGFUSE_PUBLIC_KEY"],
            secret_key=os.environ["LANGFUSE_SECRET_KEY"],
            host=None,
        )
-        litellm.in_memory_llm_clients_cache[_langfuse_cache_key] = langfuse_client
+        litellm.in_memory_llm_clients_cache.set_cache(
+            key=_langfuse_cache_key,
+            value=langfuse_client,
+        )
     print("NEW LANGFUSE CLIENT")
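
Note: the sketch below illustrates the usage pattern this patch moves the tests to, assuming only what is visible in the diff: litellm.in_memory_llm_clients_cache becomes a litellm.caching.InMemoryCache object instead of a plain dict, so reads go through get_cache(key) and writes through set_cache(key=..., value=...). The cache key and stand-in client object are hypothetical, for illustration only.

    import litellm
    from litellm.caching import InMemoryCache

    # Reset the shared client cache the way the updated tests do:
    # a fresh InMemoryCache instead of a bare {}.
    litellm.in_memory_llm_clients_cache = InMemoryCache()

    cache_key = "example-client-key"  # hypothetical key, for illustration
    cached_client = litellm.in_memory_llm_clients_cache.get_cache(cache_key)
    if cached_client is None:
        cached_client = object()  # stand-in for a real client (e.g. a Langfuse client)
        litellm.in_memory_llm_clients_cache.set_cache(key=cache_key, value=cached_client)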