fix tests using in_memory_llm_clients_cache

Ishaan Jaff 2024-11-21 12:41:09 -08:00
parent e63ea48894
commit 45130c2d4c
2 changed files with 15 additions and 7 deletions
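
What changed: litellm.in_memory_llm_clients_cache is now an InMemoryCache instance instead of a plain dict, so these tests can no longer reset it with `{}` or read and write it with `[]` indexing. They construct a fresh InMemoryCache() and go through its get_cache / set_cache methods instead. A minimal sketch of the access-pattern change, assuming only the interface this diff itself uses (get_cache(key) returning None on a miss, set_cache(key=..., value=...)):

```python
import litellm
from litellm.caching import InMemoryCache

# Reset the global client cache (previously: litellm.in_memory_llm_clients_cache = {}).
litellm.in_memory_llm_clients_cache = InMemoryCache()

# Old dict-style access no longer applies:
#   litellm.in_memory_llm_clients_cache["some-key"] = client
#   client = litellm.in_memory_llm_clients_cache["some-key"]

# New method-based access, as used in the diff below:
litellm.in_memory_llm_clients_cache.set_cache(key="some-key", value="some-client")
client = litellm.in_memory_llm_clients_cache.get_cache("some-key")  # None on a miss
```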


@@ -8,6 +8,7 @@ import traceback
 from dotenv import load_dotenv
 from openai.types.image import Image
+from litellm.caching import InMemoryCache

 logging.basicConfig(level=logging.DEBUG)
 load_dotenv()
@@ -107,7 +108,7 @@ class TestVertexImageGeneration(BaseImageGenTest):
         # comment this when running locally
         load_vertex_ai_credentials()
-        litellm.in_memory_llm_clients_cache = {}
+        litellm.in_memory_llm_clients_cache = InMemoryCache()
         return {
             "model": "vertex_ai/imagegeneration@006",
             "vertex_ai_project": "adroit-crow-413218",
@@ -118,13 +119,13 @@ class TestVertexImageGeneration(BaseImageGenTest):

 class TestBedrockSd3(BaseImageGenTest):
     def get_base_image_generation_call_args(self) -> dict:
-        litellm.in_memory_llm_clients_cache = {}
+        litellm.in_memory_llm_clients_cache = InMemoryCache()
         return {"model": "bedrock/stability.sd3-large-v1:0"}


 class TestBedrockSd1(BaseImageGenTest):
     def get_base_image_generation_call_args(self) -> dict:
-        litellm.in_memory_llm_clients_cache = {}
+        litellm.in_memory_llm_clients_cache = InMemoryCache()
         return {"model": "bedrock/stability.sd3-large-v1:0"}
@@ -181,7 +182,7 @@ def test_image_generation_azure_dall_e_3():
 @pytest.mark.asyncio
 async def test_aimage_generation_bedrock_with_optional_params():
     try:
-        litellm.in_memory_llm_clients_cache = {}
+        litellm.in_memory_llm_clients_cache = InMemoryCache()
         response = await litellm.aimage_generation(
             prompt="A cute baby sea otter",
             model="bedrock/stability.stable-diffusion-xl-v1",

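Each test above resets the shared client cache inline by assigning a fresh InMemoryCache() before making calls, so clients cached by one test cannot leak into the next. A hypothetical autouse pytest fixture (not part of this commit; the fixture name is illustrative) could centralize that reset:

```python
import litellm
import pytest
from litellm.caching import InMemoryCache


@pytest.fixture(autouse=True)
def fresh_llm_client_cache():
    # Swap in an empty cache before each test so cached LLM clients
    # never carry over between test cases.
    litellm.in_memory_llm_clients_cache = InMemoryCache()
    yield
```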

@@ -7,6 +7,8 @@ import sys
 from typing import Any
 from unittest.mock import MagicMock, patch

+from litellm.caching import InMemoryCache
+
 logging.basicConfig(level=logging.DEBUG)

 sys.path.insert(0, os.path.abspath("../.."))
@@ -29,15 +31,20 @@ def langfuse_client():
         f"{os.environ['LANGFUSE_PUBLIC_KEY']}-{os.environ['LANGFUSE_SECRET_KEY']}"
     )
     # use an in-memory langfuse client for testing; RAM utilization on ci/cd gets too high when we init many langfuse clients
-    if _langfuse_cache_key in litellm.in_memory_llm_clients_cache:
-        langfuse_client = litellm.in_memory_llm_clients_cache[_langfuse_cache_key]
+    _cached_client = litellm.in_memory_llm_clients_cache.get_cache(_langfuse_cache_key)
+    if _cached_client:
+        langfuse_client = _cached_client
     else:
         langfuse_client = langfuse.Langfuse(
             public_key=os.environ["LANGFUSE_PUBLIC_KEY"],
             secret_key=os.environ["LANGFUSE_SECRET_KEY"],
             host=None,
         )
-        litellm.in_memory_llm_clients_cache[_langfuse_cache_key] = langfuse_client
+        litellm.in_memory_llm_clients_cache.set_cache(
+            key=_langfuse_cache_key,
+            value=langfuse_client,
+        )
         print("NEW LANGFUSE CLIENT")