forked from phoenix/litellm-mirror
fix tests using in_memory_llm_clients_cache
This commit is contained in:
parent e63ea48894
commit 45130c2d4c
2 changed files with 15 additions and 7 deletions
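The change itself is mechanical: litellm.in_memory_llm_clients_cache is now an InMemoryCache object rather than a plain dict, so the tests reset it by constructing InMemoryCache() and read and write it through get_cache/set_cache instead of dict indexing. A minimal sketch of that access pattern, assuming only the methods visible in the hunks below (the exact signatures live in litellm.caching and may differ):

import litellm
from litellm.caching import InMemoryCache

# Start from a fresh cache object instead of assigning a plain dict.
litellm.in_memory_llm_clients_cache = InMemoryCache()

# Reads and writes go through the cache API rather than dict indexing.
litellm.in_memory_llm_clients_cache.set_cache(key="example-client-key", value="example-client")
cached = litellm.in_memory_llm_clients_cache.get_cache("example-client-key")
print(cached)  # stored value on a hit; a miss returns a falsy value, as the fixture below relies on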
@@ -8,6 +8,7 @@ import traceback
 from dotenv import load_dotenv
 from openai.types.image import Image
+from litellm.caching import InMemoryCache
 
 logging.basicConfig(level=logging.DEBUG)
 load_dotenv()
 
@@ -107,7 +108,7 @@ class TestVertexImageGeneration(BaseImageGenTest):
         # comment this when running locally
         load_vertex_ai_credentials()
 
-        litellm.in_memory_llm_clients_cache = {}
+        litellm.in_memory_llm_clients_cache = InMemoryCache()
         return {
             "model": "vertex_ai/imagegeneration@006",
             "vertex_ai_project": "adroit-crow-413218",
@@ -118,13 +119,13 @@ class TestVertexImageGeneration(BaseImageGenTest):
 
 class TestBedrockSd3(BaseImageGenTest):
     def get_base_image_generation_call_args(self) -> dict:
-        litellm.in_memory_llm_clients_cache = {}
+        litellm.in_memory_llm_clients_cache = InMemoryCache()
         return {"model": "bedrock/stability.sd3-large-v1:0"}
 
 
 class TestBedrockSd1(BaseImageGenTest):
     def get_base_image_generation_call_args(self) -> dict:
-        litellm.in_memory_llm_clients_cache = {}
+        litellm.in_memory_llm_clients_cache = InMemoryCache()
         return {"model": "bedrock/stability.sd3-large-v1:0"}
 
 
@@ -181,7 +182,7 @@ def test_image_generation_azure_dall_e_3():
 @pytest.mark.asyncio
 async def test_aimage_generation_bedrock_with_optional_params():
     try:
-        litellm.in_memory_llm_clients_cache = {}
+        litellm.in_memory_llm_clients_cache = InMemoryCache()
         response = await litellm.aimage_generation(
            prompt="A cute baby sea otter",
            model="bedrock/stability.stable-diffusion-xl-v1",
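Each hunk above boils down to the same setup step: swap in a fresh InMemoryCache before the litellm call so no provider client is reused across tests. A standalone sketch of that step around litellm.aimage_generation (the wrapper function name is invented here, and running it requires Bedrock credentials):

import asyncio

import litellm
from litellm.caching import InMemoryCache


async def generate_with_fresh_clients():
    # Same setup the tests above perform: drop any cached provider clients first.
    litellm.in_memory_llm_clients_cache = InMemoryCache()
    return await litellm.aimage_generation(
        prompt="A cute baby sea otter",
        model="bedrock/stability.stable-diffusion-xl-v1",
    )


# asyncio.run(generate_with_fresh_clients())  # uncomment once AWS credentials are configured

The second changed file below applies the same migration to the Langfuse test fixture, where the cache is read as well as written.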
@@ -7,6 +7,8 @@ import sys
 from typing import Any
 from unittest.mock import MagicMock, patch
 
+from litellm.caching import InMemoryCache
+
 logging.basicConfig(level=logging.DEBUG)
 sys.path.insert(0, os.path.abspath("../.."))
 
@@ -29,15 +31,20 @@ def langfuse_client():
         f"{os.environ['LANGFUSE_PUBLIC_KEY']}-{os.environ['LANGFUSE_SECRET_KEY']}"
     )
     # use a in memory langfuse client for testing, RAM util on ci/cd gets too high when we init many langfuse clients
-    if _langfuse_cache_key in litellm.in_memory_llm_clients_cache:
-        langfuse_client = litellm.in_memory_llm_clients_cache[_langfuse_cache_key]
+
+    _cached_client = litellm.in_memory_llm_clients_cache.get_cache(_langfuse_cache_key)
+    if _cached_client:
+        langfuse_client = _cached_client
     else:
         langfuse_client = langfuse.Langfuse(
             public_key=os.environ["LANGFUSE_PUBLIC_KEY"],
             secret_key=os.environ["LANGFUSE_SECRET_KEY"],
             host=None,
         )
-        litellm.in_memory_llm_clients_cache[_langfuse_cache_key] = langfuse_client
+        litellm.in_memory_llm_clients_cache.set_cache(
+            key=_langfuse_cache_key,
+            value=langfuse_client,
+        )
 
         print("NEW LANGFUSE CLIENT")
 
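Taken together, the fixture now implements a get-or-create lookup against the shared client cache. A condensed sketch of that flow, with an invented helper name, assuming litellm.in_memory_llm_clients_cache has already been set to an InMemoryCache as in the tests above:

import os

import langfuse
import litellm


def get_or_create_langfuse_client(cache_key: str) -> langfuse.Langfuse:
    # Reuse a client cached under this key; otherwise build one and cache it.
    cached = litellm.in_memory_llm_clients_cache.get_cache(cache_key)
    if cached:
        return cached
    client = langfuse.Langfuse(
        public_key=os.environ["LANGFUSE_PUBLIC_KEY"],
        secret_key=os.environ["LANGFUSE_SECRET_KEY"],
        host=None,
    )
    litellm.in_memory_llm_clients_cache.set_cache(key=cache_key, value=client)
    return client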