diff --git a/litellm/proxy/tests/test_openai_simple_embedding.py b/litellm/proxy/tests/test_openai_simple_embedding.py
new file mode 100644
index 000000000..7dd38c0b3
--- /dev/null
+++ b/litellm/proxy/tests/test_openai_simple_embedding.py
@@ -0,0 +1,10 @@
+import openai
+
+client = openai.OpenAI(api_key="sk-1234", base_url="http://0.0.0.0:4000")
+
+# request sent to model set on litellm proxy, `litellm --model`
+response = client.embeddings.create(
+    model="text-embedding-ada-002", input=["test"], encoding_format="base64"
+)
+
+print(response)
diff --git a/litellm/tests/test_caching.py b/litellm/tests/test_caching.py
index 835d3611b..2c3c863de 100644
--- a/litellm/tests/test_caching.py
+++ b/litellm/tests/test_caching.py
@@ -387,6 +387,41 @@ async def test_embedding_caching_azure_individual_items_reordered():
     assert embedding_val_1.data[1]["index"] == 1


+@pytest.mark.asyncio
+async def test_embedding_caching_base_64():
+    """Verify base64-encoded embedding responses are cached and identical on a repeat call."""
+    litellm.cache = Cache(
+        type="redis",
+        host=os.environ["REDIS_HOST"],
+        port=os.environ["REDIS_PORT"],
+    )
+    import uuid
+
+    inputs = [
+        f"{uuid.uuid4()} hello this is ishaan",
+        f"{uuid.uuid4()} hello this is ishaan again",
+    ]
+
+    embedding_val_1 = await aembedding(
+        model="azure/azure-embedding-model",
+        input=inputs,
+        caching=True,
+        encoding_format="base64",
+    )
+    embedding_val_2 = await aembedding(
+        model="azure/azure-embedding-model",
+        input=inputs,
+        caching=True,
+        encoding_format="base64",
+    )
+
+    print(embedding_val_1)
+    print(embedding_val_2)
+    assert embedding_val_2._hidden_params["cache_hit"] is True
+    assert embedding_val_2.data[0]["embedding"] == embedding_val_1.data[0]["embedding"]
+    assert embedding_val_2.data[1]["embedding"] == embedding_val_1.data[1]["embedding"]
+
+
 @pytest.mark.asyncio
 async def test_redis_cache_basic():
     """