From c929c274f6b2406f39d9cb070b0131ac090e88cf Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Thu, 23 Nov 2023 21:16:50 -0800
Subject: [PATCH] (test) proxy: test_embedding

---
 litellm/tests/test_proxy_cli.py    | 52 ------------------------------
 litellm/tests/test_proxy_server.py | 43 +++++++++++-------------
 2 files changed, 19 insertions(+), 76 deletions(-)
 delete mode 100644 litellm/tests/test_proxy_cli.py

diff --git a/litellm/tests/test_proxy_cli.py b/litellm/tests/test_proxy_cli.py
deleted file mode 100644
index 43148525c1..0000000000
--- a/litellm/tests/test_proxy_cli.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# COMMENT: This is a new test added today Nov 16th, that is flaky - will need to look into this and update what's going wrong here
-# import subprocess
-# import time
-# import openai
-# import pytest
-# from dotenv import load_dotenv
-# import os
-
-# load_dotenv()
-
-# ## This tests the litellm proxy cli, it creates a proxy server and makes a basic chat completion request to gpt-3.5-turbo
-# ## Do not comment this test out
-
-# def test_basic_proxy_cli_command():
-
-#     # Command to run
-#     print("current working dir", os.getcwd())
-
-#     command = "python3 litellm/proxy/proxy_cli.py --model gpt-3.5-turbo --port 51670 --debug"
-#     print("Running command to start proxy")
-
-#     # Start the subprocess asynchronously
-#     process = subprocess.Popen(command, shell=True)
-
-#     # Allow some time for the proxy server to start (adjust as needed)
-#     time.sleep(1)
-
-#     # Make a request using the openai package
-#     client = openai.OpenAI(
-#         api_key="Your API Key",  # Replace with your actual API key
-#         base_url="http://0.0.0.0:51670"
-#     )
-
-#     try:
-#         response = client.chat.completions.create(model="gpt-3.5-turbo", messages=[
-#             {
-#                 "role": "user",
-#                 "content": "this is a test request, write a short poem"
-#             }
-#         ])
-#         print(response)
-#         response_str = response.choices[0].message.content
-#         assert len(response_str) > 10
-#     except Exception as e:
-#         print("Got exception")
-#         print(e)
-#         process.terminate()  # Terminate the subprocess to close down the server
-#         pytest.fail("Basic test, proxy cli failed", e)
-
-#     # Terminate the subprocess to close down the server
-#     process.terminate()
-# test_basic_proxy_cli_command()
diff --git a/litellm/tests/test_proxy_server.py b/litellm/tests/test_proxy_server.py
index 9988a928a7..0a32653ad5 100644
--- a/litellm/tests/test_proxy_server.py
+++ b/litellm/tests/test_proxy_server.py
@@ -45,7 +45,7 @@ def test_chat_completion():
         pytest.fail("LiteLLM Proxy test failed. Exception", e)
 
 # Run the test
-test_chat_completion()
+# test_chat_completion()
 
 
 def test_chat_completion_azure():
@@ -56,7 +56,7 @@ def test_chat_completion_azure():
         "messages": [
             {
                 "role": "user",
-                "content": "hi"
+                "content": "write 1 sentence poem"
             },
         ],
         "max_tokens": 10,
@@ -67,34 +67,29 @@ def test_chat_completion_azure():
         assert response.status_code == 200
         result = response.json()
         print(f"Received response: {result}")
+        assert len(result["choices"][0]["message"]["content"]) > 0
     except Exception as e:
         pytest.fail("LiteLLM Proxy test failed. Exception", e)
 
 # Run the test
-test_chat_completion()
+# test_chat_completion_azure()
 
-# def test_embedding():
-#     try:
-#         # Your test data
-#         test_data = {
-#             "model": "",
-#             "messages": [
-#                 {
-#                     "role": "user",
-#                     "content": "hi"
-#                 },
-#             ],
-#             "max_tokens": 10,
-#         }
-#         print("testing proxy server with OpenAI embedding")
-#         response = client.post("/v1/embeddings", json=test_data)
+def test_embedding():
+    try:
+        test_data = {
+            "model": "azure/azure-embedding-model",
+            "input": ["good morning from litellm"],
+        }
+        print("testing proxy server with OpenAI embedding")
+        response = client.post("/v1/embeddings", json=test_data)
 
-#         assert response.status_code == 200
-#         result = response.json()
-#         print(f"Received response: {result}")
-#     except Exception as e:
-#         pytest.fail("LiteLLM Proxy test failed. Exception", e)
+        assert response.status_code == 200
+        result = response.json()
+        print(len(result["data"][0]["embedding"]))
+        assert len(result["data"][0]["embedding"]) > 10  # this usually has len==1536, so >10 is a safe lower bound
+    except Exception as e:
+        pytest.fail("LiteLLM Proxy test failed. Exception", e)
 
-# # Run the test
+# Run the test
 # test_embedding()
 
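
Note for reviewers (not part of the patch): the COMMENT at the top of the deleted test flags it as flaky, and one likely culprit is the fixed time.sleep(1) before the first request. Below is a minimal sketch of the same start-the-proxy-and-call-it pattern with readiness polling, adapted to the new embedding test. It is illustrative only: it assumes proxy_cli.py still accepts the same flags, that the proxy serves a /health route once up, and that auth is not enforced (none of these verified against this revision).

# Minimal sketch, not project code: poll for readiness instead of sleeping.
import subprocess
import time

import openai
import requests

PORT = 51670  # same port the deleted test used
BASE_URL = f"http://0.0.0.0:{PORT}"

process = subprocess.Popen(
    f"python3 litellm/proxy/proxy_cli.py --model gpt-3.5-turbo --port {PORT}",
    shell=True,
)

try:
    # Wait up to ~30s; any HTTP response means the socket is accepting connections.
    for _ in range(30):
        try:
            requests.get(f"{BASE_URL}/health", timeout=1)  # /health route is assumed
            break
        except requests.exceptions.ConnectionError:
            time.sleep(1)
    else:
        raise RuntimeError("proxy never became reachable")

    client = openai.OpenAI(
        api_key="sk-anything",  # dummy key; assumes the proxy does not enforce auth
        base_url=BASE_URL,
    )
    response = client.embeddings.create(
        model="azure/azure-embedding-model",  # model name from the new test above
        input=["good morning from litellm"],
    )
    assert len(response.data[0].embedding) > 10  # mirrors the check in test_embedding
finally:
    process.terminate()  # always stop the server, even when an assertion fails

Bounded polling avoids racing the server without an arbitrary sleep, and terminate() in the finally block frees the port even on failure, which is what the deleted test's duplicated terminate() calls were trying to do.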