From 39072bd196c8c1b432000c5d471c5b1605ac290b Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Thu, 23 Nov 2023 20:56:40 -0800
Subject: [PATCH] (test) proxy - azure/chat/completion test

---
 litellm/tests/test_proxy_server.py | 46 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 46 insertions(+)

diff --git a/litellm/tests/test_proxy_server.py b/litellm/tests/test_proxy_server.py
index d8eb1ffcd1..9988a928a7 100644
--- a/litellm/tests/test_proxy_server.py
+++ b/litellm/tests/test_proxy_server.py
@@ -46,3 +46,49 @@ def test_chat_completion():
 
 # Run the test
 test_chat_completion()
+
+
+def test_chat_completion_azure():
+    try:
+        # Test data for an Azure-routed chat completion
+        test_data = {
+            "model": "azure/chatgpt-v-2",
+            "messages": [
+                {
+                    "role": "user",
+                    "content": "hi"
+                },
+            ],
+            "max_tokens": 10,
+        }
+        print("testing proxy server with Azure Request")
+        response = client.post("/v1/chat/completions", json=test_data)
+
+        assert response.status_code == 200
+        result = response.json()
+        print(f"Received response: {result}")
+    except Exception as e:
+        pytest.fail(f"LiteLLM Proxy test failed. Exception: {e}")
+
+# Run the test
+test_chat_completion_azure()
+
+
+# def test_embedding():
+#     try:
+#         # Test data for an embedding request
+#         test_data = {
+#             "model": "",
+#             "input": "hi",
+#         }
+#         print("testing proxy server with OpenAI embedding")
+#         response = client.post("/v1/embeddings", json=test_data)
+
+#         assert response.status_code == 200
+#         result = response.json()
+#         print(f"Received response: {result}")
+#     except Exception as e:
+#         pytest.fail(f"LiteLLM Proxy test failed. Exception: {e}")
+
+# # Run the test
+# test_embedding()
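
Note: the hunk begins at line 46 of test_proxy_server.py, so the `client` these tests post to is defined earlier in the file and does not appear in this patch. A minimal sketch of the presumed harness follows, assuming the proxy exposes its FastAPI app as litellm.proxy.proxy_server.app; that import path, and the use of an in-process TestClient, are assumptions about the surrounding file, not part of the diff:

    # Presumed test harness (not part of this patch).
    # Assumption: the LiteLLM proxy exposes a FastAPI app object named `app`
    # in litellm.proxy.proxy_server; TestClient drives it in-process, so the
    # tests need no running server or network access.
    from fastapi.testclient import TestClient
    from litellm.proxy.proxy_server import app  # assumed import path

    client = TestClient(app)

With a harness like this, client.post("/v1/chat/completions", json=test_data) exercises the proxy's routing in-process, which is why the test only asserts on the HTTP status code and the parsed JSON body.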