From 05a469177b32bfc3adf238bbc0f67b9c369a6313 Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Mon, 11 Dec 2023 11:21:46 -0800
Subject: [PATCH] (test) proxy custom logger

---
 .../tests/test_amazing_proxy_custom_logger.py | 76 +++++++++----------
 1 file changed, 38 insertions(+), 38 deletions(-)

diff --git a/litellm/tests/test_amazing_proxy_custom_logger.py b/litellm/tests/test_amazing_proxy_custom_logger.py
index 15857324e..14752167f 100644
--- a/litellm/tests/test_amazing_proxy_custom_logger.py
+++ b/litellm/tests/test_amazing_proxy_custom_logger.py
@@ -50,6 +50,44 @@ headers = {
     "Authorization": f"Bearer {token}"
 }
 
+
+@pytest.mark.no_parallel
+def test_embedding(client):
+    try:
+        # Your test data
+        print("initialized proxy")
+        # import the initialized custom logger
+        print(litellm.callbacks)
+
+        assert len(litellm.callbacks) == 1 # assert litellm is initialized with 1 callback
+        my_custom_logger = litellm.callbacks[0]
+        assert my_custom_logger.async_success_embedding == False
+
+        test_data = {
+            "model": "azure-embedding-model",
+            "input": ["hello"]
+        }
+        response = client.post("/embeddings", json=test_data, headers=headers)
+        print("made request", response.status_code, response.text)
+        assert my_custom_logger.async_success_embedding == True # checks if the status of async_success is True, only the async_log_success_event can set this to true
+        assert my_custom_logger.async_embedding_kwargs["model"] == "azure-embedding-model" # checks if kwargs passed to async_log_success_event are correct
+
+        kwargs = my_custom_logger.async_embedding_kwargs
+        litellm_params = kwargs.get("litellm_params")
+        metadata = litellm_params.get("metadata", None)
+        print("\n\n Metadata in custom logger kwargs", litellm_params.get("metadata"))
+        assert metadata is not None
+        assert "user_api_key" in metadata
+        assert "headers" in metadata
+        proxy_server_request = litellm_params.get("proxy_server_request")
+        model_info = litellm_params.get("model_info")
+        assert proxy_server_request == {'url': 'http://testserver/embeddings', 'method': 'POST', 'headers': {'host': 'testserver', 'accept': '*/*', 'accept-encoding': 'gzip, deflate', 'connection': 'keep-alive', 'user-agent': 'testclient', 'authorization': 'Bearer sk-1234', 'content-length': '54', 'content-type': 'application/json'}, 'body': {'model': 'azure-embedding-model', 'input': ['hello']}}
+        assert model_info == {'input_cost_per_token': 0.002, 'mode': 'embedding', 'id': 'hello'}
+        result = response.json()
+        print(f"Received response: {result}")
+    except Exception as e:
+        pytest.fail("LiteLLM Proxy test failed. Exception", e)
+
 @pytest.mark.no_parallel
 def test_chat_completion(client):
     try:
@@ -159,41 +197,3 @@ def test_chat_completion_stream(client):
     except Exception as e:
         pytest.fail("LiteLLM Proxy test failed. Exception", e)
-
-
-@pytest.mark.no_parallel
-def test_embedding(client):
-    try:
-        # Your test data
-        print("initialized proxy")
-        # import the initialized custom logger
-        print(litellm.callbacks)
-
-        assert len(litellm.callbacks) == 1 # assert litellm is initialized with 1 callback
-        my_custom_logger = litellm.callbacks[0]
-        assert my_custom_logger.async_success_embedding == False
-
-        test_data = {
-            "model": "azure-embedding-model",
-            "input": ["hello"]
-        }
-        response = client.post("/embeddings", json=test_data, headers=headers)
-        print("made request", response.status_code, response.text)
-        assert my_custom_logger.async_success_embedding == True # checks if the status of async_success is True, only the async_log_success_event can set this to true
-        assert my_custom_logger.async_embedding_kwargs["model"] == "azure-embedding-model" # checks if kwargs passed to async_log_success_event are correct
-
-        kwargs = my_custom_logger.async_embedding_kwargs
-        litellm_params = kwargs.get("litellm_params")
-        metadata = litellm_params.get("metadata", None)
-        print("\n\n Metadata in custom logger kwargs", litellm_params.get("metadata"))
-        assert metadata is not None
-        assert "user_api_key" in metadata
-        assert "headers" in metadata
-        proxy_server_request = litellm_params.get("proxy_server_request")
-        model_info = litellm_params.get("model_info")
-        assert proxy_server_request == {'url': 'http://testserver/embeddings', 'method': 'POST', 'headers': {'host': 'testserver', 'accept': '*/*', 'accept-encoding': 'gzip, deflate', 'connection': 'keep-alive', 'user-agent': 'testclient', 'authorization': 'Bearer sk-1234', 'content-length': '54', 'content-type': 'application/json'}, 'body': {'model': 'azure-embedding-model', 'input': ['hello']}}
-        assert model_info == {'input_cost_per_token': 0.002, 'mode': 'embedding', 'id': 'hello'}
-        result = response.json()
-        print(f"Received response: {result}")
-    except Exception as e:
-        pytest.fail("LiteLLM Proxy test failed. Exception", e)
\ No newline at end of file