From 8c628b64e039dc661d35875cb10054c0af0d0079 Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Fri, 8 Dec 2023 17:25:03 -0800
Subject: [PATCH] (test) proxy - custom logger

---
 .../tests/test_configs/custom_callbacks.py |  6 ++
 litellm/tests/test_proxy_custom_logger.py  | 62 +++++++++++++++++++
 2 files changed, 68 insertions(+)

diff --git a/litellm/tests/test_configs/custom_callbacks.py b/litellm/tests/test_configs/custom_callbacks.py
index b65abcfc9..94b89e589 100644
--- a/litellm/tests/test_configs/custom_callbacks.py
+++ b/litellm/tests/test_configs/custom_callbacks.py
@@ -17,6 +17,8 @@ class MyCustomHandler(CustomLogger):
         self.async_completion_kwargs_fail = None # type: ignore
         self.async_embedding_kwargs_fail = None # type: ignore
 
+
+        self.streaming_response_obj = None # type: ignore
         blue_color_code = "\033[94m"
         reset_color_code = "\033[0m"
         print(f"{blue_color_code}Initialized LiteLLM custom logger")
@@ -57,6 +59,10 @@ class MyCustomHandler(CustomLogger):
         self.async_success_embedding = True
         self.async_embedding_kwargs = kwargs
         self.async_embedding_response = response_obj
+        if kwargs.get("stream") == True:
+            self.streaming_response_obj = response_obj
+
+
         self.async_completion_kwargs = kwargs
 
         model = kwargs.get("model", None)
diff --git a/litellm/tests/test_proxy_custom_logger.py b/litellm/tests/test_proxy_custom_logger.py
index 503ea4a81..a97f42843 100644
--- a/litellm/tests/test_proxy_custom_logger.py
+++ b/litellm/tests/test_proxy_custom_logger.py
@@ -87,6 +87,68 @@ def test_chat_completion(client):
         pytest.fail("LiteLLM Proxy test failed. Exception", e)
 
 
+def test_chat_completion_stream(client):
+    try:
+        # Your test data
+        import json
+        print("initialized proxy")
+        # the proxy config registers the custom logger in litellm.callbacks
+        print(litellm.callbacks)
+
+        assert len(litellm.callbacks) == 1  # assert litellm is initialized with 1 callback
+        my_custom_logger = litellm.callbacks[0]
+
+        assert my_custom_logger.streaming_response_obj is None  # no streaming response obj is set pre call
+
+        test_data = {
+            "model": "Azure OpenAI GPT-4 Canada",
+            "messages": [
+                {
+                    "role": "user",
+                    "content": "write 1 line poem about LiteLLM"
+                },
+            ],
+            "max_tokens": 40,
+            "stream": True  # streaming call
+        }
+
+
+        response = client.post("/chat/completions", json=test_data, headers=headers)
+        print("made request", response.status_code, response.text)
+        complete_response = ""
+        for line in response.iter_lines():
+            if line:
+                # Process the streaming data line here
+                print("\n\n Line", line)
+                print(line)
+                line = str(line)
+
+                json_data = line.replace('data: ', '')
+
+                # Parse the JSON string
+                data = json.loads(json_data)
+
+                print("\n\n decode_data", data)
+
+                # Access the content of choices[0]['delta']['content']
+                content = data['choices'][0]['delta']['content'] or ""
+
+                # Process the content as needed
+                print("Content:", content)
+
+                complete_response += content
+
+        print("\n\nHERE is the complete streaming response string", complete_response)
+        print("\n\nHERE IS the streaming Response from callback\n\n")
+        print(my_custom_logger.streaming_response_obj)
+
+        streamed_response = my_custom_logger.streaming_response_obj
+        assert complete_response == streamed_response["choices"][0]["message"]["content"]
+
+    except Exception as e:
+        pytest.fail(f"LiteLLM Proxy test failed. Exception: {e}")
+
+
 def test_embedding(client):
     try: