From 640e5d0dc90dee85a2334116b5501cf22445a24c Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Tue, 27 Aug 2024 22:44:09 -0700
Subject: [PATCH] test: fix test

---
 litellm/tests/test_completion.py | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py
index c9bcc3a5b..01435d6a3 100644
--- a/litellm/tests/test_completion.py
+++ b/litellm/tests/test_completion.py
@@ -77,7 +77,10 @@ def test_completion_custom_provider_model_name():
 
 
 def _openai_mock_response(*args, **kwargs) -> litellm.ModelResponse:
-    _data = {
+    new_response = MagicMock()
+    new_response.headers = {"hello": "world"}
+
+    response_object = {
         "id": "chatcmpl-123",
         "object": "chat.completion",
         "created": 1677652288,
@@ -87,7 +90,7 @@ def _openai_mock_response(*args, **kwargs) -> litellm.ModelResponse:
             {
                 "index": 0,
                 "message": {
-                    "role": None,
+                    "role": "assistant",
                     "content": "\n\nHello there, how may I assist you today?",
                 },
                 "logprobs": None,
@@ -96,7 +99,13 @@ def _openai_mock_response(*args, **kwargs) -> litellm.ModelResponse:
         ],
         "usage": {"prompt_tokens": 9, "completion_tokens": 12, "total_tokens": 21},
     }
-    return litellm.ModelResponse(**_data)
+    from openai import OpenAI
+    from openai.types.chat.chat_completion import ChatCompletion
+
+    pydantic_obj = ChatCompletion(**response_object)  # type: ignore
+    pydantic_obj.choices[0].message.role = None  # type: ignore
+    new_response.parse.return_value = pydantic_obj
+    return new_response
 
 
 def test_null_role_response():