forked from phoenix/litellm-mirror
test: fix test
parent 1ee0e8a9c3
commit 640e5d0dc9
1 changed file with 12 additions and 3 deletions
@@ -77,7 +77,10 @@ def test_completion_custom_provider_model_name():
 
 
 def _openai_mock_response(*args, **kwargs) -> litellm.ModelResponse:
-    _data = {
+    new_response = MagicMock()
+    new_response.headers = {"hello": "world"}
+
+    response_object = {
         "id": "chatcmpl-123",
         "object": "chat.completion",
         "created": 1677652288,
@@ -87,7 +90,7 @@ def _openai_mock_response(*args, **kwargs) -> litellm.ModelResponse:
             {
                 "index": 0,
                 "message": {
-                    "role": None,
+                    "role": "assistant",
                     "content": "\n\nHello there, how may I assist you today?",
                 },
                 "logprobs": None,
@@ -96,7 +99,13 @@ def _openai_mock_response(*args, **kwargs) -> litellm.ModelResponse:
         ],
         "usage": {"prompt_tokens": 9, "completion_tokens": 12, "total_tokens": 21},
     }
-    return litellm.ModelResponse(**_data)
+    from openai import OpenAI
+    from openai.types.chat.chat_completion import ChatCompletion
+
+    pydantic_obj = ChatCompletion(**response_object)  # type: ignore
+    pydantic_obj.choices[0].message.role = None  # type: ignore
+    new_response.parse.return_value = pydantic_obj
+    return new_response
 
 
 def test_null_role_response():
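The change replaces the hand-built litellm.ModelResponse with a MagicMock shaped like the OpenAI SDK's raw-response object: it carries .headers and a .parse() that returns a real ChatCompletion pydantic model whose choices[0].message.role is forced to None after construction (building it with role=None directly would fail pydantic validation, hence the "assistant" placeholder and the # type: ignore override). Below is a hypothetical, self-contained sketch of how a test like test_null_role_response could drive this kind of mock through litellm; the actual test body is not part of this diff, and the use of client= on litellm.completion plus the patch on chat.completions.with_raw_response.create are assumptions inferred from the mock's .headers/.parse() shape, not code taken from the commit.

# Hypothetical usage sketch -- not the repository's actual test_null_role_response.
from unittest.mock import MagicMock, patch

import litellm
from openai import OpenAI
from openai.types.chat.chat_completion import ChatCompletion


def _mock_raw_response(*args, **kwargs):
    # Compact mirror of the mock in the diff: a raw-response stand-in whose
    # .parse() yields a ChatCompletion with choices[0].message.role set to None.
    raw = MagicMock()
    raw.headers = {"hello": "world"}
    completion = ChatCompletion(
        id="chatcmpl-123",
        object="chat.completion",
        created=1677652288,
        model="gpt-3.5-turbo",
        choices=[
            {
                "index": 0,
                "message": {"role": "assistant", "content": "Hello there!"},
                "logprobs": None,
                "finish_reason": "stop",
            }
        ],
        usage={"prompt_tokens": 9, "completion_tokens": 12, "total_tokens": 21},
    )  # type: ignore
    completion.choices[0].message.role = None  # type: ignore
    raw.parse.return_value = completion
    return raw


def example_null_role_test():
    client = OpenAI(api_key="sk-test")  # dummy key; the network call is mocked

    # Patch the raw-response endpoint so the SDK hands back the mock above.
    with patch.object(
        client.chat.completions.with_raw_response,
        "create",
        side_effect=_mock_raw_response,
    ):
        response = litellm.completion(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hello!"}],
            client=client,
            api_key="sk-test",
        )

    # Expect litellm to default a missing role rather than propagate None.
    assert response.choices[0].message.role == "assistant"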