diff --git a/litellm/litellm_core_utils/prompt_templates/factory.py b/litellm/litellm_core_utils/prompt_templates/factory.py
index 5b11b224bb..d6a671bf60 100644
--- a/litellm/litellm_core_utils/prompt_templates/factory.py
+++ b/litellm/litellm_core_utils/prompt_templates/factory.py
@@ -88,7 +88,10 @@ def map_system_message_pt(messages: list) -> list:
                     next_role == "user" or next_role == "assistant"
                 ):  # Next message is a user or assistant message
                     # Merge system prompt into the next message
-                    next_m["content"] = m["content"] + " " + next_m["content"]
+                    if isinstance(next_m["content"], list):
+                        next_m["content"].insert(0, {"type": "text", "text": m["content"]})
+                    else:
+                        next_m["content"] = m["content"] + " " + next_m["content"]
                 elif next_role == "system":  # Next message is a system message
                     # Append a user message instead of the system message
                     new_message = {"role": "user", "content": m["content"]}
diff --git a/tests/llm_translation/test_optional_params.py b/tests/llm_translation/test_optional_params.py
index e207e367e4..c11f8c9532 100644
--- a/tests/llm_translation/test_optional_params.py
+++ b/tests/llm_translation/test_optional_params.py
@@ -53,6 +53,29 @@ def test_supports_system_message():
     assert isinstance(response, litellm.ModelResponse)
 
 
+def test_supports_system_message_multipart():
+    """
+    Check if litellm.completion(..., supports_system_message=False) works with a multipart user message
+    """
+    messages = [
+        ChatCompletionSystemMessageParam(role="system", content="Listen here!"),
+        ChatCompletionUserMessageParam(role="user", content=[{"type": "text", "text": "Hello there!"}]),
+    ]
+
+    new_messages = map_system_message_pt(messages=messages)
+
+    assert len(new_messages) == 1
+    assert new_messages[0]["role"] == "user"
+
+    ## confirm you can make an openai call with this param
+
+    response = litellm.completion(
+        model="gpt-3.5-turbo", messages=new_messages, supports_system_message=False
+    )
+
+    assert isinstance(response, litellm.ModelResponse)
+
+
 @pytest.mark.parametrize(
     "stop_sequence, expected_count", [("\n", 0), (["\n"], 0), (["finish_reason"], 1)]
 )
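
For reference, a minimal usage sketch of the behavior this patch enables. It is not part of the patch itself; it assumes litellm is installed at a version containing this change and imports map_system_message_pt from the module path shown above.

from litellm.litellm_core_utils.prompt_templates.factory import map_system_message_pt

messages = [
    {"role": "system", "content": "Listen here!"},
    # List-form ("multipart") user content: before this patch, concatenating the
    # system prompt with "+" raised TypeError (str + list); it is now prepended
    # as a {"type": "text", ...} part instead.
    {"role": "user", "content": [{"type": "text", "text": "Hello there!"}]},
]

merged = map_system_message_pt(messages=messages)
print(merged)
# Expected per the patch and the test above: a single user message whose content
# list starts with the system prompt as a text part, e.g.
# [{'role': 'user', 'content': [{'type': 'text', 'text': 'Listen here!'},
#                               {'type': 'text', 'text': 'Hello there!'}]}]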