diff --git a/litellm/llms/openai.py b/litellm/llms/openai.py
index 90846b627..de605edff 100644
--- a/litellm/llms/openai.py
+++ b/litellm/llms/openai.py
@@ -237,14 +237,22 @@ class OpenAIChatCompletion(BaseLLM):
                     status_code=422, message=f"Timeout needs to be a float"
                 )
-            if custom_llm_provider == "mistral":
-                # check if message content passed in as list, and not string
-                messages = prompt_factory(
-                    model=model,
-                    messages=messages,
-                    custom_llm_provider=custom_llm_provider,
-                )
-
+            if custom_llm_provider != "openai":
+                # process all OpenAI compatible provider logic here
+                if custom_llm_provider == "mistral":
+                    # check if message content passed in as list, and not string
+                    messages = prompt_factory(
+                        model=model,
+                        messages=messages,
+                        custom_llm_provider=custom_llm_provider,
+                    )
+                if custom_llm_provider == "perplexity" and messages is not None:
+                    # check if messages.name is passed + supported, if not supported remove
+                    messages = prompt_factory(
+                        model=model,
+                        messages=messages,
+                        custom_llm_provider=custom_llm_provider,
+                    )
 
             for _ in range(
                 2
             ):  # if call fails due to alternating messages, retry with reformatted message
diff --git a/litellm/llms/prompt_templates/factory.py b/litellm/llms/prompt_templates/factory.py
index 616833a2e..a13130c62 100644
--- a/litellm/llms/prompt_templates/factory.py
+++ b/litellm/llms/prompt_templates/factory.py
@@ -556,6 +556,7 @@ def anthropic_messages_pt(messages: list):
     3. Each message must alternate between "user" and "assistant" (this is not addressed as now by litellm)
     4. final assistant content cannot end with trailing whitespace (anthropic raises an error otherwise)
     5. System messages are a separate param to the Messages API (used for tool calling)
+    6. Ensure we only accept role, content. (message.name is not supported)
     """
     ## Ensure final assistant message has no trailing whitespace
     last_assistant_message_idx: Optional[int] = None
@@ -583,7 +584,9 @@ def anthropic_messages_pt(messages: list):
                     new_content.append({"type": "text", "text": m["text"]})
             new_messages.append({"role": messages[0]["role"], "content": new_content})  # type: ignore
         else:
-            new_messages.append(messages[0])
+            new_messages.append(
+                {"role": messages[0]["role"], "content": messages[0]["content"]}
+            )
 
         return new_messages
 
@@ -606,7 +609,9 @@ def anthropic_messages_pt(messages: list):
                     new_content.append({"type": "text", "content": m["text"]})
             new_messages.append({"role": messages[i]["role"], "content": new_content})  # type: ignore
         else:
-            new_messages.append(messages[i])
+            new_messages.append(
+                {"role": messages[i]["role"], "content": messages[i]["content"]}
+            )
 
         if messages[i]["role"] == messages[i + 1]["role"]:
             if messages[i]["role"] == "user":
@@ -897,6 +902,10 @@ def prompt_factory(
             return anthropic_pt(messages=messages)
     elif "mistral." in model:
         return mistral_instruct_pt(messages=messages)
+    elif custom_llm_provider == "perplexity":
+        for message in messages:
+            message.pop("name", None)
+        return messages
     try:
         if "meta-llama/llama-2" in model and "chat" in model:
             return llama_2_chat_pt(messages=messages)
diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py
index 4db664dde..e54617bd9 100644
--- a/litellm/tests/test_completion.py
+++ b/litellm/tests/test_completion.py
@@ -83,12 +83,13 @@ def test_completion_claude():
 
 
 def test_completion_claude_3_empty_response():
+    litellm.set_verbose = True
     messages = [
         {
             "role": "system",
             "content": "You are 2twNLGfqk4GMOn3ffp4p.",
         },
-        {"role": "user", "content": "Hi gm!"},
+        {"role": "user", "content": "Hi gm!", "name": "ishaan"},
         {"role": "assistant", "content": "Good morning! How are you doing today?"},
         {
             "role": "user",
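
For context, a minimal standalone sketch of the behavior the `prompt_factory` branch above introduces: the OpenAI-only `name` field on messages is not accepted by providers such as Perplexity, so it is stripped in place before the request is sent. The helper name `strip_name_field` is illustrative, not part of litellm:

```python
# Standalone sketch of the perplexity branch added to prompt_factory above.
# `strip_name_field` is an illustrative name, not a litellm API.
def strip_name_field(messages: list) -> list:
    for message in messages:
        # `name` is an OpenAI-specific field; providers that don't support
        # it are sent messages with the key removed in place.
        message.pop("name", None)
    return messages

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hi gm!", "name": "ishaan"},
]
print(strip_name_field(messages))
# -> [{'role': 'system', 'content': 'You are a helpful assistant.'},
#     {'role': 'user', 'content': 'Hi gm!'}]
```

Note that the Anthropic path in `anthropic_messages_pt` takes the stricter route of rebuilding each message from only `role` and `content`, which drops `name` along with any other unsupported keys.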