Merge pull request #2399 from BerriAI/litellm_support_name_for_anthropic

[Feat] Support messages.name for claude-3, perplexity ai API
Ishaan Jaff 2024-03-08 10:47:45 -08:00 committed by GitHub
commit 85b981f602
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 29 additions and 11 deletions
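For context, a minimal usage sketch of what this change enables (the model id and `name` value are illustrative; assumes ANTHROPIC_API_KEY is set in the environment). OpenAI-style chat messages may carry an optional `name` field, while the Anthropic and Perplexity APIs only accept `role` and `content`, so litellm now strips `name` during prompt formatting instead of forwarding it:

import litellm

# OpenAI-style message with a `name` field, which claude-3 and
# perplexity do not accept.
messages = [
    {"role": "user", "content": "Hi gm!", "name": "ishaan"},
]

# Before this commit the provider rejected the request; now litellm
# removes `name` before dispatch, so the call succeeds.
response = litellm.completion(model="claude-3-opus-20240229", messages=messages)
print(response.choices[0].message.content)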

View file

@@ -237,6 +237,8 @@ class OpenAIChatCompletion(BaseLLM):
                    status_code=422, message=f"Timeout needs to be a float"
                )
            if custom_llm_provider != "openai":
                # process all OpenAI compatible provider logic here
                if custom_llm_provider == "mistral":
                    # check if message content passed in as list, and not string
                    messages = prompt_factory(
@@ -244,7 +246,13 @@ class OpenAIChatCompletion(BaseLLM):
                        messages=messages,
                        custom_llm_provider=custom_llm_provider,
                    )
                if custom_llm_provider == "perplexity" and messages is not None:
                    # check if messages.name is passed + supported, if not supported remove
                    messages = prompt_factory(
                        model=model,
                        messages=messages,
                        custom_llm_provider=custom_llm_provider,
                    )
            for _ in range(
                2
            ):  # if call fails due to alternating messages, retry with reformatted message

View file

@@ -556,6 +556,7 @@ def anthropic_messages_pt(messages: list):
    3. Each message must alternate between "user" and "assistant" (this is not currently enforced by litellm)
    4. final assistant content cannot end with trailing whitespace (anthropic raises an error otherwise)
    5. System messages are a separate param to the Messages API (used for tool calling)
    6. Ensure we only accept role, content. (message.name is not supported)
    """
    ## Ensure final assistant message has no trailing whitespace
    last_assistant_message_idx: Optional[int] = None
@@ -583,7 +584,9 @@ def anthropic_messages_pt(messages: list):
                    new_content.append({"type": "text", "text": m["text"]})
            new_messages.append({"role": messages[0]["role"], "content": new_content})  # type: ignore
        else:
            new_messages.append(messages[0])
            new_messages.append(
                {"role": messages[0]["role"], "content": messages[0]["content"]}
            )
        return new_messages
@@ -606,7 +609,9 @@ def anthropic_messages_pt(messages: list):
                    new_content.append({"type": "text", "content": m["text"]})
            new_messages.append({"role": messages[i]["role"], "content": new_content})  # type: ignore
        else:
            new_messages.append(messages[i])
            new_messages.append(
                {"role": messages[i]["role"], "content": messages[i]["content"]}
            )

        if messages[i]["role"] == messages[i + 1]["role"]:
            if messages[i]["role"] == "user":
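The two hunks above apply the same pattern: instead of appending the caller's dict unchanged, rebuild it from its `role` and `content` keys so any extra fields such as `name` are dropped, which is what rule 6 in the docstring requires. A standalone sketch of that pattern (not the actual helper):

# Rebuilding a message dict from role/content silently discards
# unsupported keys like `name`.
raw = [{"role": "user", "content": "Hi gm!", "name": "ishaan"}]
sanitized = [{"role": m["role"], "content": m["content"]} for m in raw]
assert sanitized == [{"role": "user", "content": "Hi gm!"}]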
@@ -897,6 +902,10 @@ def prompt_factory(
            return anthropic_pt(messages=messages)
        elif "mistral." in model:
            return mistral_instruct_pt(messages=messages)
    elif custom_llm_provider == "perplexity":
        for message in messages:
            message.pop("name", None)
        return messages
    try:
        if "meta-llama/llama-2" in model and "chat" in model:
            return llama_2_chat_pt(messages=messages)
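The perplexity branch above takes the simpler route and mutates the messages in place: dict.pop with a None default removes `name` when the key exists and is a no-op otherwise, so already-clean messages pass through unchanged. A standalone sketch of that behavior:

# pop("name", None) strips the key in place and never raises
# when the key is absent.
messages = [
    {"role": "user", "content": "Hi gm!", "name": "ishaan"},
    {"role": "assistant", "content": "Good morning!"},
]
for message in messages:
    message.pop("name", None)
assert all("name" not in m for m in messages)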

View file

@@ -83,12 +83,13 @@ def test_completion_claude():
def test_completion_claude_3_empty_response():
    litellm.set_verbose = True
    messages = [
        {
            "role": "system",
            "content": "You are 2twNLGfqk4GMOn3ffp4p.",
        },
        {"role": "user", "content": "Hi gm!"},
        {"role": "user", "content": "Hi gm!", "name": "ishaan"},
        {"role": "assistant", "content": "Good morning! How are you doing today?"},
        {
            "role": "user",