Merge pull request #2399 from BerriAI/litellm_support_name_for_anthropic
[Feat] Support messages.name for claude-3, Perplexity AI API
Commit 85b981f602
3 changed files with 29 additions and 11 deletions
@@ -237,14 +237,22 @@ class OpenAIChatCompletion(BaseLLM):
                     status_code=422, message=f"Timeout needs to be a float"
                 )
 
-            if custom_llm_provider == "mistral":
-                # check if message content passed in as list, and not string
-                messages = prompt_factory(
-                    model=model,
-                    messages=messages,
-                    custom_llm_provider=custom_llm_provider,
-                )
+            if custom_llm_provider != "openai":
+                # process all OpenAI compatible provider logic here
+                if custom_llm_provider == "mistral":
+                    # check if message content passed in as list, and not string
+                    messages = prompt_factory(
+                        model=model,
+                        messages=messages,
+                        custom_llm_provider=custom_llm_provider,
+                    )
+                if custom_llm_provider == "perplexity" and messages is not None:
+                    # check if messages.name is passed + supported, if not supported remove
+                    messages = prompt_factory(
+                        model=model,
+                        messages=messages,
+                        custom_llm_provider=custom_llm_provider,
+                    )
 
             for _ in range(
                 2
             ):  # if call fails due to alternating messages, retry with reformatted message
 
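This hunk generalizes the old Mistral-only special case: any OpenAI-compatible provider other than "openai" itself now flows through this block, and Perplexity gets its own pass through prompt_factory to strip unsupported fields. A minimal sketch of the effect from the caller's side, assuming the factory module path and the Perplexity model id below (both are assumptions, not from the diff):

# Sketch: what the new Perplexity branch accomplishes.
from litellm.llms.prompt_templates.factory import prompt_factory  # assumed module path

messages = [{"role": "user", "content": "Hi gm!", "name": "ishaan"}]
messages = prompt_factory(
    model="pplx-7b-chat",  # assumed Perplexity model id
    messages=messages,
    custom_llm_provider="perplexity",
)
print(messages)  # [{'role': 'user', 'content': 'Hi gm!'}] -- the "name" key is removed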
@@ -556,6 +556,7 @@ def anthropic_messages_pt(messages: list):
     3. Each message must alternate between "user" and "assistant" (this is not addressed as now by litellm)
     4. final assistant content cannot end with trailing whitespace (anthropic raises an error otherwise)
     5. System messages are a separate param to the Messages API (used for tool calling)
+    6. Ensure we only accept role, content. (message.name is not supported)
     """
     ## Ensure final assistant message has no trailing whitespace
     last_assistant_message_idx: Optional[int] = None
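These docstring rules mirror the request shape Anthropic's Messages API enforces. A sketch of a payload that satisfies rules 3 through 6, reusing the strings from the test below; the model id is an assumption:

# Sketch of a Messages API payload satisfying rules 3-6 above.
payload = {
    "model": "claude-3-opus-20240229",  # assumed model id
    "max_tokens": 256,
    # rule 5: the system prompt is a top-level param, not a message
    "system": "You are 2twNLGfqk4GMOn3ffp4p.",
    "messages": [
        # rule 3: roles must alternate user/assistant
        # rule 6: each message carries only "role" and "content" (no "name")
        {"role": "user", "content": "Hi gm!"},
        # rule 4: assistant content must not end with trailing whitespace
        {"role": "assistant", "content": "Good morning! How are you doing today?"},
        {"role": "user", "content": "How is the weather?"},
    ],
}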
@@ -583,7 +584,9 @@ def anthropic_messages_pt(messages: list):
                 new_content.append({"type": "text", "text": m["text"]})
             new_messages.append({"role": messages[0]["role"], "content": new_content})  # type: ignore
         else:
-            new_messages.append(messages[0])
+            new_messages.append(
+                {"role": messages[0]["role"], "content": messages[0]["content"]}
+            )
 
         return new_messages
 
@@ -606,7 +609,9 @@ def anthropic_messages_pt(messages: list):
                 new_content.append({"type": "text", "content": m["text"]})
             new_messages.append({"role": messages[i]["role"], "content": new_content})  # type: ignore
         else:
-            new_messages.append(messages[i])
+            new_messages.append(
+                {"role": messages[i]["role"], "content": messages[i]["content"]}
+            )
 
         if messages[i]["role"] == messages[i + 1]["role"]:
             if messages[i]["role"] == "user":
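Both hunks make the same fix: rather than forwarding the original message dict unchanged, only the two keys Anthropic accepts are copied, so an OpenAI-style "name" field never reaches the API. In isolation:

# Rebuilding the dict whitelists the supported keys; extra fields such
# as "name" are dropped rather than forwarded to Anthropic.
message = {"role": "user", "content": "Hi gm!", "name": "ishaan"}
clean = {"role": message["role"], "content": message["content"]}
assert clean == {"role": "user", "content": "Hi gm!"}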
@@ -897,6 +902,10 @@ def prompt_factory(
             return anthropic_pt(messages=messages)
         elif "mistral." in model:
             return mistral_instruct_pt(messages=messages)
+    elif custom_llm_provider == "perplexity":
+        for message in messages:
+            message.pop("name", None)
+        return messages
     try:
         if "meta-llama/llama-2" in model and "chat" in model:
             return llama_2_chat_pt(messages=messages)
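The new perplexity branch mutates the messages in place; dict.pop with a default means messages that never carried a "name" key pass through without raising KeyError. For example:

messages = [
    {"role": "user", "content": "Hi gm!", "name": "ishaan"},
    {"role": "assistant", "content": "Good morning! How are you doing today?"},
]
for message in messages:
    message.pop("name", None)  # default of None: no KeyError when "name" is absent
assert messages[0] == {"role": "user", "content": "Hi gm!"}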
@@ -83,12 +83,13 @@ def test_completion_claude():
 
 
 def test_completion_claude_3_empty_response():
+    litellm.set_verbose = True
     messages = [
         {
             "role": "system",
             "content": "You are 2twNLGfqk4GMOn3ffp4p.",
         },
-        {"role": "user", "content": "Hi gm!"},
+        {"role": "user", "content": "Hi gm!", "name": "ishaan"},
         {"role": "assistant", "content": "Good morning! How are you doing today?"},
         {
             "role": "user",
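End to end, the change lets callers keep OpenAI-style "name" fields in their message history even when targeting providers that reject them. A sketch of a call that exercises this (requires a valid Anthropic API key in the environment; the model id is an assumption, any claude-3 model applies):

import litellm

response = litellm.completion(
    model="claude-3-opus-20240229",  # assumed model id
    messages=[
        # "name" is stripped by litellm before the Anthropic request is built
        {"role": "user", "content": "Hi gm!", "name": "ishaan"},
    ],
)
print(response.choices[0].message.content)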