forked from phoenix/litellm-mirror
fix(anthropic.py): support openai system message being a list
This commit is contained in:
parent
cb4c42e061
commit
fb0a13c8bb
3 changed files with 14 additions and 6 deletions
|
@@ -780,8 +780,17 @@ class AnthropicChatCompletion(BaseLLM):
|
|||
system_prompt = ""
|
||||
for idx, message in enumerate(messages):
|
||||
if message["role"] == "system":
|
||||
system_prompt += message["content"]
|
||||
system_prompt_indices.append(idx)
|
||||
valid_content: bool = False
|
||||
if isinstance(message["content"], str):
|
||||
system_prompt += message["content"]
|
||||
valid_content = True
|
||||
elif isinstance(message["content"], list):
|
||||
for content in message["content"]:
|
||||
system_prompt += content.get("text", "")
|
||||
valid_content = True
|
||||
|
||||
if valid_content:
|
||||
system_prompt_indices.append(idx)
|
||||
if len(system_prompt_indices) > 0:
|
||||
for idx in reversed(system_prompt_indices):
|
||||
messages.pop(idx)
|
||||
|
|
|
@@ -1,8 +1,7 @@
|
|||
model_list:
|
||||
- model_name: groq-llama3
|
||||
- model_name: anthropic-claude
|
||||
litellm_params:
|
||||
model: groq/llama3-groq-70b-8192-tool-use-preview
|
||||
api_key: os.environ/GROQ_API_KEY
|
||||
model: claude-3-haiku-20240307
|
||||
|
||||
litellm_settings:
|
||||
callbacks: ["logfire"]
|
||||
|
|
|
@@ -346,7 +346,7 @@ def test_completion_claude_3_empty_response():
|
|||
messages = [
|
||||
{
|
||||
"role": "system",
|
||||
"content": "You are 2twNLGfqk4GMOn3ffp4p.",
|
||||
"content": [{"type": "text", "text": "You are 2twNLGfqk4GMOn3ffp4p."}],
|
||||
},
|
||||
{"role": "user", "content": "Hi gm!", "name": "ishaan"},
|
||||
{"role": "assistant", "content": "Good morning! How are you doing today?"},
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue