fix(factory.py): handle message content being a list instead of string
Fixes https://github.com/BerriAI/litellm/issues/4679
commit 0decc36bed (parent 70b96d12e9)
2 changed files with 23 additions and 4 deletions
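Before this change, custom_prompt concatenated message["content"] directly into the prompt string, so OpenAI-style list content blew up with a TypeError. A minimal repro of that failure mode (plain Python, independent of litellm; the template strings are made up for illustration):

prompt = ""
pre_message_str, post_message_str = "<s>[INST] ", " [/INST]"  # illustrative template pieces
content = [{"type": "text", "text": "You are an AI"}]  # list-form content, as reported in issue 4679

try:
    prompt += pre_message_str + content + post_message_str  # old code path
except TypeError as e:
    print(e)  # can only concatenate str (not "list") to str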
factory.py:

@@ -2393,7 +2393,16 @@ def custom_prompt(
             if role in role_dict and "post_message" in role_dict[role]
             else ""
         )
-        prompt += pre_message_str + message["content"] + post_message_str
+        if isinstance(message["content"], str):
+            prompt += pre_message_str + message["content"] + post_message_str
+        elif isinstance(message["content"], list):
+            text_str = ""
+            for content in message["content"]:
+                if content.get("text", None) is not None and isinstance(
+                    content["text"], str
+                ):
+                    text_str += content["text"]
+            prompt += pre_message_str + text_str + post_message_str
 
     if role == "assistant":
         prompt += eos_token
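For illustration, a standalone sketch of what the new branch does with list-form content (the sample message is hypothetical; the flattening logic mirrors the patch above):

message = {
    "role": "user",
    "content": [
        {"type": "text", "text": "You are an AI. "},
        {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
        {"type": "text", "text": "Be concise."},
    ],
}

if isinstance(message["content"], str):
    text_str = message["content"]
elif isinstance(message["content"], list):
    text_str = ""
    for content in message["content"]:
        # Keep only string "text" parts; non-text parts (e.g. images) are skipped.
        if content.get("text", None) is not None and isinstance(content["text"], str):
            text_str += content["text"]

print(text_str)  # You are an AI. Be concise.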
Bedrock completion tests:

@@ -599,9 +599,19 @@ def test_bedrock_claude_3(image_url):
 
 
 @pytest.mark.parametrize(
-    "system", ["You are an AI", [{"type": "text", "text": "You are an AI"}]]
+    "system",
+    ["You are an AI", [{"type": "text", "text": "You are an AI"}]],
 )
-def test_bedrock_claude_3_system_prompt(system):
+@pytest.mark.parametrize(
+    "model",
+    [
+        "anthropic.claude-3-sonnet-20240229-v1:0",
+        "meta.llama3-70b-instruct-v1:0",
+        "anthropic.claude-v2",
+        "mistral.mixtral-8x7b-instruct-v0:1",
+    ],
+)
+def test_bedrock_system_prompt(system, model):
     try:
         litellm.set_verbose = True
         data = {
@@ -614,7 +624,7 @@ def test_bedrock_claude_3_system_prompt(system):
             ],
         }
         response: ModelResponse = completion(
-            model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
+            model="bedrock/{}".format(model),
             **data,
         )  # type: ignore
         # Add any assertions here to check the response
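The two stacked @pytest.mark.parametrize decorators take the cross product of their argument lists, so the renamed test now runs 4 models × 2 system-prompt shapes = 8 cases instead of 2. A self-contained sketch of the same pattern, with a trivial assertion standing in for the real Bedrock call:

import pytest

@pytest.mark.parametrize(
    "system",
    ["You are an AI", [{"type": "text", "text": "You are an AI"}]],
)
@pytest.mark.parametrize(
    "model",
    [
        "anthropic.claude-3-sonnet-20240229-v1:0",
        "meta.llama3-70b-instruct-v1:0",
        "anthropic.claude-v2",
        "mistral.mixtral-8x7b-instruct-v0:1",
    ],
)
def test_bedrock_system_prompt(system, model):
    # pytest generates one case per (model, system) pair: 4 * 2 = 8 runs.
    assert isinstance(system, (str, list))
    assert isinstance(model, str)

To run just these cases: pytest -k test_bedrock_system_prompt -q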