forked from phoenix/litellm-mirror
fix(bedrock_httpx.py): handle user error - malformed system prompt
if user passes in system prompt as a list of content blocks, handle that
This commit is contained in:
parent
905abab526
commit
88eb25da5c
2 changed files with 38 additions and 6 deletions
|
@@ -1815,7 +1815,12 @@ class BedrockConverseLLM(BaseLLM):
|
||||||
system_content_blocks: List[SystemContentBlock] = []
|
system_content_blocks: List[SystemContentBlock] = []
|
||||||
for idx, message in enumerate(messages):
|
for idx, message in enumerate(messages):
|
||||||
if message["role"] == "system":
|
if message["role"] == "system":
|
||||||
_system_content_block = SystemContentBlock(text=message["content"])
|
if isinstance(message["content"], str):
|
||||||
|
_system_content_block = SystemContentBlock(text=message["content"])
|
||||||
|
elif isinstance(message["content"], list):
|
||||||
|
for m in message["content"]:
|
||||||
|
if m.get("type", "") == "text":
|
||||||
|
_system_content_block = SystemContentBlock(text=m["text"])
|
||||||
system_content_blocks.append(_system_content_block)
|
system_content_blocks.append(_system_content_block)
|
||||||
system_prompt_indices.append(idx)
|
system_prompt_indices.append(idx)
|
||||||
if len(system_prompt_indices) > 0:
|
if len(system_prompt_indices) > 0:
|
||||||
|
|
|
@@ -586,14 +586,41 @@ def test_bedrock_claude_3(image_url):
|
||||||
response: ModelResponse = completion(
|
response: ModelResponse = completion(
|
||||||
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
|
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
|
||||||
num_retries=3,
|
num_retries=3,
|
||||||
# messages=messages,
|
|
||||||
# max_tokens=10,
|
|
||||||
# temperature=0.78,
|
|
||||||
**data,
|
**data,
|
||||||
)
|
) # type: ignore
|
||||||
# Add any assertions here to check the response
|
# Add any assertions here to check the response
|
||||||
assert len(response.choices) > 0
|
assert len(response.choices) > 0
|
||||||
assert len(response.choices[0].message.content) > 0
|
assert len(response.choices[0].message.content) > 0
|
||||||
|
|
||||||
|
except RateLimitError:
|
||||||
|
pass
|
||||||
|
except Exception as e:
|
||||||
|
pytest.fail(f"Error occurred: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
"system", ["You are an AI", [{"type": "text", "text": "You are an AI"}]]
|
||||||
|
)
|
||||||
|
def test_bedrock_claude_3_system_prompt(system):
|
||||||
|
try:
|
||||||
|
litellm.set_verbose = True
|
||||||
|
data = {
|
||||||
|
"max_tokens": 100,
|
||||||
|
"stream": False,
|
||||||
|
"temperature": 0.3,
|
||||||
|
"messages": [
|
||||||
|
{"role": "system", "content": system},
|
||||||
|
{"role": "user", "content": "hey, how's it going?"},
|
||||||
|
],
|
||||||
|
}
|
||||||
|
response: ModelResponse = completion(
|
||||||
|
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
|
||||||
|
**data,
|
||||||
|
) # type: ignore
|
||||||
|
# Add any assertions here to check the response
|
||||||
|
assert len(response.choices) > 0
|
||||||
|
assert len(response.choices[0].message.content) > 0
|
||||||
|
|
||||||
except RateLimitError:
|
except RateLimitError:
|
||||||
pass
|
pass
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
|
@@ -637,7 +664,7 @@ def test_bedrock_claude_3_tool_calling():
|
||||||
messages=messages,
|
messages=messages,
|
||||||
tools=tools,
|
tools=tools,
|
||||||
tool_choice="auto",
|
tool_choice="auto",
|
||||||
)
|
) # type: ignore
|
||||||
print(f"response: {response}")
|
print(f"response: {response}")
|
||||||
# Add any assertions here to check the response
|
# Add any assertions here to check the response
|
||||||
assert isinstance(response.choices[0].message.tool_calls[0].function.name, str)
|
assert isinstance(response.choices[0].message.tool_calls[0].function.name, str)
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue