Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-12 12:06:04 +00:00
fix tests
commit 442d9f40a4
parent f820123b99
3 changed files with 3 additions and 2 deletions
@@ -264,6 +264,7 @@ class StreamingResponseOrchestrator:
                 completion_result_data = stream_event_or_result
             else:
                 yield stream_event_or_result
 
+        # If violation detected, skip the rest of processing since we already sent refusal
         if self.violation_detected:
             return
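For context on the first hunk: the added comment documents an existing early-return guard that stops the orchestrator from doing further work once a safety violation has been handled, since the refusal was already streamed to the client. A minimal sketch of that pattern, assuming a generator-based orchestrator with a violation_detected flag (the class, event values, and post-processing step below are illustrative stand-ins, not the actual llama-stack code):

import asyncio
from collections.abc import AsyncIterator


class ToyOrchestrator:
    # Illustrative stand-in for StreamingResponseOrchestrator.
    def __init__(self) -> None:
        self.violation_detected = False

    async def process(self, events: AsyncIterator[str]) -> AsyncIterator[str]:
        async for event in events:
            if event == "violation":
                self.violation_detected = True
                yield "refusal"  # the refusal is sent to the client here
            else:
                yield event

        # If violation detected, skip the rest of processing since we
        # already sent refusal (the guard this commit documents).
        if self.violation_detected:
            return

        yield "post-processing"  # only reached on a clean stream


async def main() -> None:
    async def events():
        yield "hello"
        yield "violation"

    orch = ToyOrchestrator()
    print([e async for e in orch.process(events())])  # ['hello', 'refusal']


asyncio.run(main())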
@@ -174,7 +174,7 @@ async def convert_response_input_to_chat_messages(
             pass
         else:
             content = await convert_response_content_to_chat_content(input_item.content)
-            message_type = get_message_type_by_role(input_item.role)
+            message_type = await get_message_type_by_role(input_item.role)
             if message_type is None:
                 raise ValueError(
                     f"Llama Stack OpenAI Responses does not yet support message role '{input_item.role}' in this context"
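The second hunk adds an await to get_message_type_by_role, implying the helper became a coroutine in this change set. Without the await, the call returns a coroutine object, which is never None, so the unsupported-role check below it would silently pass. A minimal sketch of that failure mode (the helper body and message class here are hypothetical; only the call shape matches the diff):

import asyncio


class UserMessage:
    pass


async def get_message_type_by_role(role: str) -> type | None:
    # Hypothetical lookup standing in for the real helper.
    return {"user": UserMessage}.get(role)


async def main() -> None:
    missing_await = get_message_type_by_role("tool")  # coroutine object, never None
    print(missing_await is None)  # False -> the None check is bypassed
    missing_await.close()  # avoid a "never awaited" RuntimeWarning

    message_type = await get_message_type_by_role("tool")
    print(message_type is None)  # True -> the unsupported role is caught


asyncio.run(main())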
@@ -83,7 +83,7 @@ def test_extract_shield_ids_unknown_format(responses_impl):
     # Create an object that's neither string nor ResponseShieldSpec
     unknown_object = {"invalid": "format"}  # Plain dict, not ResponseShieldSpec
     shields = ["valid-shield", unknown_object, "another-shield"]
-    with pytest.raises(ValueError, match="Unsupported shield type"):
+    with pytest.raises(ValueError, match="Unknown shield format.*expected str or ResponseShieldSpec"):
         extract_shield_ids(shields)
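On the third hunk: pytest.raises(match=...) applies re.search to the string form of the exception, so the .* in the updated pattern tolerates whatever detail (such as the offending object) the message embeds between the two phrases. A quick self-contained illustration (the stub and its error text are plausible stand-ins, not copied from the real extract_shield_ids):

import pytest


def extract_shield_ids_stub(shields: list) -> list[str]:
    # Stand-in for extract_shield_ids: accept strings, reject anything else.
    for shield in shields:
        if not isinstance(shield, str):
            raise ValueError(
                f"Unknown shield format: {shield!r} (expected str or ResponseShieldSpec)"
            )
    return shields


def test_unknown_format() -> None:
    with pytest.raises(ValueError, match="Unknown shield format.*expected str or ResponseShieldSpec"):
        extract_shield_ids_stub(["valid-shield", {"invalid": "format"}])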