mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-22 08:17:18 +00:00)
fix tests
This commit is contained in:
parent b5c951fa4b
commit 7b5dd54b7a
4 changed files with 13 additions and 8 deletions
@@ -345,7 +345,7 @@ class OpenAIResponsesImpl:
             return

         # Structured outputs
-        response_format = convert_response_text_to_chat_response_format(text)
+        response_format = await convert_response_text_to_chat_response_format(text)

         ctx = ChatCompletionContext(
             model=model,
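The hunk above reflects `convert_response_text_to_chat_response_format` becoming a coroutine function, so its result must now be awaited. A minimal, self-contained sketch of why the call site breaks without `await`, using a hypothetical stand-in helper rather than the real one:

import asyncio

# Hypothetical stand-in for convert_response_text_to_chat_response_format.
async def convert_text(text: str) -> dict:
    return {"type": "text", "value": text}

async def main() -> None:
    broken = convert_text("hi")        # missing await: binds a coroutine object
    print(type(broken))                # <class 'coroutine'>, not a dict
    broken.close()                     # suppress the "never awaited" warning

    fixed = await convert_text("hi")   # awaited: binds the actual dict
    print(fixed)                       # {'type': 'text', 'value': 'hi'}

asyncio.run(main())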
@@ -13,8 +13,8 @@ from llama_stack.apis.agents.openai_responses import (
     ApprovalFilter,
     MCPListToolsTool,
     OpenAIResponseContentPartOutputText,
-    OpenAIResponseError,
     OpenAIResponseContentPartRefusal,
+    OpenAIResponseError,
     OpenAIResponseInputTool,
     OpenAIResponseInputToolMCP,
     OpenAIResponseMCPApprovalRequest,
@@ -226,6 +226,11 @@ class StreamingResponseOrchestrator:
                 completion_result_data = stream_event_or_result
             else:
                 yield stream_event_or_result

+        # If violation detected, skip the rest of processing since we already sent refusal
+        if self.violation_detected:
+            return
+
         if not completion_result_data:
             raise ValueError("Streaming chunk processor failed to return completion data")
         last_completion_result = completion_result_data
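The added early return keeps the orchestrator from treating a guardrail refusal as a failed completion: once a violation has been flagged and a refusal streamed, there is no completion data left to validate. A minimal sketch of the pattern, with hypothetical names (only the `violation_detected` flag and the error message come from the diff):

import asyncio
from collections.abc import AsyncIterator

class TinyOrchestrator:
    # Hypothetical miniature of a streaming orchestrator with a guardrail flag.
    def __init__(self) -> None:
        self.violation_detected = False

    async def _stream_chunks(self) -> AsyncIterator[str]:
        yield "partial output"
        self.violation_detected = True   # a guardrail fires mid-stream
        yield "refusal event"            # refusal already sent to the client

    async def run(self) -> AsyncIterator[str]:
        completion_result_data = None
        async for event in self._stream_chunks():
            yield event
        # If violation detected, skip the rest of processing since we already sent refusal
        if self.violation_detected:
            return
        if not completion_result_data:
            raise ValueError("Streaming chunk processor failed to return completion data")

async def main() -> None:
    async for event in TinyOrchestrator().run():
        print(event)

asyncio.run(main())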
@@ -178,7 +178,7 @@ async def convert_response_input_to_chat_messages(
             pass
         else:
             content = await convert_response_content_to_chat_content(input_item.content)
-            message_type = get_message_type_by_role(input_item.role)
+            message_type = await get_message_type_by_role(input_item.role)
             if message_type is None:
                 raise ValueError(
                     f"Llama Stack OpenAI Responses does not yet support message role '{input_item.role}' in this context"
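Same class of fix as the first hunk: `get_message_type_by_role` is now a coroutine function, so the bare call bound a coroutine object, and the `message_type is None` check below it could never fire, since a coroutine object is never None. That is exactly the kind of bug failing tests surface, which fits the commit message. A hedged pytest-style sketch, again with a hypothetical stand-in helper, of both the failure mode and the fixed behavior:

import asyncio
import inspect

# Hypothetical stand-in for the real async helper in the responses utils.
async def get_message_type_by_role(role: str):
    return {"user": "UserMessage", "system": "SystemMessage"}.get(role)

def test_missing_await_binds_a_coroutine():
    result = get_message_type_by_role("user")   # missing await
    assert inspect.iscoroutine(result)          # not None, not a message type
    result.close()

def test_awaited_call_returns_message_type():
    assert asyncio.run(get_message_type_by_role("user")) == "UserMessage"
    assert asyncio.run(get_message_type_by_role("tool")) is None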