fix(responses): .content error when resuming responses with file search

# What does this PR do?


## Test Plan
This commit is contained in:
Eric Huang 2025-09-30 15:01:18 -07:00
parent 42414a1a1b
commit 64c22735d0
2 changed files with 63 additions and 0 deletions

View file

@@ -18,9 +18,11 @@ from llama_stack.apis.agents.openai_responses import (
OpenAIResponseMessage,
OpenAIResponseOutputMessageContent,
OpenAIResponseOutputMessageContentOutputText,
OpenAIResponseOutputMessageFileSearchToolCall,
OpenAIResponseOutputMessageFunctionToolCall,
OpenAIResponseOutputMessageMCPCall,
OpenAIResponseOutputMessageMCPListTools,
OpenAIResponseOutputMessageWebSearchToolCall,
OpenAIResponseText,
)
from llama_stack.apis.inference import (
@@ -156,6 +158,13 @@ async def convert_response_input_to_chat_messages(
):
# these are handled by the responses impl itself and not passed through to chat completions
pass
elif isinstance(input_item, OpenAIResponseOutputMessageWebSearchToolCall):
# web search tool calls are metadata only and don't need to be converted to chat messages
pass
elif isinstance(input_item, OpenAIResponseOutputMessageFileSearchToolCall):
# file search tool calls are metadata only and don't need to be converted to chat messages
# OpenAI re-executes file search on each turn rather than caching results
pass
else:
content = await convert_response_content_to_chat_content(input_item.content)
message_type = await get_message_type_by_role(input_item.role)