Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-07 12:47:37 +00:00)
fix(responses): .content error when resuming responses with file search
# What does this PR do?

Fixes an `AttributeError` on `.content` when resuming a response whose history contains a file search tool call. `OpenAIResponseOutputMessageFileSearchToolCall` items are metadata-only and carry no `.content`, but `convert_response_input_to_chat_messages` fell through to the generic branch and dereferenced `input_item.content`. File search (and web search) tool call items are now skipped during conversion, matching the existing handling of MCP call items.

## Test Plan
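The test file included in this commit is not shown above; as a stand-in, here is a hedged sketch of a regression test for the fix. The type names come from the diff below, but the converter's module path, its exact signature, and the model constructor fields are assumptions:

```python
# Regression-test sketch, NOT the test shipped in this commit (that file is
# not shown here). Requires pytest-asyncio. The import path of the converter
# and the FileSearchToolCall fields (id/queries/status) are assumptions.
import pytest

from llama_stack.apis.agents.openai_responses import (
    OpenAIResponseMessage,
    OpenAIResponseOutputMessageFileSearchToolCall,
)
# assumed location of the converter; adjust to wherever the diffed file lives
from llama_stack.providers.inline.agents.meta_reference.responses.utils import (
    convert_response_input_to_chat_messages,
)


@pytest.mark.asyncio
async def test_resume_with_file_search_tool_call_does_not_raise():
    input_items = [
        OpenAIResponseMessage(role="user", content="find the onboarding doc"),
        # a previous-turn file search call replayed on resume; it has no
        # .content, which previously crashed the generic else branch
        OpenAIResponseOutputMessageFileSearchToolCall(
            id="fs_call_1",
            queries=["onboarding doc"],
            status="completed",
        ),
    ]
    messages = await convert_response_input_to_chat_messages(input_items)
    # the metadata-only tool call is skipped; only the user turn converts
    assert len(messages) == 1
```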
This commit is contained in:
parent 42414a1a1b · commit 64c22735d0
2 changed files with 63 additions and 0 deletions
```diff
@@ -18,9 +18,11 @@ from llama_stack.apis.agents.openai_responses import (
     OpenAIResponseMessage,
     OpenAIResponseOutputMessageContent,
     OpenAIResponseOutputMessageContentOutputText,
+    OpenAIResponseOutputMessageFileSearchToolCall,
     OpenAIResponseOutputMessageFunctionToolCall,
     OpenAIResponseOutputMessageMCPCall,
     OpenAIResponseOutputMessageMCPListTools,
+    OpenAIResponseOutputMessageWebSearchToolCall,
     OpenAIResponseText,
 )
 from llama_stack.apis.inference import (
@@ -156,6 +158,13 @@ async def convert_response_input_to_chat_messages(
         ):
             # these are handled by the responses impl itself and not pass through to chat completions
             pass
+        elif isinstance(input_item, OpenAIResponseOutputMessageWebSearchToolCall):
+            # web search tool calls are metadata only and don't need to be converted to chat messages
+            pass
+        elif isinstance(input_item, OpenAIResponseOutputMessageFileSearchToolCall):
+            # file search tool calls are metadata only and don't need to be converted to chat messages
+            # OpenAI re-executes file search on each turn rather than caching results
+            pass
         else:
             content = await convert_response_content_to_chat_content(input_item.content)
             message_type = await get_message_type_by_role(input_item.role)
```
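For intuition, a minimal, self-contained sketch of the failure mode and the fix, using hypothetical stand-in classes rather than the real pydantic models (only the attribute shapes matter here):

```python
# Illustration with hypothetical stand-ins; the real conversion also maps
# roles and content parts to chat-completion messages, omitted here.
class Message:
    def __init__(self, role: str, content: str):
        self.role = role
        self.content = content


class FileSearchToolCall:
    """Metadata-only item: records the search queries; has no .content."""

    def __init__(self, queries: list[str]):
        self.queries = queries


def convert(items: list) -> list[str]:
    chat = []
    for item in items:
        if isinstance(item, FileSearchToolCall):
            continue  # the fix: skip metadata-only items
        # before the fix, every unmatched type reached this line, so a
        # FileSearchToolCall raised AttributeError on .content
        chat.append(f"{item.role}: {item.content}")
    return chat


print(convert([Message("user", "hi"), FileSearchToolCall(["hi"])]))
# -> ['user: hi']
```

Skipping rather than replaying also reflects the design note in the diff comment: OpenAI re-executes file search on each turn instead of caching results, so the prior call is pure metadata and contributes nothing to the chat history.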