mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-08-02 00:34:44 +00:00
Handle more input content types in Responses API
This adds logic to handle input content types coming from the model's previous output messages (like in multi-turn conversations) and content types that are plain strings. It also raises an error if we find any unhandled content types, as that would be a bug we need to fix. Signed-off-by: Ben Browning <bbrownin@redhat.com>
This commit is contained in:
parent
11081c2029
commit
dfb641a3de
1 changed files with 8 additions and 0 deletions
|
@@ -68,12 +68,20 @@ async def _convert_response_input_to_chat_messages(
|
||||||
for input_message_content in input_message.content:
|
for input_message_content in input_message.content:
|
||||||
if isinstance(input_message_content, OpenAIResponseInputMessageContentText):
|
if isinstance(input_message_content, OpenAIResponseInputMessageContentText):
|
||||||
content.append(OpenAIChatCompletionContentPartTextParam(text=input_message_content.text))
|
content.append(OpenAIChatCompletionContentPartTextParam(text=input_message_content.text))
|
||||||
|
elif isinstance(input_message_content, OpenAIResponseOutputMessageContentOutputText):
|
||||||
|
content.append(OpenAIChatCompletionContentPartTextParam(text=input_message_content.text))
|
||||||
elif isinstance(input_message_content, OpenAIResponseInputMessageContentImage):
|
elif isinstance(input_message_content, OpenAIResponseInputMessageContentImage):
|
||||||
if input_message_content.image_url:
|
if input_message_content.image_url:
|
||||||
image_url = OpenAIImageURL(
|
image_url = OpenAIImageURL(
|
||||||
url=input_message_content.image_url, detail=input_message_content.detail
|
url=input_message_content.image_url, detail=input_message_content.detail
|
||||||
)
|
)
|
||||||
content.append(OpenAIChatCompletionContentPartImageParam(image_url=image_url))
|
content.append(OpenAIChatCompletionContentPartImageParam(image_url=image_url))
|
||||||
|
elif isinstance(input_message_content, str):
|
||||||
|
content.append(OpenAIChatCompletionContentPartTextParam(text=input_message_content))
|
||||||
|
else:
|
||||||
|
raise ValueError(
|
||||||
|
f"Llama Stack OpenAI Responses does not yet support content type '{type(input_message_content)}' in this context"
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
content = input_message.content
|
content = input_message.content
|
||||||
message_type = await _get_message_type_by_role(input_message.role)
|
message_type = await _get_message_type_by_role(input_message.role)
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue