From 52a69f0bf90c5ad0b17cb00519f32c2ef7110ff0 Mon Sep 17 00:00:00 2001
From: Ben Browning
Date: Thu, 17 Apr 2025 15:10:22 -0400
Subject: [PATCH] Extract some helper methods out in openai_responses impl

This extracts out helper methods to convert previous responses to
messages and to convert OpenAI choices (from a chat completion response)
into output messages for the OpenAI Responses output.

Signed-off-by: Ben Browning
---
 .../openai_responses/openai_responses.py      | 46 ++++++++++++-------
 1 file changed, 29 insertions(+), 17 deletions(-)

diff --git a/llama_stack/providers/inline/openai_responses/openai_responses.py b/llama_stack/providers/inline/openai_responses/openai_responses.py
index c23fe9b75..bb26a10fa 100644
--- a/llama_stack/providers/inline/openai_responses/openai_responses.py
+++ b/llama_stack/providers/inline/openai_responses/openai_responses.py
@@ -12,6 +12,7 @@ from llama_stack.apis.inference.inference import (
     OpenAIAssistantMessageParam,
     OpenAIChatCompletion,
     OpenAIChatCompletionContentPartTextParam,
+    OpenAIChoice,
     OpenAIMessageParam,
     OpenAIUserMessageParam,
 )
@@ -33,6 +34,32 @@ logger = get_logger(name=__name__, category="openai_responses")
 OPENAI_RESPONSES_PREFIX = "openai_responses:"
 
 
+async def _previous_response_to_messages(previous_response: OpenAIResponseObject) -> List[OpenAIMessageParam]:
+    messages: List[OpenAIMessageParam] = []
+    for output_message in previous_response.output:
+        messages.append(OpenAIAssistantMessageParam(content=output_message.content[0].text))
+    return messages
+
+
+async def _openai_choices_to_output_messages(choices: List[OpenAIChoice]) -> List[OpenAIResponseOutputMessage]:
+    output_messages = []
+    for choice in choices:
+        output_content = ""
+        if isinstance(choice.message.content, str):
+            output_content = choice.message.content
+        elif isinstance(choice.message.content, OpenAIChatCompletionContentPartTextParam):
+            output_content = choice.message.content.text
+        # TODO: handle image content
+        output_messages.append(
+            OpenAIResponseOutputMessage(
+                id=f"msg_{uuid.uuid4()}",
+                content=[OpenAIResponseOutputMessageContentOutputText(text=output_content)],
+                status="completed",
+            )
+        )
+    return output_messages
+
+
 class OpenAIResponsesImpl(OpenAIResponses):
     def __init__(self, config: OpenAIResponsesImplConfig, models_api: Models, inference_api: Inference):
         self.config = config
@@ -73,8 +100,7 @@ class OpenAIResponsesImpl(OpenAIResponses):
         messages: List[OpenAIMessageParam] = []
         if previous_response_id:
             previous_response = await self.get_openai_response(previous_response_id)
-            messages.append(OpenAIAssistantMessageParam(content=previous_response.output[0].content[0].text))
-
+            messages.extend(await _previous_response_to_messages(previous_response))
         messages.append(OpenAIUserMessageParam(content=input))
 
         chat_response = await self.inference_api.openai_chat_completion(
@@ -84,21 +110,7 @@ class OpenAIResponsesImpl(OpenAIResponses):
         # type cast to appease mypy
         chat_response = cast(OpenAIChatCompletion, chat_response)
-        output_messages = []
-        for choice in chat_response.choices:
-            output_content = ""
-            if isinstance(choice.message.content, str):
-                output_content = choice.message.content
-            elif isinstance(choice.message.content, OpenAIChatCompletionContentPartTextParam):
-                output_content = choice.message.content.text
-            # TODO: handle image content
-            output_messages.append(
-                OpenAIResponseOutputMessage(
-                    id=f"msg_{uuid.uuid4()}",
-                    content=[OpenAIResponseOutputMessageContentOutputText(text=output_content)],
-                    status="completed",
-                )
-            )
+        output_messages = await _openai_choices_to_output_messages(chat_response.choices)
 
         response = OpenAIResponseObject(
             created_at=chat_response.created,
             id=f"resp-{uuid.uuid4()}",
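
Note for reviewers: the sketch below mirrors the branching that the extracted
_openai_choices_to_output_messages helper performs, but with lightweight
stand-in dataclasses so it can be run outside of llama_stack. FakeTextPart,
FakeMessage, FakeChoice, and choices_to_output_texts are hypothetical names
introduced here for illustration only; they are not part of the codebase or
of this patch.

```python
# Minimal, self-contained sketch of the choice -> output-message conversion.
# FakeTextPart/FakeMessage/FakeChoice are illustrative stand-ins, not llama_stack types.
import uuid
from dataclasses import dataclass
from typing import List, Union


@dataclass
class FakeTextPart:
    text: str


@dataclass
class FakeMessage:
    content: Union[str, FakeTextPart]


@dataclass
class FakeChoice:
    message: FakeMessage


def choices_to_output_texts(choices: List[FakeChoice]) -> List[dict]:
    """Mirror the helper's branching: plain strings and text parts both become output text."""
    outputs = []
    for choice in choices:
        if isinstance(choice.message.content, str):
            text = choice.message.content
        elif isinstance(choice.message.content, FakeTextPart):
            text = choice.message.content.text
        else:
            text = ""  # image or other content is not handled yet (see the TODO in the patch)
        outputs.append({"id": f"msg_{uuid.uuid4()}", "text": text, "status": "completed"})
    return outputs


if __name__ == "__main__":
    choices = [
        FakeChoice(message=FakeMessage(content="plain string reply")),
        FakeChoice(message=FakeMessage(content=FakeTextPart(text="text-part reply"))),
    ]
    for out in choices_to_output_texts(choices):
        print(out["status"], out["text"])
```

Both inputs produce a completed output message, which matches the behavior the
helper preserves from the inlined loop it replaces; only non-text content falls
through to the empty-string case flagged by the TODO.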