mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-08-03 09:21:45 +00:00
Extract some helper methods out in openai_responses impl
This extracts out helper methods: one to convert previous responses to messages, and one to convert OpenAI choices (from a chat completion response) into output messages for the OpenAI Responses output. Signed-off-by: Ben Browning <bbrownin@redhat.com>
This commit is contained in:
parent
70c088af3a
commit
52a69f0bf9
1 changed files with 29 additions and 17 deletions
|
@ -12,6 +12,7 @@ from llama_stack.apis.inference.inference import (
|
||||||
OpenAIAssistantMessageParam,
|
OpenAIAssistantMessageParam,
|
||||||
OpenAIChatCompletion,
|
OpenAIChatCompletion,
|
||||||
OpenAIChatCompletionContentPartTextParam,
|
OpenAIChatCompletionContentPartTextParam,
|
||||||
|
OpenAIChoice,
|
||||||
OpenAIMessageParam,
|
OpenAIMessageParam,
|
||||||
OpenAIUserMessageParam,
|
OpenAIUserMessageParam,
|
||||||
)
|
)
|
||||||
|
@ -33,6 +34,32 @@ logger = get_logger(name=__name__, category="openai_responses")
|
||||||
OPENAI_RESPONSES_PREFIX = "openai_responses:"
|
OPENAI_RESPONSES_PREFIX = "openai_responses:"
|
||||||
|
|
||||||
|
|
||||||
|
async def _previous_response_to_messages(previous_response: OpenAIResponseObject) -> List[OpenAIMessageParam]:
    """Convert a previous response's outputs into assistant chat messages.

    Each output item contributes one assistant message built from the text of
    its first content part.

    :param previous_response: The stored response whose outputs are replayed.
    :returns: Assistant messages, one per output item, in order.
    """
    # NOTE(review): only content[0] is used per output item — assumes a single
    # text part per output message; verify against how outputs are produced.
    return [
        OpenAIAssistantMessageParam(content=item.content[0].text)
        for item in previous_response.output
    ]
|
||||||
|
|
||||||
|
|
||||||
|
async def _openai_choices_to_output_messages(choices: List[OpenAIChoice]) -> List[OpenAIResponseOutputMessage]:
    """Convert chat-completion choices into OpenAI Responses output messages.

    For each choice, the message content is flattened to plain text: a `str`
    content is used as-is, a single text content part contributes its `.text`,
    and anything else falls back to an empty string.

    :param choices: Choices from an OpenAI chat completion response.
    :returns: One completed output message per choice, each with a fresh
        ``msg_<uuid>`` identifier.
    """
    output_messages = []
    for choice in choices:
        content = choice.message.content
        if isinstance(content, str):
            text = content
        elif isinstance(content, OpenAIChatCompletionContentPartTextParam):
            text = content.text
        else:
            # Unrecognized content shapes are dropped to empty text.
            text = ""
        # TODO: handle image content
        output_messages.append(
            OpenAIResponseOutputMessage(
                id=f"msg_{uuid.uuid4()}",
                content=[OpenAIResponseOutputMessageContentOutputText(text=text)],
                status="completed",
            )
        )
    return output_messages
|
||||||
|
|
||||||
|
|
||||||
class OpenAIResponsesImpl(OpenAIResponses):
|
class OpenAIResponsesImpl(OpenAIResponses):
|
||||||
def __init__(self, config: OpenAIResponsesImplConfig, models_api: Models, inference_api: Inference):
|
def __init__(self, config: OpenAIResponsesImplConfig, models_api: Models, inference_api: Inference):
|
||||||
self.config = config
|
self.config = config
|
||||||
|
@ -73,8 +100,7 @@ class OpenAIResponsesImpl(OpenAIResponses):
|
||||||
messages: List[OpenAIMessageParam] = []
|
messages: List[OpenAIMessageParam] = []
|
||||||
if previous_response_id:
|
if previous_response_id:
|
||||||
previous_response = await self.get_openai_response(previous_response_id)
|
previous_response = await self.get_openai_response(previous_response_id)
|
||||||
messages.append(OpenAIAssistantMessageParam(content=previous_response.output[0].content[0].text))
|
messages.extend(await _previous_response_to_messages(previous_response))
|
||||||
|
|
||||||
messages.append(OpenAIUserMessageParam(content=input))
|
messages.append(OpenAIUserMessageParam(content=input))
|
||||||
|
|
||||||
chat_response = await self.inference_api.openai_chat_completion(
|
chat_response = await self.inference_api.openai_chat_completion(
|
||||||
|
@ -84,21 +110,7 @@ class OpenAIResponsesImpl(OpenAIResponses):
|
||||||
# type cast to appease mypy
|
# type cast to appease mypy
|
||||||
chat_response = cast(OpenAIChatCompletion, chat_response)
|
chat_response = cast(OpenAIChatCompletion, chat_response)
|
||||||
|
|
||||||
output_messages = []
|
output_messages = await _openai_choices_to_output_messages(chat_response.choices)
|
||||||
for choice in chat_response.choices:
|
|
||||||
output_content = ""
|
|
||||||
if isinstance(choice.message.content, str):
|
|
||||||
output_content = choice.message.content
|
|
||||||
elif isinstance(choice.message.content, OpenAIChatCompletionContentPartTextParam):
|
|
||||||
output_content = choice.message.content.text
|
|
||||||
# TODO: handle image content
|
|
||||||
output_messages.append(
|
|
||||||
OpenAIResponseOutputMessage(
|
|
||||||
id=f"msg_{uuid.uuid4()}",
|
|
||||||
content=[OpenAIResponseOutputMessageContentOutputText(text=output_content)],
|
|
||||||
status="completed",
|
|
||||||
)
|
|
||||||
)
|
|
||||||
response = OpenAIResponseObject(
|
response = OpenAIResponseObject(
|
||||||
created_at=chat_response.created,
|
created_at=chat_response.created,
|
||||||
id=f"resp-{uuid.uuid4()}",
|
id=f"resp-{uuid.uuid4()}",
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue