Stub in an initial OpenAI Responses API

Signed-off-by: Ben Browning <bbrownin@redhat.com>
Ben Browning 2025-04-17 14:47:24 -04:00 committed by Ashwin Bharambe
parent c149cf2e0f
commit 70c088af3a
18 changed files with 441 additions and 0 deletions


@@ -0,0 +1,19 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from typing import Any, Dict

from llama_stack.apis.datatypes import Api

from .config import OpenAIResponsesImplConfig


async def get_provider_impl(config: OpenAIResponsesImplConfig, deps: Dict[Api, Any]):
    from .openai_responses import OpenAIResponsesImpl

    impl = OpenAIResponsesImpl(config, deps[Api.models], deps[Api.inference])
    await impl.initialize()
    return impl
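
A minimal sketch of how the stack would invoke this factory once its dependencies are resolved (illustrative only; the concrete models_api and inference_api objects are assumptions, not part of this commit):

    # Assumed: models_api and inference_api are the already-resolved provider impls.
    deps = {Api.models: models_api, Api.inference: inference_api}
    impl = await get_provider_impl(config, deps)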


@@ -0,0 +1,24 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from typing import Any, Dict

from pydantic import BaseModel

from llama_stack.providers.utils.kvstore.config import KVStoreConfig, SqliteKVStoreConfig


class OpenAIResponsesImplConfig(BaseModel):
    kvstore: KVStoreConfig

    @classmethod
    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> Dict[str, Any]:
        return {
            "kvstore": SqliteKVStoreConfig.sample_run_config(
                __distro_dir__=__distro_dir__,
                db_name="openai_responses.db",
            )
        }
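
For context, a minimal sketch of constructing this config directly. The import path comes from the config_class declared in the provider spec at the end of this commit; the db_path field name is an assumption about the existing SqliteKVStoreConfig schema, not something this diff defines:

    from llama_stack.providers.inline.openai_responses.config import OpenAIResponsesImplConfig
    from llama_stack.providers.utils.kvstore.config import SqliteKVStoreConfig

    # db_path is assumed to be the SQLite store's location field; adjust to the
    # actual KVStoreConfig schema in the tree.
    config = OpenAIResponsesImplConfig(
        kvstore=SqliteKVStoreConfig(db_path="/tmp/openai_responses.db"),
    )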


@@ -0,0 +1,126 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import uuid
from typing import AsyncIterator, List, Optional, cast

from llama_stack.apis.inference.inference import (
    Inference,
    OpenAIAssistantMessageParam,
    OpenAIChatCompletion,
    OpenAIChatCompletionContentPartTextParam,
    OpenAIMessageParam,
    OpenAIUserMessageParam,
)
from llama_stack.apis.models.models import Models, ModelType
from llama_stack.apis.openai_responses import OpenAIResponses
from llama_stack.apis.openai_responses.openai_responses import (
    OpenAIResponseObject,
    OpenAIResponseObjectStream,
    OpenAIResponseOutputMessage,
    OpenAIResponseOutputMessageContentOutputText,
)
from llama_stack.log import get_logger
from llama_stack.providers.utils.kvstore import kvstore_impl

from .config import OpenAIResponsesImplConfig

logger = get_logger(name=__name__, category="openai_responses")

OPENAI_RESPONSES_PREFIX = "openai_responses:"
class OpenAIResponsesImpl(OpenAIResponses):
    def __init__(self, config: OpenAIResponsesImplConfig, models_api: Models, inference_api: Inference):
        self.config = config
        self.models_api = models_api
        self.inference_api = inference_api

    async def initialize(self) -> None:
        self.kvstore = await kvstore_impl(self.config.kvstore)

    async def shutdown(self) -> None:
        logger.debug("OpenAIResponsesImpl.shutdown")
        pass

    async def get_openai_response(
        self,
        id: str,
    ) -> OpenAIResponseObject:
        key = f"{OPENAI_RESPONSES_PREFIX}{id}"
        response_json = await self.kvstore.get(key=key)
        if response_json is None:
            raise ValueError(f"OpenAI response with id '{id}' not found")
        return OpenAIResponseObject.model_validate_json(response_json)

    async def create_openai_response(
        self,
        input: str,
        model: str,
        previous_response_id: Optional[str] = None,
        store: Optional[bool] = True,
        stream: Optional[bool] = False,
    ):
        model_obj = await self.models_api.get_model(model)
        if model_obj is None:
            raise ValueError(f"Model '{model}' not found")
        if model_obj.model_type == ModelType.embedding:
            raise ValueError(f"Model '{model}' is an embedding model and does not support chat completions")

        messages: List[OpenAIMessageParam] = []
        if previous_response_id:
            previous_response = await self.get_openai_response(previous_response_id)
            messages.append(OpenAIAssistantMessageParam(content=previous_response.output[0].content[0].text))
        messages.append(OpenAIUserMessageParam(content=input))

        chat_response = await self.inference_api.openai_chat_completion(
            model=model_obj.identifier,
            messages=messages,
        )
        # type cast to appease mypy
        chat_response = cast(OpenAIChatCompletion, chat_response)

        output_messages = []
        for choice in chat_response.choices:
            output_content = ""
            if isinstance(choice.message.content, str):
                output_content = choice.message.content
            elif isinstance(choice.message.content, OpenAIChatCompletionContentPartTextParam):
                output_content = choice.message.content.text
            # TODO: handle image content
            output_messages.append(
                OpenAIResponseOutputMessage(
                    id=f"msg_{uuid.uuid4()}",
                    content=[OpenAIResponseOutputMessageContentOutputText(text=output_content)],
                    status="completed",
                )
            )

        response = OpenAIResponseObject(
            created_at=chat_response.created,
            id=f"resp-{uuid.uuid4()}",
            model=model_obj.identifier,
            object="response",
            status="completed",
            output=output_messages,
        )

        if store:
            # Store in kvstore
            key = f"{OPENAI_RESPONSES_PREFIX}{response.id}"
            await self.kvstore.set(
                key=key,
                value=response.model_dump_json(),
            )

        if stream:

            async def async_response() -> AsyncIterator[OpenAIResponseObjectStream]:
                yield OpenAIResponseObjectStream(response=response)

            return async_response()

        return response
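
A rough usage sketch of the stub's request flow (not part of this commit): chaining a follow-up turn via previous_response_id, fetching a stored response back from the kvstore, and consuming the single-event stream. The model identifier is a placeholder, and impl is assumed to be the object returned by get_provider_impl():

    # (inside an async context)
    model_id = "meta-llama/Llama-3.1-8B-Instruct"  # placeholder; any registered LLM identifier

    # First turn: stored under "openai_responses:<response id>" because store defaults to True.
    first = await impl.create_openai_response(input="What is the capital of France?", model=model_id)

    # Second turn: the stored assistant output is replayed as conversation context.
    second = await impl.create_openai_response(
        input="And roughly how many people live there?",
        model=model_id,
        previous_response_id=first.id,
    )

    # Stored responses can be retrieved by id.
    assert (await impl.get_openai_response(first.id)).id == first.id

    # stream=True returns an async iterator that currently yields a single terminal event.
    stream = await impl.create_openai_response(input="Hello", model=model_id, stream=True)
    async for event in stream:
        print(event.response.id)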


@@ -0,0 +1,25 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from typing import List

from llama_stack.providers.datatypes import Api, InlineProviderSpec, ProviderSpec


def available_providers() -> List[ProviderSpec]:
    return [
        InlineProviderSpec(
            api=Api.openai_responses,
            provider_type="inline::openai-responses",
            pip_packages=[],
            module="llama_stack.providers.inline.openai_responses",
            config_class="llama_stack.providers.inline.openai_responses.config.OpenAIResponsesImplConfig",
            api_dependencies=[
                Api.models,
                Api.inference,
            ],
        ),
    ]
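
As a quick illustration of what this registry entry exposes (a sketch only; it assumes available_providers and Api are importable from wherever this registry module lives, which the diff does not show):

    spec = available_providers()[0]
    assert spec.provider_type == "inline::openai-responses"
    assert spec.api_dependencies == [Api.models, Api.inference]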