Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-28 21:58:46 +00:00
Stub in an initial OpenAI Responses API

Signed-off-by: Ben Browning <bbrownin@redhat.com>

parent c149cf2e0f
commit 70c088af3a

18 changed files with 441 additions and 0 deletions
@@ -24,6 +24,7 @@ class Api(Enum):
    eval = "eval"
    post_training = "post_training"
    tool_runtime = "tool_runtime"
    openai_responses = "openai_responses"

    telemetry = "telemetry"
llama_stack/apis/openai_responses/__init__.py (new file, 7 lines)
@@ -0,0 +1,7 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from .openai_responses import *  # noqa: F401 F403
llama_stack/apis/openai_responses/openai_responses.py (new file, 91 lines)
@@ -0,0 +1,91 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import AsyncIterator, List, Literal, Optional, Protocol, Union, runtime_checkable

from pydantic import BaseModel, Field
from typing_extensions import Annotated

from llama_stack.schema_utils import json_schema_type, webmethod


@json_schema_type
class OpenAIResponseError(BaseModel):
    code: str
    message: str


@json_schema_type
class OpenAIResponseOutputMessageContentOutputText(BaseModel):
    text: str
    type: Literal["output_text"] = "output_text"


OpenAIResponseOutputMessageContent = Annotated[
    Union[OpenAIResponseOutputMessageContentOutputText,],
    Field(discriminator="type"),
]


@json_schema_type
class OpenAIResponseOutputMessage(BaseModel):
    id: str
    content: List[OpenAIResponseOutputMessageContent]
    role: Literal["assistant"] = "assistant"
    status: str
    type: Literal["message"] = "message"


OpenAIResponseOutput = Annotated[
    Union[OpenAIResponseOutputMessage,],
    Field(discriminator="type"),
]


@json_schema_type
class OpenAIResponseObject(BaseModel):
    created_at: int
    error: Optional[OpenAIResponseError] = None
    id: str
    model: str
    object: Literal["response"] = "response"
    output: List[OpenAIResponseOutput]
    parallel_tool_calls: bool = False
    previous_response_id: Optional[str] = None
    status: str
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    truncation: Optional[str] = None
    user: Optional[str] = None


@json_schema_type
class OpenAIResponseObjectStream(BaseModel):
    response: OpenAIResponseObject
    type: Literal["response.created"] = "response.created"


@runtime_checkable
class OpenAIResponses(Protocol):
    """
    OpenAI Responses API implementation.
    """

    @webmethod(route="/openai/v1/responses/{id}", method="GET")
    async def get_openai_response(
        self,
        id: str,
    ) -> OpenAIResponseObject: ...

    @webmethod(route="/openai/v1/responses", method="POST")
    async def create_openai_response(
        self,
        input: str,
        model: str,
        previous_response_id: Optional[str] = None,
        store: Optional[bool] = True,
        stream: Optional[bool] = False,
    ) -> Union[OpenAIResponseObject, AsyncIterator[OpenAIResponseObjectStream]]: ...
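The two webmethod routes above are the entire surface of the stubbed API. As a quick orientation, here is a minimal sketch of exercising them over HTTP with the requests library. The base URL, port, and model identifier are illustrative assumptions, not part of this commit, and the JSON body keys simply mirror the create_openai_response parameters.

# Minimal sketch. Assumptions: a Llama Stack server at http://localhost:8321 and a
# registered chat model named "my-chat-model"; adjust both for your environment.
import requests

BASE_URL = "http://localhost:8321"

# POST /openai/v1/responses creates (and, with store=True, persists) a response.
created = requests.post(
    f"{BASE_URL}/openai/v1/responses",
    json={"model": "my-chat-model", "input": "Say hello in one short sentence."},
).json()
print(created["id"], created["status"])

# GET /openai/v1/responses/{id} fetches a previously stored response object.
fetched = requests.get(f"{BASE_URL}/openai/v1/responses/{created['id']}").json()
print(fetched["output"][0]["content"][0]["text"])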
@@ -16,6 +16,7 @@ from llama_stack.apis.files import Files
from llama_stack.apis.inference import Inference
from llama_stack.apis.inspect import Inspect
from llama_stack.apis.models import Models
from llama_stack.apis.openai_responses.openai_responses import OpenAIResponses
from llama_stack.apis.post_training import PostTraining
from llama_stack.apis.providers import Providers as ProvidersAPI
from llama_stack.apis.safety import Safety

@@ -80,6 +81,7 @@ def api_protocol_map() -> Dict[Api, Any]:
        Api.tool_groups: ToolGroups,
        Api.tool_runtime: ToolRuntime,
        Api.files: Files,
        Api.openai_responses: OpenAIResponses,
    }
@@ -149,6 +149,8 @@ class CommonRoutingTableImpl(RoutingTable):
                p.benchmark_store = self
            elif api == Api.tool_runtime:
                p.tool_store = self
            elif api == Api.openai_responses:
                p.model_store = self

    async def shutdown(self) -> None:
        for p in self.impls_by_provider_id.values():
llama_stack/providers/inline/openai_responses/__init__.py (new file, 19 lines)
@@ -0,0 +1,19 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Any, Dict

from llama_stack.apis.datatypes import Api

from .config import OpenAIResponsesImplConfig


async def get_provider_impl(config: OpenAIResponsesImplConfig, deps: Dict[Api, Any]):
    from .openai_responses import OpenAIResponsesImpl

    impl = OpenAIResponsesImpl(config, deps[Api.models], deps[Api.inference])
    await impl.initialize()
    return impl
llama_stack/providers/inline/openai_responses/config.py (new file, 24 lines)
@@ -0,0 +1,24 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Any, Dict

from pydantic import BaseModel

from llama_stack.providers.utils.kvstore.config import KVStoreConfig, SqliteKVStoreConfig


class OpenAIResponsesImplConfig(BaseModel):
    kvstore: KVStoreConfig

    @classmethod
    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> Dict[str, Any]:
        return {
            "kvstore": SqliteKVStoreConfig.sample_run_config(
                __distro_dir__=__distro_dir__,
                db_name="openai_responses.db",
            )
        }
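The sample_run_config hook above is what the distribution templates call to emit the kvstore block that shows up in the run.yaml hunks later in this commit. A rough sketch of invoking it directly follows; the distro directory value is an illustrative placeholder, and the commented result is inferred from the run.yaml below rather than guaranteed here.

# Rough sketch, not part of this commit: inspect the generated sample config.
from llama_stack.providers.inline.openai_responses.config import OpenAIResponsesImplConfig

sample = OpenAIResponsesImplConfig.sample_run_config(__distro_dir__="~/.llama/distributions/remote-vllm")
print(sample)
# Expected to resemble the kvstore block in the run.yaml hunks below, e.g.:
# {"kvstore": {"type": "sqlite", "namespace": None,
#              "db_path": "~/.llama/distributions/remote-vllm/openai_responses.db"}}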
llama_stack/providers/inline/openai_responses/openai_responses.py (new file, 126 lines)
@@ -0,0 +1,126 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import uuid
from typing import AsyncIterator, List, Optional, cast

from llama_stack.apis.inference.inference import (
    Inference,
    OpenAIAssistantMessageParam,
    OpenAIChatCompletion,
    OpenAIChatCompletionContentPartTextParam,
    OpenAIMessageParam,
    OpenAIUserMessageParam,
)
from llama_stack.apis.models.models import Models, ModelType
from llama_stack.apis.openai_responses import OpenAIResponses
from llama_stack.apis.openai_responses.openai_responses import (
    OpenAIResponseObject,
    OpenAIResponseObjectStream,
    OpenAIResponseOutputMessage,
    OpenAIResponseOutputMessageContentOutputText,
)
from llama_stack.log import get_logger
from llama_stack.providers.utils.kvstore import kvstore_impl

from .config import OpenAIResponsesImplConfig

logger = get_logger(name=__name__, category="openai_responses")

OPENAI_RESPONSES_PREFIX = "openai_responses:"


class OpenAIResponsesImpl(OpenAIResponses):
    def __init__(self, config: OpenAIResponsesImplConfig, models_api: Models, inference_api: Inference):
        self.config = config
        self.models_api = models_api
        self.inference_api = inference_api

    async def initialize(self) -> None:
        self.kvstore = await kvstore_impl(self.config.kvstore)

    async def shutdown(self) -> None:
        logger.debug("OpenAIResponsesImpl.shutdown")
        pass

    async def get_openai_response(
        self,
        id: str,
    ) -> OpenAIResponseObject:
        key = f"{OPENAI_RESPONSES_PREFIX}{id}"
        response_json = await self.kvstore.get(key=key)
        if response_json is None:
            raise ValueError(f"OpenAI response with id '{id}' not found")
        return OpenAIResponseObject.model_validate_json(response_json)

    async def create_openai_response(
        self,
        input: str,
        model: str,
        previous_response_id: Optional[str] = None,
        store: Optional[bool] = True,
        stream: Optional[bool] = False,
    ):
        model_obj = await self.models_api.get_model(model)
        if model_obj is None:
            raise ValueError(f"Model '{model}' not found")
        if model_obj.model_type == ModelType.embedding:
            raise ValueError(f"Model '{model}' is an embedding model and does not support chat completions")

        messages: List[OpenAIMessageParam] = []
        if previous_response_id:
            previous_response = await self.get_openai_response(previous_response_id)
            messages.append(OpenAIAssistantMessageParam(content=previous_response.output[0].content[0].text))

        messages.append(OpenAIUserMessageParam(content=input))

        chat_response = await self.inference_api.openai_chat_completion(
            model=model_obj.identifier,
            messages=messages,
        )
        # type cast to appease mypy
        chat_response = cast(OpenAIChatCompletion, chat_response)

        output_messages = []
        for choice in chat_response.choices:
            output_content = ""
            if isinstance(choice.message.content, str):
                output_content = choice.message.content
            elif isinstance(choice.message.content, OpenAIChatCompletionContentPartTextParam):
                output_content = choice.message.content.text
            # TODO: handle image content
            output_messages.append(
                OpenAIResponseOutputMessage(
                    id=f"msg_{uuid.uuid4()}",
                    content=[OpenAIResponseOutputMessageContentOutputText(text=output_content)],
                    status="completed",
                )
            )
        response = OpenAIResponseObject(
            created_at=chat_response.created,
            id=f"resp-{uuid.uuid4()}",
            model=model_obj.identifier,
            object="response",
            status="completed",
            output=output_messages,
        )

        if store:
            # Store in kvstore
            key = f"{OPENAI_RESPONSES_PREFIX}{response.id}"
            await self.kvstore.set(
                key=key,
                value=response.model_dump_json(),
            )

        if stream:

            async def async_response() -> AsyncIterator[OpenAIResponseObjectStream]:
                yield OpenAIResponseObjectStream(response=response)

            return async_response()

        return response
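One detail worth noting in create_openai_response above: when previous_response_id is supplied, the first output message of the stored response is replayed as an assistant turn before the new user input, which is how this stub chains multi-turn conversations. Continuing the earlier HTTP sketch (same placeholder server and model, still just an illustration), a chained follow-up would look like:

# Sketch only: chain a follow-up request onto the response created in the earlier example.
followup = requests.post(
    f"{BASE_URL}/openai/v1/responses",
    json={
        "model": "my-chat-model",
        "input": "Now repeat that in French.",
        "previous_response_id": created["id"],
    },
).json()
print(followup["output"][0]["content"][0]["text"])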
llama_stack/providers/registry/openai_responses.py (new file, 25 lines)
@@ -0,0 +1,25 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import List

from llama_stack.providers.datatypes import Api, InlineProviderSpec, ProviderSpec


def available_providers() -> List[ProviderSpec]:
    return [
        InlineProviderSpec(
            api=Api.openai_responses,
            provider_type="inline::openai-responses",
            pip_packages=[],
            module="llama_stack.providers.inline.openai_responses",
            config_class="llama_stack.providers.inline.openai_responses.config.OpenAIResponsesImplConfig",
            api_dependencies=[
                Api.models,
                Api.inference,
            ],
        ),
    ]
@@ -31,4 +31,6 @@ distribution_spec:
    - inline::rag-runtime
    - remote::model-context-protocol
    - remote::wolfram-alpha
    openai_responses:
    - inline::openai-responses
image_type: conda
@@ -5,6 +5,7 @@ apis:
- datasetio
- eval
- inference
- openai_responses
- safety
- scoring
- telemetry

@@ -115,6 +116,14 @@ providers:
    provider_type: remote::wolfram-alpha
    config:
      api_key: ${env.WOLFRAM_ALPHA_API_KEY:}
  openai_responses:
  - provider_id: openai-responses
    provider_type: inline::openai-responses
    config:
      kvstore:
        type: sqlite
        namespace: null
        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/openai_responses.db
metadata_store:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/registry.db
@@ -5,6 +5,7 @@ apis:
- datasetio
- eval
- inference
- openai_responses
- safety
- scoring
- telemetry

@@ -108,6 +109,14 @@ providers:
    provider_type: remote::wolfram-alpha
    config:
      api_key: ${env.WOLFRAM_ALPHA_API_KEY:}
  openai_responses:
  - provider_id: openai-responses
    provider_type: inline::openai-responses
    config:
      kvstore:
        type: sqlite
        namespace: null
        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/openai_responses.db
metadata_store:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/registry.db
@@ -39,6 +39,7 @@ def get_distribution_template() -> DistributionTemplate:
            "remote::model-context-protocol",
            "remote::wolfram-alpha",
        ],
        "openai_responses": ["inline::openai-responses"],
    }
    name = "remote-vllm"
    inference_provider = Provider(