fold openai responses into the Agents API

Ashwin Bharambe 2025-04-28 10:27:28 -07:00
parent 207224a811
commit abd6280cb8
25 changed files with 967 additions and 199 deletions

View file

@@ -38,6 +38,13 @@ from llama_stack.apis.safety import SafetyViolation
from llama_stack.apis.tools import ToolDef
from llama_stack.schema_utils import json_schema_type, register_schema, webmethod
from .openai_responses import (
OpenAIResponseInputMessage,
OpenAIResponseInputTool,
OpenAIResponseObject,
OpenAIResponseObjectStream,
)
class Attachment(BaseModel):
"""An attachment to an agent turn.
@@ -593,3 +600,39 @@ class Agents(Protocol):
:returns: A ListAgentSessionsResponse.
"""
...
# We situate the OpenAI Responses API in the Agents API just as we did for
# Inference. The Responses API, in its intent, serves the same purpose as the
# Agents API above -- it is essentially a lightweight "agentic loop" with
# integrated tool calling.
#
# Both of these APIs are inherently stateful.
@webmethod(route="/openai/v1/responses/{id}", method="GET")
async def get_openai_response(
self,
id: str,
) -> OpenAIResponseObject:
"""Retrieve an OpenAI response by its ID.
:param id: The ID of the OpenAI response to retrieve.
:returns: An OpenAIResponseObject.
"""
...
@webmethod(route="/openai/v1/responses", method="POST")
async def create_openai_response(
self,
input: Union[str, List[OpenAIResponseInputMessage]],
model: str,
previous_response_id: Optional[str] = None,
store: Optional[bool] = True,
stream: Optional[bool] = False,
tools: Optional[List[OpenAIResponseInputTool]] = None,
) -> Union[OpenAIResponseObject, AsyncIterator[OpenAIResponseObjectStream]]:
"""Create a new OpenAI response.
:param input: Input message(s) to create the response.
:param model: The underlying LLM used for completions.
:param previous_response_id: (Optional) if specified, the new response will be a continuation of the previous response. This can be used to easily fork off new responses from existing responses.
"""

View file

@@ -4,12 +4,12 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from typing import AsyncIterator, List, Literal, Optional, Protocol, Union, runtime_checkable
from typing import List, Literal, Optional, Union
from pydantic import BaseModel, Field
from typing_extensions import Annotated
from llama_stack.schema_utils import json_schema_type, register_schema, webmethod
from llama_stack.schema_utils import json_schema_type, register_schema
@json_schema_type
@@ -104,7 +104,7 @@ class OpenAIResponseInputMessageContentText(BaseModel):
@json_schema_type
class OpenAIResponseInputMessageContentImage(BaseModel):
detail: Literal["low", "high", "auto"] = "auto"
detail: Literal["low"] | Literal["high"] | Literal["auto"] = "auto"
type: Literal["input_image"] = "input_image"
# TODO: handle file_id
image_url: Optional[str] = None
@@ -121,13 +121,13 @@ register_schema(OpenAIResponseInputMessageContent, name="OpenAIResponseInputMess
@json_schema_type
class OpenAIResponseInputMessage(BaseModel):
content: Union[str, List[OpenAIResponseInputMessageContent]]
role: Literal["system", "developer", "user", "assistant"]
role: Literal["system"] | Literal["developer"] | Literal["user"] | Literal["assistant"]
type: Optional[Literal["message"]] = "message"
@json_schema_type
class OpenAIResponseInputToolWebSearch(BaseModel):
type: Literal["web_search", "web_search_preview_2025_03_11"] = "web_search"
type: Literal["web_search"] | Literal["web_search_preview_2025_03_11"] = "web_search"
# TODO: actually use search_context_size somewhere...
search_context_size: Optional[str] = Field(default="medium", pattern="^(low|medium|high)$")
# TODO: add user_location
@@ -138,27 +138,3 @@ OpenAIResponseInputTool = Annotated[
Field(discriminator="type"),
]
register_schema(OpenAIResponseInputTool, name="OpenAIResponseInputTool")
@runtime_checkable
class OpenAIResponses(Protocol):
"""
OpenAI Responses API implementation.
"""
@webmethod(route="/openai/v1/responses/{id}", method="GET")
async def get_openai_response(
self,
id: str,
) -> OpenAIResponseObject: ...
@webmethod(route="/openai/v1/responses", method="POST")
async def create_openai_response(
self,
input: Union[str, List[OpenAIResponseInputMessage]],
model: str,
previous_response_id: Optional[str] = None,
store: Optional[bool] = True,
stream: Optional[bool] = False,
tools: Optional[List[OpenAIResponseInputTool]] = None,
) -> Union[OpenAIResponseObject, AsyncIterator[OpenAIResponseObjectStream]]: ...
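The OpenAIResponseInputTool union above is registered as a tagged union discriminated on its type field. A minimal, self-contained sketch of that pydantic pattern (the two tool models here are illustrative stand-ins, not the ones defined above):

from typing import Annotated, Literal, Union
from pydantic import BaseModel, Field, TypeAdapter

class WebSearchTool(BaseModel):
    type: Literal["web_search"] = "web_search"
    search_context_size: str = "medium"

class FileSearchTool(BaseModel):
    type: Literal["file_search"] = "file_search"

# the "type" field selects which model validates a given payload
Tool = Annotated[Union[WebSearchTool, FileSearchTool], Field(discriminator="type")]

tool = TypeAdapter(Tool).validate_python({"type": "web_search"})
assert isinstance(tool, WebSearchTool)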

View file

@@ -24,7 +24,6 @@ class Api(Enum):
eval = "eval"
post_training = "post_training"
tool_runtime = "tool_runtime"
openai_responses = "openai_responses"
telemetry = "telemetry"

View file

@@ -1,7 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from .openai_responses import * # noqa: F401 F403

View file

@@ -16,7 +16,6 @@ from llama_stack.apis.files import Files
from llama_stack.apis.inference import Inference
from llama_stack.apis.inspect import Inspect
from llama_stack.apis.models import Models
from llama_stack.apis.openai_responses.openai_responses import OpenAIResponses
from llama_stack.apis.post_training import PostTraining
from llama_stack.apis.providers import Providers as ProvidersAPI
from llama_stack.apis.safety import Safety
@@ -81,7 +80,6 @@ def api_protocol_map() -> Dict[Api, Any]:
Api.tool_groups: ToolGroups,
Api.tool_runtime: ToolRuntime,
Api.files: Files,
Api.openai_responses: OpenAIResponses,
}

View file

@@ -149,8 +149,6 @@ class CommonRoutingTableImpl(RoutingTable):
p.benchmark_store = self
elif api == Api.tool_runtime:
p.tool_store = self
elif api == Api.openai_responses:
p.model_store = self
async def shutdown(self) -> None:
for p in self.impls_by_provider_id.values():

View file

@@ -23,6 +23,9 @@ from llama_stack.apis.agents import (
Document,
ListAgentSessionsResponse,
ListAgentsResponse,
OpenAIResponseInputMessage,
OpenAIResponseInputTool,
OpenAIResponseObject,
Session,
Turn,
)
@@ -40,6 +43,7 @@ from llama_stack.providers.utils.kvstore import InmemoryKVStoreImpl, kvstore_imp
from .agent_instance import ChatAgent
from .config import MetaReferenceAgentsImplConfig
from .openai_responses import OpenAIResponsesImpl
logger = logging.getLogger()
logger.setLevel(logging.INFO)
@@ -63,9 +67,16 @@ class MetaReferenceAgentsImpl(Agents):
self.tool_groups_api = tool_groups_api
self.in_memory_store = InmemoryKVStoreImpl()
self.openai_responses_impl = None
async def initialize(self) -> None:
self.persistence_store = await kvstore_impl(self.config.persistence_store)
self.openai_responses_impl = OpenAIResponsesImpl(
self.persistence_store,
inference_api=self.inference_api,
tool_groups_api=self.tool_groups_api,
tool_runtime_api=self.tool_runtime_api,
)
# check if "bwrap" is available
if not shutil.which("bwrap"):
@@ -244,3 +255,23 @@
agent_id: str,
) -> ListAgentSessionsResponse:
pass
# OpenAI responses
async def get_openai_response(
self,
id: str,
) -> OpenAIResponseObject:
return await self.openai_responses_impl.get_openai_response(id)
async def create_openai_response(
self,
input: Union[str, List[OpenAIResponseInputMessage]],
model: str,
previous_response_id: Optional[str] = None,
store: Optional[bool] = True,
stream: Optional[bool] = False,
tools: Optional[List[OpenAIResponseInputTool]] = None,
) -> OpenAIResponseObject:
return await self.openai_responses_impl.create_openai_response(
input, model, previous_response_id, store, stream, tools
)
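Since previous_response_id is threaded straight through to OpenAIResponsesImpl, chaining turns is a matter of passing the prior response's ID back in. A minimal sketch, assuming an already-initialized MetaReferenceAgentsImpl instance and an illustrative model name:

async def demo(agents: MetaReferenceAgentsImpl) -> None:
    # first turn: a fresh response, persisted because store defaults to True
    first = await agents.create_openai_response(
        input="Name one planet.",
        model="llama3.2-3b",
    )
    # second turn: prior messages are reloaded from the persistence store
    # via the previous response's ID, then the new input is appended
    followup = await agents.create_openai_response(
        input="Name another one.",
        model="llama3.2-3b",
        previous_response_id=first.id,
    )
    print(followup.output)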

View file

@@ -10,6 +10,20 @@ from typing import AsyncIterator, List, Optional, Union, cast
from openai.types.chat import ChatCompletionToolParam
from llama_stack.apis.agents.openai_responses import (
OpenAIResponseInputMessage,
OpenAIResponseInputMessageContentImage,
OpenAIResponseInputMessageContentText,
OpenAIResponseInputTool,
OpenAIResponseObject,
OpenAIResponseObjectStream,
OpenAIResponseObjectStreamResponseCompleted,
OpenAIResponseObjectStreamResponseCreated,
OpenAIResponseOutput,
OpenAIResponseOutputMessage,
OpenAIResponseOutputMessageContentOutputText,
OpenAIResponseOutputMessageWebSearchToolCall,
)
from llama_stack.apis.inference.inference import (
Inference,
OpenAIAssistantMessageParam,
@@ -24,29 +38,11 @@ from llama_stack.apis.inference.inference import (
OpenAIToolMessageParam,
OpenAIUserMessageParam,
)
from llama_stack.apis.models.models import Models, ModelType
from llama_stack.apis.openai_responses import OpenAIResponses
from llama_stack.apis.openai_responses.openai_responses import (
OpenAIResponseInputMessage,
OpenAIResponseInputMessageContentImage,
OpenAIResponseInputMessageContentText,
OpenAIResponseInputTool,
OpenAIResponseObject,
OpenAIResponseObjectStream,
OpenAIResponseObjectStreamResponseCompleted,
OpenAIResponseObjectStreamResponseCreated,
OpenAIResponseOutput,
OpenAIResponseOutputMessage,
OpenAIResponseOutputMessageContentOutputText,
OpenAIResponseOutputMessageWebSearchToolCall,
)
from llama_stack.apis.tools.tools import ToolGroups, ToolInvocationResult, ToolRuntime
from llama_stack.log import get_logger
from llama_stack.models.llama.datatypes import ToolDefinition, ToolParamDefinition
from llama_stack.providers.utils.inference.openai_compat import convert_tooldef_to_openai_tool
from llama_stack.providers.utils.kvstore import kvstore_impl
from .config import OpenAIResponsesImplConfig
from llama_stack.providers.utils.kvstore import KVStore
logger = get_logger(name=__name__, category="openai_responses")
@@ -80,34 +76,25 @@ async def _openai_choices_to_output_messages(choices: List[OpenAIChoice]) -> Lis
return output_messages
class OpenAIResponsesImpl(OpenAIResponses):
class OpenAIResponsesImpl:
def __init__(
self,
config: OpenAIResponsesImplConfig,
models_api: Models,
persistence_store: KVStore,
inference_api: Inference,
tool_groups_api: ToolGroups,
tool_runtime_api: ToolRuntime,
):
self.config = config
self.models_api = models_api
self.persistence_store = persistence_store
self.inference_api = inference_api
self.tool_groups_api = tool_groups_api
self.tool_runtime_api = tool_runtime_api
async def initialize(self) -> None:
self.kvstore = await kvstore_impl(self.config.kvstore)
async def shutdown(self) -> None:
logger.debug("OpenAIResponsesImpl.shutdown")
pass
async def get_openai_response(
self,
id: str,
) -> OpenAIResponseObject:
key = f"{OPENAI_RESPONSES_PREFIX}{id}"
response_json = await self.kvstore.get(key=key)
response_json = await self.persistence_store.get(key=key)
if response_json is None:
raise ValueError(f"OpenAI response with id '{id}' not found")
return OpenAIResponseObject.model_validate_json(response_json)
@@ -122,11 +109,6 @@ class OpenAIResponsesImpl(OpenAIResponses):
tools: Optional[List[OpenAIResponseInputTool]] = None,
):
stream = False if stream is None else stream
model_obj = await self.models_api.get_model(model)
if model_obj is None:
raise ValueError(f"Model '{model}' not found")
if model_obj.model_type == ModelType.embedding:
raise ValueError(f"Model '{model}' is an embedding model and does not support chat completions")
messages: List[OpenAIMessageParam] = []
if previous_response_id:
@@ -155,7 +137,7 @@ class OpenAIResponsesImpl(OpenAIResponses):
chat_tools = await self._convert_response_tools_to_chat_tools(tools) if tools else None
chat_response = await self.inference_api.openai_chat_completion(
model=model_obj.identifier,
model=model,
messages=messages,
tools=chat_tools,
stream=stream,
@@ -198,14 +180,14 @@ class OpenAIResponsesImpl(OpenAIResponses):
output_messages: List[OpenAIResponseOutput] = []
if chat_response.choices[0].finish_reason == "tool_calls":
output_messages.extend(
await self._execute_tool_and_return_final_output(model_obj.identifier, stream, chat_response, messages)
await self._execute_tool_and_return_final_output(model, stream, chat_response, messages)
)
else:
output_messages.extend(await _openai_choices_to_output_messages(chat_response.choices))
response = OpenAIResponseObject(
created_at=chat_response.created,
id=f"resp-{uuid.uuid4()}",
model=model_obj.identifier,
model=model,
object="response",
status="completed",
output=output_messages,
@@ -214,7 +196,7 @@ class OpenAIResponsesImpl(OpenAIResponses):
if store:
# Store in kvstore
key = f"{OPENAI_RESPONSES_PREFIX}{response.id}"
await self.kvstore.set(
await self.persistence_store.set(
key=key,
value=response.model_dump_json(),
)
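The storage scheme is deliberately simple: each response is serialized to JSON with model_dump_json and keyed by a fixed prefix plus its ID, and retrieval reverses that with model_validate_json. A stripped-down sketch of the round trip (the prefix value is an assumption, since the constant's definition is not shown in this hunk):

from pydantic import BaseModel

PREFIX = "openai_responses:"  # assumed value of OPENAI_RESPONSES_PREFIX

class Resp(BaseModel):
    id: str
    status: str

async def save(store, resp: Resp) -> None:
    # serialize the pydantic model and key it under the prefixed ID
    await store.set(key=f"{PREFIX}{resp.id}", value=resp.model_dump_json())

async def load(store, resp_id: str) -> Resp:
    raw = await store.get(key=f"{PREFIX}{resp_id}")
    if raw is None:
        raise ValueError(f"OpenAI response with id '{resp_id}' not found")
    return Resp.model_validate_json(raw)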

View file

@@ -1,21 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from typing import Any, Dict
from llama_stack.apis.datatypes import Api
from .config import OpenAIResponsesImplConfig
async def get_provider_impl(config: OpenAIResponsesImplConfig, deps: Dict[Api, Any]):
from .openai_responses import OpenAIResponsesImpl
impl = OpenAIResponsesImpl(
config, deps[Api.models], deps[Api.inference], deps[Api.tool_groups], deps[Api.tool_runtime]
)
await impl.initialize()
return impl

View file

@@ -1,24 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from typing import Any, Dict
from pydantic import BaseModel
from llama_stack.providers.utils.kvstore.config import KVStoreConfig, SqliteKVStoreConfig
class OpenAIResponsesImplConfig(BaseModel):
kvstore: KVStoreConfig
@classmethod
def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> Dict[str, Any]:
return {
"kvstore": SqliteKVStoreConfig.sample_run_config(
__distro_dir__=__distro_dir__,
db_name="openai_responses.db",
)
}

View file

@@ -1,27 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from typing import List
from llama_stack.providers.datatypes import Api, InlineProviderSpec, ProviderSpec
def available_providers() -> List[ProviderSpec]:
return [
InlineProviderSpec(
api=Api.openai_responses,
provider_type="inline::openai-responses",
pip_packages=[],
module="llama_stack.providers.inline.openai_responses",
config_class="llama_stack.providers.inline.openai_responses.config.OpenAIResponsesImplConfig",
api_dependencies=[
Api.models,
Api.inference,
Api.tool_groups,
Api.tool_runtime,
],
),
]

View file

@@ -478,6 +478,8 @@ class JsonSchemaGenerator:
}
return ret
elif origin_type is Literal:
if len(typing.get_args(typ)) != 1:
print(f"Literal type {typ} has {len(typing.get_args(typ))} arguments")
(literal_value,) = typing.get_args(typ) # unpack value of literal type
schema = self.type_to_schema(type(literal_value))
schema["const"] = literal_value
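The guard above exists because the generator only knows how to emit a schema for a single-argument Literal: the value's base type plus a "const" constraint. That is also why the API models in this commit were rewritten as unions of one-value Literals instead of multi-value ones. A simplified sketch of the mapping, assuming string, int, and bool literals only:

import typing
from typing import Literal

def literal_to_schema(typ) -> dict:
    # single-argument Literal -> base type of the value plus a "const"
    (value,) = typing.get_args(typ)
    json_type = {str: "string", int: "integer", bool: "boolean"}[type(value)]
    return {"type": json_type, "const": value}

print(literal_to_schema(Literal["input_image"]))
# -> {'type': 'string', 'const': 'input_image'}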

View file

@@ -24,8 +24,6 @@ distribution_spec:
- inline::braintrust
telemetry:
- inline::meta-reference
openai_responses:
- inline::openai-responses
tool_runtime:
- remote::brave-search
- remote::tavily-search

View file

@@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- openai_responses
- safety
- scoring
- telemetry
@@ -92,14 +91,6 @@ providers:
service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
sinks: ${env.TELEMETRY_SINKS:console,sqlite}
sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/remote-vllm/trace_store.db}
openai_responses:
- provider_id: openai-responses
provider_type: inline::openai-responses
config:
kvstore:
type: sqlite
namespace: null
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/openai_responses.db
tool_runtime:
- provider_id: brave-search
provider_type: remote::brave-search

View file

@@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- openai_responses
- safety
- scoring
- telemetry
@@ -85,14 +84,6 @@ providers:
service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
sinks: ${env.TELEMETRY_SINKS:console,sqlite}
sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/remote-vllm/trace_store.db}
openai_responses:
- provider_id: openai-responses
provider_type: inline::openai-responses
config:
kvstore:
type: sqlite
namespace: null
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/openai_responses.db
tool_runtime:
- provider_id: brave-search
provider_type: remote::brave-search

View file

@@ -31,7 +31,6 @@ def get_distribution_template() -> DistributionTemplate:
"datasetio": ["remote::huggingface", "inline::localfs"],
"scoring": ["inline::basic", "inline::llm-as-judge", "inline::braintrust"],
"telemetry": ["inline::meta-reference"],
"openai_responses": ["inline::openai-responses"],
"tool_runtime": [
"remote::brave-search",
"remote::tavily-search",

View file

@@ -24,8 +24,6 @@ distribution_spec:
- inline::basic
- inline::llm-as-judge
- inline::braintrust
openai_responses:
- inline::openai-responses
tool_runtime:
- remote::brave-search
- remote::tavily-search

View file

@@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- openai_responses
- safety
- scoring
- telemetry
@@ -88,14 +87,6 @@ providers:
provider_type: inline::braintrust
config:
openai_api_key: ${env.OPENAI_API_KEY:}
openai_responses:
- provider_id: openai-responses
provider_type: inline::openai-responses
config:
kvstore:
type: sqlite
namespace: null
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/together}/openai_responses.db
tool_runtime:
- provider_id: brave-search
provider_type: remote::brave-search

View file

@@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- openai_responses
- safety
- scoring
- telemetry
@@ -83,14 +82,6 @@ providers:
provider_type: inline::braintrust
config:
openai_api_key: ${env.OPENAI_API_KEY:}
openai_responses:
- provider_id: openai-responses
provider_type: inline::openai-responses
config:
kvstore:
type: sqlite
namespace: null
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/together}/openai_responses.db
tool_runtime:
- provider_id: brave-search
provider_type: remote::brave-search

View file

@@ -36,7 +36,6 @@ def get_distribution_template() -> DistributionTemplate:
"eval": ["inline::meta-reference"],
"datasetio": ["remote::huggingface", "inline::localfs"],
"scoring": ["inline::basic", "inline::llm-as-judge", "inline::braintrust"],
"openai_responses": ["inline::openai-responses"],
"tool_runtime": [
"remote::brave-search",
"remote::tavily-search",