fix(openai_responses): OpenAIResponsesObject is not complete

Emilio Garcia 2025-08-18 15:27:54 -04:00
parent 17a06452e7
commit 9b3f3740ad
3 changed files with 78 additions and 29 deletions
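For orientation, the hunks below replace `Optional[...]`, `Union[...]`, and `TypeAlias` annotations with PEP 604 `| None` unions and PEP 695 `type` aliases. A minimal side-by-side sketch of the two styles, with made-up alias names and assuming Python 3.12+:

from typing import Any, Literal, Optional, TypeAlias, Union

# Old style, as removed in these hunks: typing.Optional/Union plus a TypeAlias annotation.
EffortOld: TypeAlias = Optional[Literal["low", "medium", "high", "minimal"]]
VariablesOld: TypeAlias = Union[dict[str, Any], None]

# New style, as introduced in these hunks: PEP 604 unions and the PEP 695
# `type` statement (requires Python 3.12+).
type Effort = Literal["low", "medium", "high", "minimal"] | None
type Variables = dict[str, Any] | None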


@@ -4,7 +4,7 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
-from typing import Annotated, Any, Literal, Optional, TypeAlias, Union
+from typing import Annotated, Any, Literal, Optional
from pydantic import BaseModel, Field
from typing_extensions import TypedDict
@@ -14,12 +14,16 @@ from llama_stack.apis.tools.openai_tool_choice import (
ToolChoiceCustom,
ToolChoiceFunction,
ToolChoiceMcp,
-ToolChoiceTypes
+ToolChoiceTypes,
)
from llama_stack.apis.vector_io import SearchRankingOptions as FileSearchRankingOptions
from llama_stack.schema_utils import json_schema_type, register_schema
-OpenAIResponsesToolChoice: TypeAlias = Union[ToolChoiceTypes, ToolChoiceAllowed, ToolChoiceFunction, ToolChoiceMcp, ToolChoiceCustom]
+type OpenAIResponsesToolChoice = (
+ToolChoiceTypes | ToolChoiceAllowed | ToolChoiceFunction | ToolChoiceMcp | ToolChoiceCustom
+)
register_schema(OpenAIResponsesToolChoice, name="OpenAIResponsesToolChoice")
@json_schema_type
class OpenAIResponseError(BaseModel):
@@ -346,8 +350,8 @@ class OpenAIResponsePrompt(BaseModel):
"""
id: str
-variables: Optional[dict[str, Any]] = None
-version: Optional[str] = None
+variables: dict[str, Any] | None = None
+version: str | None = None
@json_schema_type
@@ -358,26 +362,26 @@ class OpenAIResponseReasoning(BaseModel):
:param generate_summary: Deprecated. Use the generate_summary_text field instead. (Optional) Whether to generate a summary of the reasoning process.
"""
-effort: Optional[Literal["low", "medium", "high", "minimal"]] = None
-generate_summary: Optional[str] = None
-summary: Optional[str] = None
+effort: Literal["low", "medium", "high", "minimal"] | None = None
+generate_summary: str | None = None
+summary: str | None = None
@json_schema_type
class OpenAIResponsesTool(BaseModel):
-description: Optional[str] = None
+description: str | None = None
"""
The description of the function, including guidance on when and how to call it,
and guidance about what to tell the user when calling (if anything).
"""
-name: Optional[str] = None
+name: str | None = None
"""The name of the function."""
-parameters: Optional[object] = None
+parameters: object | None = None
"""Parameters of the function in JSON Schema."""
-type: Optional[Literal["function"]] = None
+type: Literal["function"] | None = None
"""The type of the tool, i.e. `function`."""
@@ -414,7 +418,7 @@ class OpenAIResponseObject(BaseModel):
"""
created_at: int
-error: Optional[OpenAIResponseError] = None
+error: OpenAIResponseError | None = None
id: str
incomplete_details: Optional[OpenAIResponseIncompleteDetails] = None
instructions: Optional[str | list[str]] = None
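A hedged usage sketch of the union style that OpenAIResponsesToolChoice now uses (assumes Python 3.12+ and a pydantic v2 release that understands PEP 695 aliases; the payload values are illustrative, and only two of the five members are used to keep it short):

from pydantic import TypeAdapter

from llama_stack.apis.tools.openai_tool_choice import ToolChoiceFunction, ToolChoiceMcp

# Sketch only: the real alias also covers ToolChoiceTypes, ToolChoiceAllowed,
# and ToolChoiceCustom. Pydantic resolves the raw payload to the member whose
# fields it satisfies.
type ToolChoiceSketch = ToolChoiceFunction | ToolChoiceMcp

choice = TypeAdapter(ToolChoiceSketch).validate_python(
    {"type": "function", "name": "get_weather"}  # illustrative payload
)
assert isinstance(choice, ToolChoiceFunction)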


@@ -1,10 +1,20 @@
-from typing import Dict, List, Literal, Optional, TypeAlias
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+from typing import Literal
from pydantic import BaseModel
-ToolChoiceOptions: TypeAlias = Literal["none", "auto", "required"]
+from llama_stack.schema_utils import json_schema_type, register_schema
+type ToolChoiceOptions = Literal["none", "auto", "required"]
+register_schema(ToolChoiceOptions, name="ToolChoiceOptions")
+@json_schema_type
class ToolChoiceTypes(BaseModel):
type: Literal[
"file_search",
@@ -26,6 +36,7 @@ class ToolChoiceTypes(BaseModel):
"""
+@json_schema_type
class ToolChoiceAllowed(BaseModel):
mode: Literal["auto", "required"]
"""Constrains the tools available to the model to a pre-defined set.
@@ -36,7 +47,7 @@ class ToolChoiceAllowed(BaseModel):
`required` requires the model to call one or more of the allowed tools.
"""
-tools: List[Dict[str, object]]
+tools: list[dict[str, object]]
"""A list of tool definitions that the model should be allowed to call.
For the Responses API, the list of tool definitions might look like:
@@ -54,6 +65,7 @@ class ToolChoiceAllowed(BaseModel):
"""Allowed tool configuration type. Always `allowed_tools`."""
+@json_schema_type
class ToolChoiceFunction(BaseModel):
name: str
"""The name of the function to call."""
@@ -62,6 +74,7 @@ class ToolChoiceFunction(BaseModel):
"""For function calling, the type is always `function`."""
+@json_schema_type
class ToolChoiceMcp(BaseModel):
server_label: str
"""The label of the MCP server to use."""
@@ -69,10 +82,11 @@ class ToolChoiceMcp(BaseModel):
type: Literal["mcp"]
"""For MCP tools, the type is always `mcp`."""
-name: Optional[str] = None
+name: str | None = None
"""The name of the tool to call on the server."""
+@json_schema_type
class ToolChoiceCustom(BaseModel):
name: str
"""The name of the custom tool to call."""