From 9b3f3740adab6338069b4be62f23f4bd39c43f82 Mon Sep 17 00:00:00 2001 From: Emilio Garcia Date: Mon, 18 Aug 2025 15:27:54 -0400 Subject: [PATCH] fix(openai_responses): OpenAIResponsesObject is not complete --- llama_stack/apis/agents/openai_responses.py | 30 ++++++----- llama_stack/apis/tools/openai_tool_choice.py | 22 ++++++-- llama_stack/strong_typing/schema.py | 55 +++++++++++++++----- 3 files changed, 78 insertions(+), 29 deletions(-) diff --git a/llama_stack/apis/agents/openai_responses.py b/llama_stack/apis/agents/openai_responses.py index 7eac4849e..a3d888b28 100644 --- a/llama_stack/apis/agents/openai_responses.py +++ b/llama_stack/apis/agents/openai_responses.py @@ -4,7 +4,7 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. -from typing import Annotated, Any, Literal, Optional, TypeAlias, Union +from typing import Annotated, Any, Literal, Optional from pydantic import BaseModel, Field from typing_extensions import TypedDict @@ -14,12 +14,16 @@ from llama_stack.apis.tools.openai_tool_choice import ( ToolChoiceCustom, ToolChoiceFunction, ToolChoiceMcp, - ToolChoiceTypes + ToolChoiceTypes, ) from llama_stack.apis.vector_io import SearchRankingOptions as FileSearchRankingOptions from llama_stack.schema_utils import json_schema_type, register_schema -OpenAIResponsesToolChoice: TypeAlias = Union[ToolChoiceTypes, ToolChoiceAllowed, ToolChoiceFunction, ToolChoiceMcp, ToolChoiceCustom] +type OpenAIResponsesToolChoice = ( + ToolChoiceTypes | ToolChoiceAllowed | ToolChoiceFunction | ToolChoiceMcp | ToolChoiceCustom +) +register_schema(OpenAIResponsesToolChoice, name="OpenAIResponsesToolChoice") + @json_schema_type class OpenAIResponseError(BaseModel): @@ -346,8 +350,8 @@ class OpenAIResponsePrompt(BaseModel): """ id: str - variables: Optional[dict[str, Any]] = None - version: Optional[str] = None + variables: dict[str, Any] | None = None + version: str | None = None @json_schema_type @@ -358,26 +362,26 @@ class OpenAIResponseReasoning(BaseModel): :param generate_summary: Deprecated. Use the generate_summary_text field instead. (Optional) Whether to generate a summary of the reasoning process. """ - effort: Optional[Literal["low", "medium", "high", "minimal"]] = None - generate_summary: Optional[str] = None - summary: Optional[str] = None + effort: Literal["low", "medium", "high", "minimal"] | None = None + generate_summary: str | None = None + summary: str | None = None @json_schema_type class OpenAIResponsesTool(BaseModel): - description: Optional[str] = None + description: str | None = None """ The description of the function, including guidance on when and how to call it, and guidance about what to tell the user when calling (if anything). """ - name: Optional[str] = None + name: str | None = None """The name of the function.""" - parameters: Optional[object] = None + parameters: object | None = None """Parameters of the function in JSON Schema.""" - type: Optional[Literal["function"]] = None + type: Literal["function"] | None = None """The type of the tool, i.e. 
`function`.""" @@ -414,7 +418,7 @@ class OpenAIResponseObject(BaseModel): """ created_at: int - error: Optional[OpenAIResponseError] = None + error: OpenAIResponseError | None = None id: str incomplete_details: Optional[OpenAIResponseIncompleteDetails] = None instructions: Optional[str | list[str]] = None diff --git a/llama_stack/apis/tools/openai_tool_choice.py b/llama_stack/apis/tools/openai_tool_choice.py index 182dd267d..c7ab4a417 100644 --- a/llama_stack/apis/tools/openai_tool_choice.py +++ b/llama_stack/apis/tools/openai_tool_choice.py @@ -1,10 +1,20 @@ -from typing import Dict, List, Literal, Optional, TypeAlias +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. + +from typing import Literal from pydantic import BaseModel -ToolChoiceOptions: TypeAlias = Literal["none", "auto", "required"] +from llama_stack.schema_utils import json_schema_type, register_schema + +type ToolChoiceOptions = Literal["none", "auto", "required"] +register_schema(ToolChoiceOptions, name="ToolChoiceOptions") +@json_schema_type class ToolChoiceTypes(BaseModel): type: Literal[ "file_search", @@ -26,6 +36,7 @@ class ToolChoiceTypes(BaseModel): """ +@json_schema_type class ToolChoiceAllowed(BaseModel): mode: Literal["auto", "required"] """Constrains the tools available to the model to a pre-defined set. @@ -36,7 +47,7 @@ class ToolChoiceAllowed(BaseModel): `required` requires the model to call one or more of the allowed tools. """ - tools: List[Dict[str, object]] + tools: list[dict[str, object]] """A list of tool definitions that the model should be allowed to call. For the Responses API, the list of tool definitions might look like: @@ -54,6 +65,7 @@ class ToolChoiceAllowed(BaseModel): """Allowed tool configuration type. 
Always `allowed_tools`."""
 
 
+@json_schema_type
 class ToolChoiceFunction(BaseModel):
     name: str
     """The name of the function to call."""
@@ -62,6 +74,7 @@ class ToolChoiceFunction(BaseModel):
     """For function calling, the type is always `function`."""
 
 
+@json_schema_type
 class ToolChoiceMcp(BaseModel):
     server_label: str
     """The label of the MCP server to use."""
@@ -69,10 +82,11 @@ class ToolChoiceMcp(BaseModel):
     type: Literal["mcp"]
     """For MCP tools, the type is always `mcp`."""
 
-    name: Optional[str] = None
+    name: str | None = None
     """The name of the tool to call on the server."""
 
 
+@json_schema_type
 class ToolChoiceCustom(BaseModel):
     name: str
     """The name of the custom tool to call."""
diff --git a/llama_stack/strong_typing/schema.py b/llama_stack/strong_typing/schema.py
index 82baddc86..a8fc86a72 100644
--- a/llama_stack/strong_typing/schema.py
+++ b/llama_stack/strong_typing/schema.py
@@ -93,7 +93,14 @@ def get_class_property_docstrings(
     """
 
     result = {}
-    for base in inspect.getmro(data_type):
+    # Check if the type has __mro__ (method resolution order)
+    if hasattr(data_type, "__mro__"):
+        bases = inspect.getmro(data_type)
+    else:
+        # For TypeAliasType or other types without __mro__, just use the type itself
+        bases = [data_type] if hasattr(data_type, "__doc__") else []
+
+    for base in bases:
         docstr = docstring.parse_type(base)
         for param in docstr.params.values():
             if param.name in result:
@@ -479,12 +486,25 @@ class JsonSchemaGenerator:
             }
             return ret
         elif origin_type is Literal:
-            if len(typing.get_args(typ)) != 1:
-                raise ValueError(f"Literal type {typ} has {len(typing.get_args(typ))} arguments")
-            (literal_value,) = typing.get_args(typ)  # unpack value of literal type
-            schema = self.type_to_schema(type(literal_value))
-            schema["const"] = literal_value
-            return schema
+            literal_values = typing.get_args(typ)
+            if len(literal_values) == 1:
+                # Single literal value - use const
+                (literal_value,) = literal_values
+                schema = self.type_to_schema(type(literal_value))
+                schema["const"] = literal_value
+                return schema
+            else:
+                # Multiple literal values - use enum
+                # Check that all literal values have the same type
+                literal_types = {type(value) for value in literal_values}
+                if len(literal_types) != 1:
+                    raise ValueError(f"Literal type {typ} has inconsistent value types: {literal_types}")
+
+                # Create schema based on the common type of all literal values
+                common_type = literal_types.pop()
+                schema = self.type_to_schema(common_type)
+                schema["enum"] = list(literal_values)
+                return schema
         elif origin_type is type:
             (concrete_type,) = typing.get_args(typ)  # unpack single tuple element
             return {"const": self.type_to_schema(concrete_type, force_expand=True)}
@@ -492,13 +512,24 @@ class JsonSchemaGenerator:
             (concrete_type,) = typing.get_args(typ)
             return self.type_to_schema(concrete_type)
 
-        # dictionary of class attributes
-        members = dict(inspect.getmembers(typ, lambda a: not inspect.isroutine(a)))
-
-        property_docstrings = get_class_property_docstrings(typ, self.options.property_description_fun)
+        # Check if this is a TypeAliasType (Python 3.12+) which doesn't have __mro__
+        if hasattr(typ, "__mro__"):
+            # dictionary of class attributes
+            members = dict(inspect.getmembers(typ, lambda a: not inspect.isroutine(a)))
+            property_docstrings = get_class_property_docstrings(typ, self.options.property_description_fun)
+        else:
+            # TypeAliasType or other types without __mro__
+            members = {}
+            property_docstrings = {}
         properties: Dict[str, Schema] = {}
         required: List[str] = []
-        for property_name, property_type in get_class_properties(typ):
+        # Only process properties if the type supports class properties
+        if hasattr(typ, "__mro__"):
+            class_properties = get_class_properties(typ)
+        else:
+            class_properties = []
+
+        for property_name, property_type in class_properties:
            # rename property if an alias name is specified
             alias = get_annotation(property_type, Alias)
             if alias:
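
Note on the Literal change in llama_stack/strong_typing/schema.py: below is a minimal, self-contained sketch of the intended behavior, for review context only. Single-valued Literal types keep mapping to a JSON Schema "const", while multi-valued Literal types (such as the ToolChoiceOptions alias registered above) now map to an "enum" instead of raising. The literal_to_schema helper and _JSON_TYPE table are invented for this sketch and are not llama_stack or strong_typing APIs; only typing.Literal and typing.get_args come from the standard library.

# Illustrative sketch only: a simplified stand-in for the new Literal handling in
# JsonSchemaGenerator.type_to_schema; names below are assumptions, not library code.
import typing
from typing import Literal

_JSON_TYPE = {str: "string", bool: "boolean", int: "integer", float: "number"}


def literal_to_schema(typ: object) -> dict:
    literal_values = typing.get_args(typ)
    if len(literal_values) == 1:
        # Single literal value - use const (unchanged behavior)
        (literal_value,) = literal_values
        return {"type": _JSON_TYPE[type(literal_value)], "const": literal_value}
    # Multiple literal values - they must share one type, which becomes an enum
    literal_types = {type(value) for value in literal_values}
    if len(literal_types) != 1:
        raise ValueError(f"Literal type {typ} has inconsistent value types: {literal_types}")
    return {"type": _JSON_TYPE[literal_types.pop()], "enum": list(literal_values)}


# ToolChoiceOptions = Literal["none", "auto", "required"] would now serialize as a
# string enum instead of being rejected for having more than one argument.
assert literal_to_schema(Literal["none", "auto", "required"]) == {
    "type": "string",
    "enum": ["none", "auto", "required"],
}
assert literal_to_schema(Literal["mcp"]) == {"type": "string", "const": "mcp"}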