Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-05 04:17:32 +00:00)
fix(openai_responses): OpenAIResponsesObject is not complete
parent 17a06452e7
commit 9b3f3740ad
3 changed files with 78 additions and 29 deletions
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.

-from typing import Annotated, Any, Literal, Optional, TypeAlias, Union
+from typing import Annotated, Any, Literal, Optional

 from pydantic import BaseModel, Field
 from typing_extensions import TypedDict
@@ -14,12 +14,16 @@ from llama_stack.apis.tools.openai_tool_choice import (
     ToolChoiceCustom,
     ToolChoiceFunction,
     ToolChoiceMcp,
-    ToolChoiceTypes
+    ToolChoiceTypes,
 )
 from llama_stack.apis.vector_io import SearchRankingOptions as FileSearchRankingOptions
 from llama_stack.schema_utils import json_schema_type, register_schema

-OpenAIResponsesToolChoice: TypeAlias = Union[ToolChoiceTypes, ToolChoiceAllowed, ToolChoiceFunction, ToolChoiceMcp, ToolChoiceCustom]
+type OpenAIResponsesToolChoice = (
+    ToolChoiceTypes | ToolChoiceAllowed | ToolChoiceFunction | ToolChoiceMcp | ToolChoiceCustom
+)
 register_schema(OpenAIResponsesToolChoice, name="OpenAIResponsesToolChoice")


 @json_schema_type
 class OpenAIResponseError(BaseModel):
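The switch above from a `TypeAlias`/`Union` spelling to a PEP 695 `type` statement is what motivates the strong_typing guards later in this commit: a `type` alias is a `typing.TypeAliasType` at runtime, not a class. A minimal sketch, assuming Python 3.12+ and nothing from llama-stack:

    from typing import Literal

    # A PEP 695 alias is an instance of typing.TypeAliasType, not a class.
    type Color = Literal["red", "green", "blue"]

    print(type(Color).__name__)       # TypeAliasType
    print(hasattr(Color, "__mro__"))  # False -- inspect.getmro() cannot walk it
    print(Color.__value__)            # Literal['red', 'green', 'blue']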
@@ -346,8 +350,8 @@ class OpenAIResponsePrompt(BaseModel):
     """

     id: str
-    variables: Optional[dict[str, Any]] = None
-    version: Optional[str] = None
+    variables: dict[str, Any] | None = None
+    version: str | None = None


 @json_schema_type
@@ -358,26 +362,26 @@ class OpenAIResponseReasoning(BaseModel):
     :param generate_summary: Deprecated. Use the generate_summary_text field instead. (Optional) Whether to generate a summary of the reasoning process.
     """

-    effort: Optional[Literal["low", "medium", "high", "minimal"]] = None
-    generate_summary: Optional[str] = None
-    summary: Optional[str] = None
+    effort: Literal["low", "medium", "high", "minimal"] | None = None
+    generate_summary: str | None = None
+    summary: str | None = None


 @json_schema_type
 class OpenAIResponsesTool(BaseModel):
-    description: Optional[str] = None
+    description: str | None = None
     """
     The description of the function, including guidance on when and how to call it,
     and guidance about what to tell the user when calling (if anything).
     """

-    name: Optional[str] = None
+    name: str | None = None
     """The name of the function."""

-    parameters: Optional[object] = None
+    parameters: object | None = None
     """Parameters of the function in JSON Schema."""

-    type: Optional[Literal["function"]] = None
+    type: Literal["function"] | None = None
     """The type of the tool, i.e. `function`."""


@@ -414,7 +418,7 @@ class OpenAIResponseObject(BaseModel):
     """

     created_at: int
-    error: Optional[OpenAIResponseError] = None
+    error: OpenAIResponseError | None = None
     id: str
     incomplete_details: Optional[OpenAIResponseIncompleteDetails] = None
     instructions: Optional[str | list[str]] = None
@@ -1,10 +1,20 @@
-from typing import Dict, List, Literal, Optional, TypeAlias
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from typing import Literal

 from pydantic import BaseModel

-ToolChoiceOptions: TypeAlias = Literal["none", "auto", "required"]
+from llama_stack.schema_utils import json_schema_type, register_schema
+
+type ToolChoiceOptions = Literal["none", "auto", "required"]
+register_schema(ToolChoiceOptions, name="ToolChoiceOptions")


+@json_schema_type
 class ToolChoiceTypes(BaseModel):
     type: Literal[
         "file_search",
@@ -26,6 +36,7 @@ class ToolChoiceTypes(BaseModel):
     """


+@json_schema_type
 class ToolChoiceAllowed(BaseModel):
     mode: Literal["auto", "required"]
     """Constrains the tools available to the model to a pre-defined set.
@@ -36,7 +47,7 @@ class ToolChoiceAllowed(BaseModel):
     `required` requires the model to call one or more of the allowed tools.
     """

-    tools: List[Dict[str, object]]
+    tools: list[dict[str, object]]
     """A list of tool definitions that the model should be allowed to call.

     For the Responses API, the list of tool definitions might look like:
@@ -54,6 +65,7 @@ class ToolChoiceAllowed(BaseModel):
     """Allowed tool configuration type. Always `allowed_tools`."""


+@json_schema_type
 class ToolChoiceFunction(BaseModel):
     name: str
     """The name of the function to call."""
@@ -62,6 +74,7 @@ class ToolChoiceFunction(BaseModel):
     """For function calling, the type is always `function`."""


+@json_schema_type
 class ToolChoiceMcp(BaseModel):
     server_label: str
     """The label of the MCP server to use."""
@@ -69,10 +82,11 @@ class ToolChoiceMcp(BaseModel):
     type: Literal["mcp"]
     """For MCP tools, the type is always `mcp`."""

-    name: Optional[str] = None
+    name: str | None = None
     """The name of the tool to call on the server."""


+@json_schema_type
 class ToolChoiceCustom(BaseModel):
     name: str
     """The name of the custom tool to call."""
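With `@json_schema_type` on each variant, the tool-choice models are registered for schema generation but remain plain pydantic models at runtime. An illustrative construction, assuming only the fields visible in this diff and pydantic v2:

    from llama_stack.apis.tools.openai_tool_choice import ToolChoiceFunction, ToolChoiceMcp

    # Force the model to call one specific function
    choice = ToolChoiceFunction(name="get_weather", type="function")

    # Target a named tool on an MCP server (name may be omitted)
    mcp_choice = ToolChoiceMcp(server_label="docs", type="mcp", name="search")

    print(choice.model_dump())      # assumes pydantic v2 (model_dump)
    print(mcp_choice.model_dump())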
@@ -93,7 +93,14 @@ def get_class_property_docstrings(
     """

     result = {}
-    for base in inspect.getmro(data_type):
+    # Check if the type has __mro__ (method resolution order)
+    if hasattr(data_type, "__mro__"):
+        bases = inspect.getmro(data_type)
+    else:
+        # For TypeAliasType or other types without __mro__, just use the type itself
+        bases = [data_type] if hasattr(data_type, "__doc__") else []
+
+    for base in bases:
         docstr = docstring.parse_type(base)
         for param in docstr.params.values():
             if param.name in result:
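The guard is needed because `inspect.getmro()` simply reads `__mro__`, which a PEP 695 alias does not define. A standalone sketch of the fallback behaviour with illustrative names (Python 3.12+):

    import inspect
    from typing import Literal

    type ToolChoiceOptions = Literal["none", "auto", "required"]

    def resolve_bases(data_type):
        # Mirrors the patched logic: classes walk their MRO, aliases fall back to themselves.
        if hasattr(data_type, "__mro__"):
            return list(inspect.getmro(data_type))
        return [data_type] if hasattr(data_type, "__doc__") else []

    class Example:
        """A regular class."""

    print(resolve_bases(Example))            # [Example, object]
    print(resolve_bases(ToolChoiceOptions))  # [ToolChoiceOptions] -- the alias itself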
@@ -479,12 +486,25 @@ class JsonSchemaGenerator:
             }
             return ret
         elif origin_type is Literal:
-            if len(typing.get_args(typ)) != 1:
-                raise ValueError(f"Literal type {typ} has {len(typing.get_args(typ))} arguments")
-            (literal_value,) = typing.get_args(typ)  # unpack value of literal type
-            schema = self.type_to_schema(type(literal_value))
-            schema["const"] = literal_value
-            return schema
+            literal_values = typing.get_args(typ)
+            if len(literal_values) == 1:
+                # Single literal value - use const
+                (literal_value,) = literal_values
+                schema = self.type_to_schema(type(literal_value))
+                schema["const"] = literal_value
+                return schema
+            else:
+                # Multiple literal values - use enum
+                # Check that all literal values have the same type
+                literal_types = {type(value) for value in literal_values}
+                if len(literal_types) != 1:
+                    raise ValueError(f"Literal type {typ} has inconsistent value types: {literal_types}")
+
+                # Create schema based on the common type of all literal values
+                common_type = literal_types.pop()
+                schema = self.type_to_schema(common_type)
+                schema["enum"] = list(literal_values)
+                return schema
         elif origin_type is type:
             (concrete_type,) = typing.get_args(typ)  # unpack single tuple element
             return {"const": self.type_to_schema(concrete_type, force_expand=True)}
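In schema terms, a single-valued `Literal` still becomes a `const`, while a multi-valued `Literal` now becomes an `enum` instead of raising. A rough standalone mirror of that branch; the exact output of `type_to_schema` may differ, so the printed schemas are indicative only:

    import typing
    from typing import Literal

    def literal_schema(typ):
        values = typing.get_args(typ)
        json_type = {str: "string", int: "integer", bool: "boolean"}[type(values[0])]
        if len(values) == 1:
            return {"type": json_type, "const": values[0]}
        if len({type(v) for v in values}) != 1:
            raise ValueError(f"Literal type {typ} has inconsistent value types")
        return {"type": json_type, "enum": list(values)}

    print(literal_schema(Literal["mcp"]))                       # {'type': 'string', 'const': 'mcp'}
    print(literal_schema(Literal["none", "auto", "required"]))  # {'type': 'string', 'enum': ['none', 'auto', 'required']}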
@@ -492,13 +512,24 @@
             (concrete_type,) = typing.get_args(typ)
             return self.type_to_schema(concrete_type)

-        # dictionary of class attributes
-        members = dict(inspect.getmembers(typ, lambda a: not inspect.isroutine(a)))
-
-        property_docstrings = get_class_property_docstrings(typ, self.options.property_description_fun)
+        # Check if this is a TypeAliasType (Python 3.12+) which doesn't have __mro__
+        if hasattr(typ, "__mro__"):
+            # dictionary of class attributes
+            members = dict(inspect.getmembers(typ, lambda a: not inspect.isroutine(a)))
+
+            property_docstrings = get_class_property_docstrings(typ, self.options.property_description_fun)
+        else:
+            # TypeAliasType or other types without __mro__
+            members = {}
+            property_docstrings = {}

         properties: Dict[str, Schema] = {}
         required: List[str] = []
-        for property_name, property_type in get_class_properties(typ):
+        # Only process properties if the type supports class properties
+        if hasattr(typ, "__mro__"):
+            class_properties = get_class_properties(typ)
+        else:
+            class_properties = []
+
+        for property_name, property_type in class_properties:
             # rename property if an alias name is specified
             alias = get_annotation(property_type, Alias)
             if alias: