Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-12 12:06:04 +00:00)
Handle review comments

commit f7b6d7f045 (parent 70217cc032)

7 changed files with 42 additions and 126 deletions
docs/static/deprecated-llama-stack-spec.html (vendored), 24 changed lines

@@ -9025,17 +9025,7 @@
           "description": "(Optional) Token usage information for the response"
         },
         "instructions": {
-          "oneOf": [
-            {
-              "type": "string"
-            },
-            {
-              "type": "array",
-              "items": {
-                "$ref": "#/components/schemas/OpenAIResponseInput"
-              }
-            }
-          ],
+          "type": "string",
           "description": "(Optional) System message inserted into the model's context"
         },
         "input": {

@@ -9917,17 +9907,7 @@
           "description": "(Optional) Token usage information for the response"
         },
         "instructions": {
-          "oneOf": [
-            {
-              "type": "string"
-            },
-            {
-              "type": "array",
-              "items": {
-                "$ref": "#/components/schemas/OpenAIResponseInput"
-              }
-            }
-          ],
+          "type": "string",
           "description": "(Optional) System message inserted into the model's context"
         }
       },
docs/static/deprecated-llama-stack-spec.yaml (vendored), 12 changed lines

@@ -6735,11 +6735,7 @@ components:
       description: >-
         (Optional) Token usage information for the response
     instructions:
-      oneOf:
-        - type: string
-        - type: array
-          items:
-            $ref: '#/components/schemas/OpenAIResponseInput'
+      type: string
       description: >-
         (Optional) System message inserted into the model's context
     input:

@@ -7412,11 +7408,7 @@ components:
       description: >-
         (Optional) Token usage information for the response
     instructions:
-      oneOf:
-        - type: string
-        - type: array
-          items:
-            $ref: '#/components/schemas/OpenAIResponseInput'
+      type: string
       description: >-
         (Optional) System message inserted into the model's context
   additionalProperties: false
docs/static/llama-stack-spec.html (vendored), 24 changed lines

@@ -7601,17 +7601,7 @@
           "description": "(Optional) Token usage information for the response"
         },
         "instructions": {
-          "oneOf": [
-            {
-              "type": "string"
-            },
-            {
-              "type": "array",
-              "items": {
-                "$ref": "#/components/schemas/OpenAIResponseInput"
-              }
-            }
-          ],
+          "type": "string",
           "description": "(Optional) System message inserted into the model's context"
         },
         "input": {

@@ -8164,17 +8154,7 @@
           "description": "(Optional) Token usage information for the response"
         },
         "instructions": {
-          "oneOf": [
-            {
-              "type": "string"
-            },
-            {
-              "type": "array",
-              "items": {
-                "$ref": "#/components/schemas/OpenAIResponseInput"
-              }
-            }
-          ],
+          "type": "string",
           "description": "(Optional) System message inserted into the model's context"
         }
       },
docs/static/llama-stack-spec.yaml (vendored), 12 changed lines

@@ -5816,11 +5816,7 @@ components:
       description: >-
         (Optional) Token usage information for the response
     instructions:
-      oneOf:
-        - type: string
-        - type: array
-          items:
-            $ref: '#/components/schemas/OpenAIResponseInput'
+      type: string
       description: >-
         (Optional) System message inserted into the model's context
     input:

@@ -6227,11 +6223,7 @@ components:
      description: >-
        (Optional) Token usage information for the response
    instructions:
-      oneOf:
-        - type: string
-        - type: array
-          items:
-            $ref: '#/components/schemas/OpenAIResponseInput'
+      type: string
      description: >-
        (Optional) System message inserted into the model's context
  additionalProperties: false
docs/static/stainless-llama-stack-spec.html (vendored), 24 changed lines

@@ -9273,17 +9273,7 @@
           "description": "(Optional) Token usage information for the response"
         },
         "instructions": {
-          "oneOf": [
-            {
-              "type": "string"
-            },
-            {
-              "type": "array",
-              "items": {
-                "$ref": "#/components/schemas/OpenAIResponseInput"
-              }
-            }
-          ],
+          "type": "string",
           "description": "(Optional) System message inserted into the model's context"
         },
         "input": {

@@ -9836,17 +9826,7 @@
           "description": "(Optional) Token usage information for the response"
         },
         "instructions": {
-          "oneOf": [
-            {
-              "type": "string"
-            },
-            {
-              "type": "array",
-              "items": {
-                "$ref": "#/components/schemas/OpenAIResponseInput"
-              }
-            }
-          ],
+          "type": "string",
           "description": "(Optional) System message inserted into the model's context"
         }
       },
docs/static/stainless-llama-stack-spec.yaml (vendored), 12 changed lines

@@ -7029,11 +7029,7 @@ components:
       description: >-
         (Optional) Token usage information for the response
     instructions:
-      oneOf:
-        - type: string
-        - type: array
-          items:
-            $ref: '#/components/schemas/OpenAIResponseInput'
+      type: string
       description: >-
         (Optional) System message inserted into the model's context
     input:

@@ -7440,11 +7436,7 @@ components:
       description: >-
         (Optional) Token usage information for the response
     instructions:
-      oneOf:
-        - type: string
-        - type: array
-          items:
-            $ref: '#/components/schemas/OpenAIResponseInput'
+      type: string
       description: >-
         (Optional) System message inserted into the model's context
   additionalProperties: false
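
A minimal sketch of what the narrowed "instructions" property from the spec hunks above now accepts, checked with the third-party jsonschema package; the sample value is made up and not part of this commit:

import jsonschema

# Mirrors only the "+" lines of the spec hunks above.
instructions_schema = {
    "type": "string",
    "description": "(Optional) System message inserted into the model's context",
}

jsonschema.validate("You are a concise assistant.", instructions_schema)  # passes
# A list of OpenAIResponseInput items no longer validates against this property.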
@@ -327,35 +327,6 @@ OpenAIResponseOutput = Annotated[
 register_schema(OpenAIResponseOutput, name="OpenAIResponseOutput")
 
 
-@json_schema_type
-class OpenAIResponseInputFunctionToolCallOutput(BaseModel):
-    """
-    This represents the output of a function call that gets passed back to the model.
-    """
-
-    call_id: str
-    output: str
-    type: Literal["function_call_output"] = "function_call_output"
-    id: str | None = None
-    status: str | None = None
-
-
-OpenAIResponseInput = Annotated[
-    # Responses API allows output messages to be passed in as input
-    OpenAIResponseOutputMessageWebSearchToolCall
-    | OpenAIResponseOutputMessageFileSearchToolCall
-    | OpenAIResponseOutputMessageFunctionToolCall
-    | OpenAIResponseInputFunctionToolCallOutput
-    | OpenAIResponseMCPApprovalRequest
-    | OpenAIResponseMCPApprovalResponse
-    | OpenAIResponseOutputMessageMCPCall
-    | OpenAIResponseOutputMessageMCPListTools
-    | OpenAIResponseMessage,
-    Field(union_mode="left_to_right"),
-]
-register_schema(OpenAIResponseInput, name="OpenAIResponseInput")
-
-
 # This has to be a TypedDict because we need a "schema" field and our strong
 # typing code in the schema generator doesn't support Pydantic aliases. That also
 # means we can't use a discriminator field here, because TypedDicts don't support
@@ -594,7 +565,7 @@ class OpenAIResponseObject(BaseModel):
     tools: list[OpenAIResponseTool] | None = None
     truncation: str | None = None
     usage: OpenAIResponseUsage | None = None
-    instructions: str | list[OpenAIResponseInput] | None = None
+    instructions: str | None = None
 
 
 @json_schema_type
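
For illustration only, a hypothetical minimal Pydantic model (not the real OpenAIResponseObject) showing how the narrowed field behaves; the class name and sample values are invented:

from pydantic import BaseModel, ValidationError


class _InstructionsSketch(BaseModel):
    # Stand-in mirroring only the field touched in the hunk above.
    instructions: str | None = None


_InstructionsSketch(instructions="You are a concise assistant.")  # accepted

try:
    # A list of input items is no longer a valid value for this field.
    _InstructionsSketch(instructions=[{"type": "message"}])
except ValidationError:
    pass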
@@ -1268,6 +1239,35 @@ OpenAIResponseObjectStream = Annotated[
 register_schema(OpenAIResponseObjectStream, name="OpenAIResponseObjectStream")
 
 
+@json_schema_type
+class OpenAIResponseInputFunctionToolCallOutput(BaseModel):
+    """
+    This represents the output of a function call that gets passed back to the model.
+    """
+
+    call_id: str
+    output: str
+    type: Literal["function_call_output"] = "function_call_output"
+    id: str | None = None
+    status: str | None = None
+
+
+OpenAIResponseInput = Annotated[
+    # Responses API allows output messages to be passed in as input
+    OpenAIResponseOutputMessageWebSearchToolCall
+    | OpenAIResponseOutputMessageFileSearchToolCall
+    | OpenAIResponseOutputMessageFunctionToolCall
+    | OpenAIResponseInputFunctionToolCallOutput
+    | OpenAIResponseMCPApprovalRequest
+    | OpenAIResponseMCPApprovalResponse
+    | OpenAIResponseOutputMessageMCPCall
+    | OpenAIResponseOutputMessageMCPListTools
+    | OpenAIResponseMessage,
+    Field(union_mode="left_to_right"),
+]
+register_schema(OpenAIResponseInput, name="OpenAIResponseInput")
+
+
 class ListOpenAIResponseInputItem(BaseModel):
     """List container for OpenAI response input items.
 
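
For illustration, and assuming the class relocated in the hunk above is importable (its module path is not shown on this page), a function-call output item is still constructed the same way; the call_id and output values below are made up:

item = OpenAIResponseInputFunctionToolCallOutput(
    call_id="call_123",
    output='{"temperature_c": 21}',
)
assert item.type == "function_call_output"  # defaulted by the Literal field
assert item.id is None and item.status is None
# The item remains a member of the OpenAIResponseInput union registered above.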