Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-03 18:00:36 +00:00)

Commit 9deb0beb86 (parent 5293b4e5e9): even more cleanup, the deltas should be much smaller now

14 changed files with 5038 additions and 17435 deletions
docs/static/deprecated-llama-stack-spec.yaml (vendored, 2959 changes): diff suppressed because it is too large.
docs/static/experimental-llama-stack-spec.yaml (vendored, 7475 changes): diff suppressed because it is too large.
docs/static/llama-stack-spec.yaml (vendored, 3653 changes): diff suppressed because it is too large.
docs/static/stainless-llama-stack-spec.yaml (vendored, 3504 changes): diff suppressed because it is too large.
@@ -12,462 +12,492 @@ These lists help the new generator match the previous ordering so that diffs
 remain readable while we debug schema content regressions. Remove once stable.
 """

This hunk reformats the ordering constants from single-quoted pprint output into double-quoted, black-style literals; the values themselves are unchanged. The resulting content:

LEGACY_PATH_ORDER = [
    "/v1/batches", "/v1/batches/{batch_id}", "/v1/batches/{batch_id}/cancel",
    "/v1/chat/completions", "/v1/chat/completions/{completion_id}", "/v1/completions",
    "/v1/conversations", "/v1/conversations/{conversation_id}",
    "/v1/conversations/{conversation_id}/items", "/v1/conversations/{conversation_id}/items/{item_id}",
    "/v1/embeddings", "/v1/files", "/v1/files/{file_id}", "/v1/files/{file_id}/content",
    "/v1/health", "/v1/inspect/routes", "/v1/models", "/v1/models/{model_id}", "/v1/moderations",
    "/v1/prompts", "/v1/prompts/{prompt_id}", "/v1/prompts/{prompt_id}/set-default-version",
    "/v1/prompts/{prompt_id}/versions", "/v1/providers", "/v1/providers/{provider_id}",
    "/v1/responses", "/v1/responses/{response_id}", "/v1/responses/{response_id}/input_items",
    "/v1/safety/run-shield", "/v1/scoring-functions", "/v1/scoring-functions/{scoring_fn_id}",
    "/v1/scoring/score", "/v1/scoring/score-batch", "/v1/shields", "/v1/shields/{identifier}",
    "/v1/tool-runtime/invoke", "/v1/tool-runtime/list-tools", "/v1/toolgroups",
    "/v1/toolgroups/{toolgroup_id}", "/v1/tools", "/v1/tools/{tool_name}",
    "/v1/vector-io/insert", "/v1/vector-io/query", "/v1/vector_stores",
    "/v1/vector_stores/{vector_store_id}", "/v1/vector_stores/{vector_store_id}/file_batches",
    "/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}",
    "/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
    "/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
    "/v1/vector_stores/{vector_store_id}/files", "/v1/vector_stores/{vector_store_id}/files/{file_id}",
    "/v1/vector_stores/{vector_store_id}/files/{file_id}/content",
    "/v1/vector_stores/{vector_store_id}/search", "/v1/version",
    "/v1beta/datasetio/append-rows/{dataset_id}", "/v1beta/datasetio/iterrows/{dataset_id}",
    "/v1beta/datasets", "/v1beta/datasets/{dataset_id}",
    "/v1alpha/eval/benchmarks", "/v1alpha/eval/benchmarks/{benchmark_id}",
    "/v1alpha/eval/benchmarks/{benchmark_id}/evaluations", "/v1alpha/eval/benchmarks/{benchmark_id}/jobs",
    "/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}",
    "/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result",
    "/v1alpha/inference/rerank", "/v1alpha/post-training/job/artifacts",
    "/v1alpha/post-training/job/cancel", "/v1alpha/post-training/job/status",
    "/v1alpha/post-training/jobs", "/v1alpha/post-training/preference-optimize",
    "/v1alpha/post-training/supervised-fine-tune",
]

LEGACY_SCHEMA_ORDER = [
    "Error", "ListBatchesResponse", "CreateBatchRequest", "Batch", "Order",
    "ListOpenAIChatCompletionResponse", "OpenAIAssistantMessageParam",
    "OpenAIChatCompletionContentPartImageParam", "OpenAIChatCompletionContentPartParam",
    "OpenAIChatCompletionContentPartTextParam", "OpenAIChatCompletionToolCall",
    "OpenAIChatCompletionToolCallFunction", "OpenAIChatCompletionUsage", "OpenAIChoice",
    "OpenAIChoiceLogprobs", "OpenAIDeveloperMessageParam", "OpenAIFile", "OpenAIFileFile",
    "OpenAIImageURL", "OpenAIMessageParam", "OpenAISystemMessageParam", "OpenAITokenLogProb",
    "OpenAIToolMessageParam", "OpenAITopLogProb", "OpenAIUserMessageParam", "OpenAIJSONSchema",
    "OpenAIResponseFormatJSONObject", "OpenAIResponseFormatJSONSchema", "OpenAIResponseFormatParam",
    "OpenAIResponseFormatText", "OpenAIChatCompletionRequestWithExtraBody", "OpenAIChatCompletion",
    "OpenAIChatCompletionChunk", "OpenAIChoiceDelta", "OpenAIChunkChoice",
    "OpenAICompletionWithInputMessages", "OpenAICompletionRequestWithExtraBody", "OpenAICompletion",
    "OpenAICompletionChoice", "ConversationItem", "OpenAIResponseAnnotationCitation",
    "OpenAIResponseAnnotationContainerFileCitation", "OpenAIResponseAnnotationFileCitation",
    "OpenAIResponseAnnotationFilePath", "OpenAIResponseAnnotations", "OpenAIResponseContentPartRefusal",
    "OpenAIResponseInputFunctionToolCallOutput", "OpenAIResponseInputMessageContent",
    "OpenAIResponseInputMessageContentFile", "OpenAIResponseInputMessageContentImage",
    "OpenAIResponseInputMessageContentText", "OpenAIResponseMCPApprovalRequest",
    "OpenAIResponseMCPApprovalResponse", "OpenAIResponseMessage", "OpenAIResponseOutputMessageContent",
    "OpenAIResponseOutputMessageContentOutputText", "OpenAIResponseOutputMessageFileSearchToolCall",
    "OpenAIResponseOutputMessageFunctionToolCall", "OpenAIResponseOutputMessageMCPCall",
    "OpenAIResponseOutputMessageMCPListTools", "OpenAIResponseOutputMessageWebSearchToolCall",
    "CreateConversationRequest", "Conversation", "UpdateConversationRequest",
    "ConversationDeletedResource", "ConversationItemList", "AddItemsRequest",
    "ConversationItemDeletedResource", "OpenAIEmbeddingsRequestWithExtraBody", "OpenAIEmbeddingData",
    "OpenAIEmbeddingUsage", "OpenAIEmbeddingsResponse", "OpenAIFilePurpose", "ListOpenAIFileResponse",
    "OpenAIFileObject", "ExpiresAfter", "OpenAIFileDeleteResponse", "Response", "HealthInfo",
    "RouteInfo", "ListRoutesResponse", "OpenAIModel", "OpenAIListModelsResponse", "Model", "ModelType",
    "RunModerationRequest", "ModerationObject", "ModerationObjectResults", "Prompt",
    "ListPromptsResponse", "CreatePromptRequest", "UpdatePromptRequest", "SetDefaultVersionRequest",
    "ProviderInfo", "ListProvidersResponse", "ListOpenAIResponseObject", "OpenAIResponseError",
    "OpenAIResponseInput", "OpenAIResponseInputToolFileSearch", "OpenAIResponseInputToolFunction",
    "OpenAIResponseInputToolWebSearch", "OpenAIResponseObjectWithInput", "OpenAIResponseOutput",
    "OpenAIResponsePrompt", "OpenAIResponseText", "OpenAIResponseTool", "OpenAIResponseToolMCP",
    "OpenAIResponseUsage", "ResponseGuardrailSpec", "OpenAIResponseInputTool",
    "OpenAIResponseInputToolMCP", "CreateOpenaiResponseRequest", "OpenAIResponseObject",
    "OpenAIResponseContentPartOutputText", "OpenAIResponseContentPartReasoningSummary",
    "OpenAIResponseContentPartReasoningText", "OpenAIResponseObjectStream",
    "OpenAIResponseObjectStreamResponseCompleted", "OpenAIResponseObjectStreamResponseContentPartAdded",
    "OpenAIResponseObjectStreamResponseContentPartDone", "OpenAIResponseObjectStreamResponseCreated",
    "OpenAIResponseObjectStreamResponseFailed",
    "OpenAIResponseObjectStreamResponseFileSearchCallCompleted",
    "OpenAIResponseObjectStreamResponseFileSearchCallInProgress",
    "OpenAIResponseObjectStreamResponseFileSearchCallSearching",
    "OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta",
    "OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone",
    "OpenAIResponseObjectStreamResponseInProgress", "OpenAIResponseObjectStreamResponseIncomplete",
    "OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta",
    "OpenAIResponseObjectStreamResponseMcpCallArgumentsDone",
    "OpenAIResponseObjectStreamResponseMcpCallCompleted",
    "OpenAIResponseObjectStreamResponseMcpCallFailed",
    "OpenAIResponseObjectStreamResponseMcpCallInProgress",
    "OpenAIResponseObjectStreamResponseMcpListToolsCompleted",
    "OpenAIResponseObjectStreamResponseMcpListToolsFailed",
    "OpenAIResponseObjectStreamResponseMcpListToolsInProgress",
    "OpenAIResponseObjectStreamResponseOutputItemAdded",
    "OpenAIResponseObjectStreamResponseOutputItemDone",
    "OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded",
    "OpenAIResponseObjectStreamResponseOutputTextDelta",
    "OpenAIResponseObjectStreamResponseOutputTextDone",
    "OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded",
    "OpenAIResponseObjectStreamResponseReasoningSummaryPartDone",
    "OpenAIResponseObjectStreamResponseReasoningSummaryTextDelta",
    "OpenAIResponseObjectStreamResponseReasoningSummaryTextDone",
    "OpenAIResponseObjectStreamResponseReasoningTextDelta",
    "OpenAIResponseObjectStreamResponseReasoningTextDone",
    "OpenAIResponseObjectStreamResponseRefusalDelta", "OpenAIResponseObjectStreamResponseRefusalDone",
    "OpenAIResponseObjectStreamResponseWebSearchCallCompleted",
    "OpenAIResponseObjectStreamResponseWebSearchCallInProgress",
    "OpenAIResponseObjectStreamResponseWebSearchCallSearching",
    "OpenAIDeleteResponseObject", "ListOpenAIResponseInputItem", "RunShieldRequest",
    "RunShieldResponse", "SafetyViolation", "ViolationLevel", "AggregationFunctionType", "ArrayType",
    "BasicScoringFnParams", "BooleanType", "ChatCompletionInputType", "CompletionInputType",
    "JsonType", "LLMAsJudgeScoringFnParams", "NumberType", "ObjectType", "RegexParserScoringFnParams",
    "ScoringFn", "ScoringFnParams", "ScoringFnParamsType", "StringType", "UnionType",
    "ListScoringFunctionsResponse", "ScoreRequest", "ScoreResponse", "ScoringResult",
    "ScoreBatchRequest", "ScoreBatchResponse", "Shield", "ListShieldsResponse", "InvokeToolRequest",
    "ImageContentItem", "InterleavedContent", "InterleavedContentItem", "TextContentItem",
    "ToolInvocationResult", "URL", "ToolDef", "ListToolDefsResponse", "ToolGroup",
    "ListToolGroupsResponse", "Chunk", "ChunkMetadata", "InsertChunksRequest", "QueryChunksRequest",
    "QueryChunksResponse", "VectorStoreFileCounts", "VectorStoreListResponse", "VectorStoreObject",
    "VectorStoreChunkingStrategy", "VectorStoreChunkingStrategyAuto", "VectorStoreChunkingStrategyStatic",
    "VectorStoreChunkingStrategyStaticConfig", "OpenAICreateVectorStoreRequestWithExtraBody",
    "OpenaiUpdateVectorStoreRequest", "VectorStoreDeleteResponse",
    "OpenAICreateVectorStoreFileBatchRequestWithExtraBody", "VectorStoreFileBatchObject",
    "VectorStoreFileStatus", "VectorStoreFileLastError", "VectorStoreFileObject",
    "VectorStoreFilesListInBatchResponse", "VectorStoreListFilesResponse",
    "OpenaiAttachFileToVectorStoreRequest", "OpenaiUpdateVectorStoreFileRequest",
    "VectorStoreFileDeleteResponse", "bool", "VectorStoreContent", "VectorStoreFileContentResponse",
    "OpenaiSearchVectorStoreRequest", "VectorStoreSearchResponse", "VectorStoreSearchResponsePage",
    "VersionInfo", "AppendRowsRequest", "PaginatedResponse", "Dataset", "RowsDataSource",
    "URIDataSource", "ListDatasetsResponse", "Benchmark", "ListBenchmarksResponse", "BenchmarkConfig",
    "GreedySamplingStrategy", "ModelCandidate", "SamplingParams", "SystemMessage",
    "TopKSamplingStrategy", "TopPSamplingStrategy", "EvaluateRowsRequest", "EvaluateResponse",
    "RunEvalRequest", "Job", "RerankRequest", "RerankData", "RerankResponse", "Checkpoint",
    "PostTrainingJobArtifactsResponse", "PostTrainingMetric", "CancelTrainingJobRequest",
    "PostTrainingJobStatusResponse", "ListPostTrainingJobsResponse", "DPOAlignmentConfig",
    "DPOLossType", "DataConfig", "DatasetFormat", "EfficiencyConfig", "OptimizerConfig",
    "OptimizerType", "TrainingConfig", "PreferenceOptimizeRequest", "PostTrainingJob",
    "AlgorithmConfig", "LoraFinetuningConfig", "QATFinetuningConfig", "SupervisedFineTuneRequest",
    "RegisterModelRequest", "ParamType", "RegisterScoringFunctionRequest", "RegisterShieldRequest",
    "RegisterToolGroupRequest", "DataSource", "RegisterDatasetRequest", "RegisterBenchmarkRequest",
]

LEGACY_RESPONSE_ORDER = ["BadRequest400", "TooManyRequests429", "InternalServerError500", "DefaultError"]

LEGACY_TAGS = [
    {
        "description": "APIs for creating and interacting with agentic systems.",
        "name": "Agents",
        "x-displayName": "Agents",
    },
    {
        "description": "The API is designed to allow use of openai client libraries for seamless integration.\n"
        "\n"
        "This API provides the following extensions:\n"
        " - idempotent batch creation\n"
        "\n"
        "Note: This API is currently under active development and may undergo changes.",
        "name": "Batches",
        "x-displayName": "The Batches API enables efficient processing of multiple requests in a single operation, "
        "particularly useful for processing large datasets, batch evaluation workflows, and cost-effective "
        "inference at scale.",
    },
    {"description": "", "name": "Benchmarks"},
    {
        "description": "Protocol for conversation management operations.",
        "name": "Conversations",
        "x-displayName": "Conversations",
    },
    {"description": "", "name": "DatasetIO"},
    {"description": "", "name": "Datasets"},
    {
        "description": "Llama Stack Evaluation API for running evaluations on model and agent candidates.",
        "name": "Eval",
        "x-displayName": "Evaluations",
    },
    {
        "description": "This API is used to upload documents that can be used with other Llama Stack APIs.",
        "name": "Files",
        "x-displayName": "Files",
    },
    {
        "description": "Llama Stack Inference API for generating completions, chat completions, and embeddings.\n"
        "\n"
        "This API provides the raw interface to the underlying models. Three kinds of models are supported:\n"
        '- LLM models: these models generate "raw" and "chat" (conversational) completions.\n'
        "- Embedding models: these models generate embeddings to be used for semantic search.\n"
        "- Rerank models: these models reorder the documents based on their relevance to a query.",
        "name": "Inference",
        "x-displayName": "Inference",
    },
    {
        "description": "APIs for inspecting the Llama Stack service, including health status, available API routes with "
        "methods and implementing providers.",
        "name": "Inspect",
        "x-displayName": "Inspect",
    },
    {"description": "", "name": "Models"},
    {"description": "", "name": "PostTraining (Coming Soon)"},
    {"description": "Protocol for prompt management operations.", "name": "Prompts", "x-displayName": "Prompts"},
    {
        "description": "Providers API for inspecting, listing, and modifying providers and their configurations.",
        "name": "Providers",
        "x-displayName": "Providers",
    },
    {"description": "OpenAI-compatible Moderations API.", "name": "Safety", "x-displayName": "Safety"},
    {"description": "", "name": "Scoring"},
    {"description": "", "name": "ScoringFunctions"},
    {"description": "", "name": "Shields"},
    {"description": "", "name": "ToolGroups"},
    {"description": "", "name": "ToolRuntime"},
    {"description": "", "name": "VectorIO"},
]

LEGACY_TAG_ORDER = [
    "Agents", "Batches", "Benchmarks", "Conversations", "DatasetIO", "Datasets", "Eval", "Files",
    "Inference", "Inspect", "Models", "PostTraining (Coming Soon)", "Prompts", "Providers", "Safety",
    "Scoring", "ScoringFunctions", "Shields", "ToolGroups", "ToolRuntime", "VectorIO",
]

LEGACY_TAG_GROUPS = [
    {
        "name": "Operations",
        "tags": [
            "Agents", "Batches", "Benchmarks", "Conversations", "DatasetIO", "Datasets", "Eval",
            "Files", "Inference", "Inspect", "Models", "PostTraining (Coming Soon)", "Prompts",
            "Providers", "Safety", "Scoring", "ScoringFunctions", "Shields", "ToolGroups",
            "ToolRuntime", "VectorIO",
        ],
    }
]

LEGACY_SECURITY = [{"Default": []}]

LEGACY_OPERATION_KEYS = [
    "responses",
    "tags",
    "summary",
    "description",
    "operationId",
    "parameters",
    "requestBody",
    "deprecated",
]
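These constants exist so the new generator can sort its output back into the old key order. A minimal standalone sketch of that sorting idea (illustrative names only, not the repository's actual sorter):

LEGACY_ORDER = ["/v1/batches", "/v1/health", "/v1/models"]  # illustrative subset

def sort_with_legacy_order(paths: dict, legacy_order: list[str]) -> dict:
    # Known keys keep their legacy rank; unknown (new) keys sort after them, alphabetically.
    rank = {key: i for i, key in enumerate(legacy_order)}
    return dict(sorted(paths.items(), key=lambda kv: (rank.get(kv[0], len(rank)), kv[0])))

spec_paths = {"/v1/models": {}, "/v1/new-endpoint": {}, "/v1/batches": {}}
print(list(sort_with_legacy_order(spec_paths, LEGACY_ORDER)))
# ['/v1/batches', '/v1/models', '/v1/new-endpoint']

Ranking unknown keys after every legacy key means newly added endpoints cluster at the end of the spec instead of reshuffling the whole file.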
@@ -19,6 +19,7 @@ from pydantic import Field, create_model

 from llama_stack.log import get_logger
 from llama_stack_api import Api
+from llama_stack_api.schema_utils import get_registered_schema_info

 from . import app as app_module
 from .state import _extra_body_fields, register_dynamic_model
@@ -31,23 +32,16 @@ def _to_pascal_case(segment: str) -> str:
     return "".join(token.capitalize() for token in tokens if token)


-def _compose_request_model_name(webmethod, http_method: str, variant: str | None = None) -> str:
-    segments = []
-    level = (webmethod.level or "").lower()
-    if level and level != "v1":
-        segments.append(_to_pascal_case(str(webmethod.level)))
-    for part in filter(None, webmethod.route.split("/")):
-        lower_part = part.lower()
-        if lower_part in {"v1", "v1alpha", "v1beta"}:
-            continue
-        if part.startswith("{"):
-            param = part[1:].split(":", 1)[0]
-            segments.append(f"By{_to_pascal_case(param)}")
-        else:
-            segments.append(_to_pascal_case(part))
-    if not segments:
-        segments.append("Root")
-    base_name = "".join(segments) + http_method.title() + "Request"
+def _compose_request_model_name(api: Api, method_name: str, variant: str | None = None) -> str:
+    """Generate a deterministic model name from the protocol method."""
+
+    def _to_pascal_from_snake(value: str) -> str:
+        return "".join(segment.capitalize() for segment in value.split("_") if segment)
+
+    base_name = _to_pascal_from_snake(method_name)
+    if not base_name:
+        base_name = _to_pascal_case(api.value)
+    base_name = f"{base_name}Request"
     if variant:
         base_name = f"{base_name}{variant}"
     return base_name
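The new rule derives the name purely from the snake_case method name, so it no longer depends on the route or the HTTP verb. A standalone sketch of the same transformation (stdlib only; the example method names are hypothetical):

def compose_request_model_name(method_name: str, variant: str | None = None) -> str:
    # snake_case -> PascalCase, then append the "Request" suffix (and optional variant).
    base = "".join(s.capitalize() for s in method_name.split("_") if s) + "Request"
    return f"{base}{variant}" if variant else base

assert compose_request_model_name("create_batch") == "CreateBatchRequest"
assert compose_request_model_name("list_files", "Loose") == "ListFilesRequestLoose"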
@@ -130,6 +124,7 @@ def _build_field_definitions(query_parameters: list[tuple[str, type, Any]], use_
 def _create_dynamic_request_model(
     api: Api,
     webmethod,
+    method_name: str,
     http_method: str,
     query_parameters: list[tuple[str, type, Any]],
     use_any: bool = False,
@@ -140,7 +135,7 @@ def _create_dynamic_request_model(
         field_definitions = _build_field_definitions(query_parameters, use_any)
         if not field_definitions:
            return None
-        model_name = _compose_request_model_name(webmethod, http_method, variant_suffix)
+        model_name = _compose_request_model_name(api, method_name, variant_suffix or None)
         request_model = create_model(model_name, **field_definitions)
         return register_dynamic_model(model_name, request_model)
     except Exception:
@@ -261,9 +256,7 @@ def _extract_response_models_from_union(union_type: Any) -> tuple[type | None, t
                 streaming_model = inner_type
             else:
                 # Might be a registered schema - check if it's registered
-                from llama_stack_api.schema_utils import _registered_schemas
-
-                if inner_type in _registered_schemas:
+                if get_registered_schema_info(inner_type):
                     # We'll need to look this up later, but for now store the type
                     streaming_model = inner_type
         elif hasattr(arg, "model_json_schema"):
@@ -427,17 +420,28 @@ def _find_models_for_endpoint(
         try:
             from fastapi import Response as FastAPIResponse
         except ImportError:
-            FastAPIResponse = None
+            fastapi_response_cls = None
+        else:
+            fastapi_response_cls = FastAPIResponse
         try:
             from starlette.responses import Response as StarletteResponse
         except ImportError:
-            StarletteResponse = None
+            starlette_response_cls = None
+        else:
+            starlette_response_cls = StarletteResponse

-        response_types = tuple(t for t in (FastAPIResponse, StarletteResponse) if t is not None)
+        response_types = tuple(t for t in (fastapi_response_cls, starlette_response_cls) if t is not None)
         if response_types and any(return_annotation is t for t in response_types):
             response_schema_name = "Response"

-        return request_model, response_model, query_parameters, file_form_params, streaming_response_model, response_schema_name
+        return (
+            request_model,
+            response_model,
+            query_parameters,
+            file_form_params,
+            streaming_response_model,
+            response_schema_name,
+        )

     except Exception as exc:
         logger.warning(
@@ -465,9 +469,7 @@ def _create_fastapi_endpoint(app: FastAPI, route, webmethod, api: Api):
         file_form_params,
         streaming_response_model,
         response_schema_name,
-    ) = (
-        _find_models_for_endpoint(webmethod, api, name, is_post_put)
-    )
+    ) = _find_models_for_endpoint(webmethod, api, name, is_post_put)

     operation_description = _extract_operation_description_from_docstring(api, name)
     response_description = _extract_response_description_from_docstring(webmethod, response_model, api, name)
@@ -479,6 +481,17 @@ def _create_fastapi_endpoint(app: FastAPI, route, webmethod, api: Api):
         key = (fastapi_path, method.upper())
         _extra_body_fields[key] = extra_body_params

+    if is_post_put and not request_model and not file_form_params and query_parameters:
+        request_model = _create_dynamic_request_model(
+            api, webmethod, name, primary_method, query_parameters, use_any=False
+        )
+        if not request_model:
+            request_model = _create_dynamic_request_model(
+                api, webmethod, name, primary_method, query_parameters, use_any=True, variant_suffix="Loose"
+            )
+        if request_model:
+            query_parameters = []
+
     if file_form_params and is_post_put:
         signature_params = list(file_form_params)
         param_annotations = {param.name: param.annotation for param in file_form_params}
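For reference, the dynamic request models above are built with pydantic's create_model from (type, default) pairs. A small self-contained example of that mechanism (the field names here are made up, not the actual query parameters):

from pydantic import create_model

# (type, default) pairs, shaped like what _build_field_definitions produces for query parameters.
field_definitions = {"limit": (int, 20), "order": (str | None, None)}
ListFilesRequest = create_model("ListFilesRequest", **field_definitions)

print(ListFilesRequest(limit=5).model_dump())  # {'limit': 5, 'order': None}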
@@ -503,12 +516,16 @@ def _create_fastapi_endpoint(app: FastAPI, route, webmethod, api: Api):
         endpoint_func = file_form_endpoint
     elif request_model and response_model:
         endpoint_func = _create_endpoint_with_request_model(request_model, response_model, operation_description)
+    elif request_model:
+        endpoint_func = _create_endpoint_with_request_model(request_model, None, operation_description)
     elif response_model and query_parameters:
         if is_post_put:
-            request_model = _create_dynamic_request_model(api, webmethod, primary_method, query_parameters, use_any=False)
+            request_model = _create_dynamic_request_model(
+                api, webmethod, name, primary_method, query_parameters, use_any=False
+            )
             if not request_model:
                 request_model = _create_dynamic_request_model(
-                    api, webmethod, primary_method, query_parameters, use_any=True, variant_suffix="Loose"
+                    api, webmethod, name, primary_method, query_parameters, use_any=True, variant_suffix="Loose"
                 )

         if request_model:
@@ -600,10 +617,8 @@ def _create_fastapi_endpoint(app: FastAPI, route, webmethod, api: Api):
     streaming_schema_name = None
     # Check if it's a registered schema first (before checking __name__)
     # because registered schemas might be Annotated types
-    from llama_stack_api.schema_utils import _registered_schemas
-
-    if streaming_response_model in _registered_schemas:
-        streaming_schema_name = _registered_schemas[streaming_response_model]["name"]
+    if schema_info := get_registered_schema_info(streaming_response_model):
+        streaming_schema_name = schema_info.name
     elif hasattr(streaming_response_model, "__name__"):
         streaming_schema_name = streaming_response_model.__name__
@@ -9,11 +9,8 @@ Schema discovery and collection for OpenAPI generation.
 """

 import importlib
-import pkgutil
 from typing import Any

-from .state import _dynamic_models
-

 def _ensure_components_schemas(openapi_schema: dict[str, Any]) -> None:
     """Ensure components.schemas exists in the schema."""
@@ -23,55 +20,22 @@ def _ensure_components_schemas(openapi_schema: dict[str, Any]) -> None:
         openapi_schema["components"]["schemas"] = {}


-def _import_all_modules_in_package(package_name: str) -> list[Any]:
-    """
-    Dynamically import all modules in a package to trigger register_schema calls.
-
-    This walks through all modules in the package and imports them, ensuring
-    that any register_schema() calls at module level are executed.
-
-    Args:
-        package_name: The fully qualified package name (e.g., 'llama_stack_api')
-
-    Returns:
-        List of imported module objects
-    """
-    modules = []
-    try:
-        package = importlib.import_module(package_name)
-    except ImportError:
-        return modules
-
-    package_path = getattr(package, "__path__", None)
-    if not package_path:
-        return modules
-
-    # Walk packages and modules recursively
-    for _, modname, ispkg in pkgutil.walk_packages(package_path, prefix=f"{package_name}."):
-        if not modname.startswith("_"):
-            try:
-                module = importlib.import_module(modname)
-                modules.append(module)
-
-                # If this is a package, also try to import any .py files directly
-                # (e.g., llama_stack_api.scoring_functions.scoring_functions)
-                if ispkg:
-                    try:
-                        # Try importing the module file with the same name as the package
-                        # This handles cases like scoring_functions/scoring_functions.py
-                        module_file_name = f"{modname}.{modname.split('.')[-1]}"
-                        module_file = importlib.import_module(module_file_name)
-                        if module_file not in modules:
-                            modules.append(module_file)
-                    except (ImportError, AttributeError, TypeError):
-                        # It's okay if this fails - not all packages have a module file with the same name
-                        pass
-            except (ImportError, AttributeError, TypeError):
-                # Skip modules that can't be imported (e.g., missing dependencies)
-                continue
-
-    return modules
+def _load_extra_schema_modules() -> None:
+    """
+    Import modules outside llama_stack_api that use schema_utils to register schemas.
+
+    The API package already imports its submodules via __init__, but server-side modules
+    like telemetry need to be imported explicitly so their decorator side effects run.
+    """
+    extra_modules = [
+        "llama_stack.core.telemetry.telemetry",
+    ]
+    for module_name in extra_modules:
+        try:
+            importlib.import_module(module_name)
+        except ImportError:
+            continue


 def _extract_and_fix_defs(schema: dict[str, Any], openapi_schema: dict[str, Any]) -> None:
     """
@@ -102,82 +66,66 @@ def _extract_and_fix_defs(schema: dict[str, Any], openapi_schema: dict[str, Any]

 def _ensure_json_schema_types_included(openapi_schema: dict[str, Any]) -> dict[str, Any]:
     """
-    Ensure all @json_schema_type decorated models and registered schemas are included in the OpenAPI schema.
-    This finds all models with the _llama_stack_schema_type attribute and schemas registered via register_schema.
+    Ensure all registered schemas (decorated, explicit, and dynamic) are included in the OpenAPI schema.
+    Relies on llama_stack_api's registry instead of recursively importing every module.
     """
     _ensure_components_schemas(openapi_schema)

-    # Import TypeAdapter for handling union types and other non-model types
     from pydantic import TypeAdapter

-    # Dynamically import all modules in packages that might register schemas
-    # This ensures register_schema() calls execute and populate _registered_schemas
-    # Also collect the modules for later scanning of @json_schema_type decorated classes
-    apis_modules = _import_all_modules_in_package("llama_stack_api")
-    _import_all_modules_in_package("llama_stack.core.telemetry")
+    from llama_stack_api.schema_utils import (
+        iter_dynamic_schema_types,
+        iter_json_schema_types,
+        iter_registered_schema_types,
+    )

-    # First, handle registered schemas (union types, etc.)
-    from llama_stack_api.schema_utils import _registered_schemas
+    # Import extra modules (e.g., telemetry) whose schema registrations live outside llama_stack_api
+    _load_extra_schema_modules()

-    for schema_type, registration_info in _registered_schemas.items():
-        schema_name = registration_info["name"]
+    # Handle explicitly registered schemas first (union types, Annotated structs, etc.)
+    for registration_info in iter_registered_schema_types():
+        schema_type = registration_info.type
+        schema_name = registration_info.name
         if schema_name not in openapi_schema["components"]["schemas"]:
             try:
-                # Use TypeAdapter for union types and other non-model types
-                # Use ref_template to generate references in the format we need
                 adapter = TypeAdapter(schema_type)
                 schema = adapter.json_schema(ref_template="#/components/schemas/{model}")
-
-                # Extract and fix $defs if present
                 _extract_and_fix_defs(schema, openapi_schema)
-
                 openapi_schema["components"]["schemas"][schema_name] = schema
             except Exception as e:
-                # Skip if we can't generate the schema
                 print(f"Warning: Failed to generate schema for registered type {schema_name}: {e}")
                 import traceback

                 traceback.print_exc()
                 continue

-    # Find all classes with the _llama_stack_schema_type attribute
-    # Use the modules we already imported above
-    for module in apis_modules:
-        for attr_name in dir(module):
-            try:
-                attr = getattr(module, attr_name)
-                if (
-                    hasattr(attr, "_llama_stack_schema_type")
-                    and hasattr(attr, "model_json_schema")
-                    and hasattr(attr, "__name__")
-                ):
-                    schema_name = attr.__name__
-                    if schema_name not in openapi_schema["components"]["schemas"]:
-                        try:
-                            # Use ref_template to ensure consistent reference format and $defs handling
-                            schema = attr.model_json_schema(ref_template="#/components/schemas/{model}")
-                            # Extract and fix $defs if present (model_json_schema can also generate $defs)
-                            _extract_and_fix_defs(schema, openapi_schema)
-                            openapi_schema["components"]["schemas"][schema_name] = schema
-                        except Exception as e:
-                            # Skip if we can't generate the schema
-                            print(f"Warning: Failed to generate schema for {schema_name}: {e}")
-                            continue
-            except (AttributeError, TypeError):
-                continue
+    # Add @json_schema_type decorated models
+    for model in iter_json_schema_types():
+        schema_name = getattr(model, "_llama_stack_schema_name", None) or getattr(model, "__name__", None)
+        if not schema_name:
+            continue
+        if schema_name not in openapi_schema["components"]["schemas"]:
+            try:
+                if hasattr(model, "model_json_schema"):
+                    schema = model.model_json_schema(ref_template="#/components/schemas/{model}")
+                else:
+                    adapter = TypeAdapter(model)
+                    schema = adapter.json_schema(ref_template="#/components/schemas/{model}")
+                _extract_and_fix_defs(schema, openapi_schema)
+                openapi_schema["components"]["schemas"][schema_name] = schema
+            except Exception as e:
+                print(f"Warning: Failed to generate schema for {schema_name}: {e}")
+                continue

-    # Also include any dynamic models that were created during endpoint generation
-    # This is a workaround to ensure dynamic models appear in the schema
-    for model in _dynamic_models:
+    # Include any dynamic models generated while building endpoints
+    for model in iter_dynamic_schema_types():
         try:
             schema_name = model.__name__
             if schema_name not in openapi_schema["components"]["schemas"]:
                 schema = model.model_json_schema(ref_template="#/components/schemas/{model}")
-                # Extract and fix $defs if present
                 _extract_and_fix_defs(schema, openapi_schema)
                 openapi_schema["components"]["schemas"][schema_name] = schema
         except Exception:
-            # Skip if we can't generate the schema
             continue

     return openapi_schema
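The iter_* helpers and get_registered_schema_info used above live in llama_stack_api.schema_utils and their implementation is not shown in this diff. A plausible minimal registry with the same surface might look like the following (an assumption for illustration, not the actual implementation):

from dataclasses import dataclass
from typing import Any
from collections.abc import Iterator

@dataclass
class SchemaRegistration:
    type: Any  # the registered type (class, union, or Annotated alias)
    name: str  # the schema name to emit under components.schemas

_REGISTERED: dict[Any, SchemaRegistration] = {}

def register_schema(schema_type: Any, name: str) -> Any:
    # Called at module import time; this is the decorator side effect the
    # _load_extra_schema_modules() import pass exists to trigger.
    _REGISTERED[schema_type] = SchemaRegistration(type=schema_type, name=name)
    return schema_type

def get_registered_schema_info(schema_type: Any) -> SchemaRegistration | None:
    return _REGISTERED.get(schema_type)

def iter_registered_schema_types() -> Iterator[SchemaRegistration]:
    yield from _REGISTERED.values()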
@@ -16,39 +16,6 @@ from llama_stack_api.version import (
     LLAMA_STACK_API_V1BETA,
 )

-from . import schema_collection
-
-
-def _get_all_json_schema_type_names() -> set[str]:
-    """
-    Get all schema names from @json_schema_type decorated models.
-    This ensures they are included in filtered schemas even if not directly referenced by paths.
-    """
-    schema_names = set()
-    apis_modules = schema_collection._import_all_modules_in_package("llama_stack_api")
-    for module in apis_modules:
-        for attr_name in dir(module):
-            try:
-                attr = getattr(module, attr_name)
-                if (
-                    hasattr(attr, "_llama_stack_schema_type")
-                    and hasattr(attr, "model_json_schema")
-                    and hasattr(attr, "__name__")
-                ):
-                    schema_names.add(attr.__name__)
-            except (AttributeError, TypeError):
-                continue
-    return schema_names
-
-
-def _get_explicit_schema_names(openapi_schema: dict[str, Any]) -> set[str]:
-    """Get all registered schema names and @json_schema_type decorated model names."""
-    from llama_stack_api.schema_utils import _registered_schemas
-
-    registered_schema_names = {info["name"] for info in _registered_schemas.values()}
-    json_schema_type_names = _get_all_json_schema_type_names()
-    return registered_schema_names | json_schema_type_names
-

 def _find_schema_refs_in_object(obj: Any) -> set[str]:
     """
@@ -70,17 +37,8 @@ def _find_schema_refs_in_object(obj: Any) -> set[str]:
     return refs
 
 
-def _add_transitive_references(
-    referenced_schemas: set[str], all_schemas: dict[str, Any], initial_schemas: set[str] | None = None
-) -> set[str]:
+def _add_transitive_references(referenced_schemas: set[str], all_schemas: dict[str, Any]) -> set[str]:
     """Add transitive references for given schemas."""
-    if initial_schemas:
-        referenced_schemas.update(initial_schemas)
-        additional_schemas = set()
-        for schema_name in initial_schemas:
-            if schema_name in all_schemas:
-                additional_schemas.update(_find_schema_refs_in_object(all_schemas[schema_name]))
-    else:
-        additional_schemas = set()
+    additional_schemas = set()
     for schema_name in referenced_schemas:
         if schema_name in all_schemas:
@@ -155,8 +113,7 @@ def _filter_schemas_by_references(
 
     referenced_schemas = _find_schemas_referenced_by_paths(filtered_paths, openapi_schema)
     all_schemas = openapi_schema.get("components", {}).get("schemas", {})
-    explicit_schema_names = _get_explicit_schema_names(openapi_schema)
-    referenced_schemas = _add_transitive_references(referenced_schemas, all_schemas, explicit_schema_names)
+    referenced_schemas = _add_transitive_references(referenced_schemas, all_schemas)
 
     filtered_schemas = {
         name: schema for name, schema in filtered_schema["components"]["schemas"].items() if name in referenced_schemas
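Aside (not part of the diff): `_add_transitive_references` now starts only from the schemas actually referenced by the filtered paths and walks `$ref`s to a fixed point; the `initial_schemas` seeding with explicitly registered names is gone. A rough sketch of that closure walk, with illustrative helper names rather than the module's own:

    def find_refs(obj) -> set[str]:
        """Collect schema names from any '#/components/schemas/<Name>' $ref in obj."""
        refs: set[str] = set()
        if isinstance(obj, dict):
            ref = obj.get("$ref")
            if isinstance(ref, str) and ref.startswith("#/components/schemas/"):
                refs.add(ref.rsplit("/", 1)[-1])
            for value in obj.values():
                refs.update(find_refs(value))
        elif isinstance(obj, list):
            for item in obj:
                refs.update(find_refs(item))
        return refs

    def transitive_closure(seed: set[str], all_schemas: dict) -> set[str]:
        # Keep expanding until no newly referenced schema name appears.
        result = set(seed)
        frontier = set(seed)
        while frontier:
            new: set[str] = set()
            for name in frontier:
                if name in all_schemas:
                    new |= find_refs(all_schemas[name])
            frontier = new - result
            result |= frontier
        return result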
@@ -19,14 +19,13 @@ from openapi_spec_validator.exceptions import OpenAPISpecValidatorError
 
 from . import endpoints, schema_collection
 from ._legacy_order import (
+    LEGACY_OPERATION_KEYS,
     LEGACY_PATH_ORDER,
     LEGACY_RESPONSE_ORDER,
     LEGACY_SCHEMA_ORDER,
-    LEGACY_OPERATION_KEYS,
     LEGACY_SECURITY,
-    LEGACY_TAGS,
     LEGACY_TAG_GROUPS,
-    LEGACY_TAG_ORDER,
+    LEGACY_TAGS,
 )
 from .state import _extra_body_fields
 
@@ -864,7 +863,15 @@ def _apply_legacy_sorting(openapi_schema: dict[str, Any]) -> dict[str, Any]:
                 ordered_path_item[method] = order_mapping(path_item[method], LEGACY_OPERATION_KEYS)
         for key, value in path_item.items():
             if key not in ordered_path_item:
-                if isinstance(value, dict) and key.lower() in {"get", "post", "put", "delete", "patch", "head", "options"}:
+                if isinstance(value, dict) and key.lower() in {
+                    "get",
+                    "post",
+                    "put",
+                    "delete",
+                    "patch",
+                    "head",
+                    "options",
+                }:
                     ordered_path_item[key] = order_mapping(value, LEGACY_OPERATION_KEYS)
                 else:
                     ordered_path_item[key] = value
@@ -11,9 +11,8 @@ Shared state for the OpenAPI generator module.
 from typing import Any
 
 from llama_stack_api import Api
+from llama_stack_api.schema_utils import clear_dynamic_schema_types, register_dynamic_schema_type
 
-# Global list to store dynamic models created during endpoint generation
-_dynamic_models: list[Any] = []
 _dynamic_model_registry: dict[str, type] = {}
 
 # Cache for protocol methods to avoid repeated lookups
@@ -28,14 +27,15 @@ def register_dynamic_model(name: str, model: type) -> type:
     """Register and deduplicate dynamically generated request models."""
     existing = _dynamic_model_registry.get(name)
    if existing is not None:
+        register_dynamic_schema_type(existing)
         return existing
     _dynamic_model_registry[name] = model
-    _dynamic_models.append(model)
+    register_dynamic_schema_type(model)
     return model
 
 
 def reset_generator_state() -> None:
     """Clear per-run caches so repeated generations stay deterministic."""
-    _dynamic_models.clear()
     _dynamic_model_registry.clear()
     _extra_body_fields.clear()
+    clear_dynamic_schema_types()
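Aside (not part of the diff): with this change every path through `register_dynamic_model` also records the model in the shared schema registry, so a model served from the per-name cache is still re-registered after `clear_dynamic_schema_types()` wipes the dynamic registry between runs. A hedged sketch of that dedup behaviour, assuming `register_dynamic_model` is importable from the generator's state module (the request-model name and field are made up):

    from pydantic import create_model

    # First registration caches the synthesized request model.
    RequestModel = create_model("CreateWidgetRequest", name=(str, ...))
    first = register_dynamic_model("CreateWidgetRequest", RequestModel)

    # A second registration under the same name returns the cached class,
    # but still re-registers it as a dynamic schema type.
    second = register_dynamic_model(
        "CreateWidgetRequest", create_model("CreateWidgetRequest", name=(str, ...))
    )
    assert first is second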
@@ -353,8 +353,15 @@ from .safety import (
 from .schema_utils import (
     CallableT,
     ExtraBodyField,
+    SchemaInfo,
     WebMethod,
+    clear_dynamic_schema_types,
+    get_registered_schema_info,
+    iter_dynamic_schema_types,
+    iter_json_schema_types,
+    iter_registered_schema_types,
     json_schema_type,
+    register_dynamic_schema_type,
     register_schema,
     webmethod,
 )
@@ -516,6 +523,7 @@ __all__ = [
     "ExtraBodyField",
     "Files",
     "Fp8QuantizationConfig",
+    "clear_dynamic_schema_types",
     "get_schema_identifier",
     "get_signature",
     "GrammarResponseFormat",
@@ -536,6 +544,10 @@ __all__ = [
     "is_type_optional",
     "is_type_union",
     "is_unwrapped_body_param",
+    "iter_dynamic_schema_types",
+    "iter_json_schema_types",
+    "iter_registered_schema_types",
+    "get_registered_schema_info",
     "Job",
     "JobStatus",
     "json_dump_string",
@@ -738,6 +750,7 @@ __all__ = [
     "RAGQueryGeneratorConfig",
     "RAGQueryResult",
     "RAGSearchMode",
+    "register_dynamic_schema_type",
     "register_schema",
     "RLHFAlgorithm",
     "RRFRanker",
@@ -775,6 +788,7 @@ __all__ = [
     "ScoringResult",
     "ScoringResultRow",
     "Schema",
+    "SchemaInfo",
     "SchemaOptions",
     "SearchRankingOptions",
     "Shield",
@@ -4,9 +4,9 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from collections.abc import Callable
+from collections.abc import Callable, Iterable
 from dataclasses import dataclass
-from typing import Any, TypeVar
+from typing import Any, Literal, TypeVar
 
 
 class ExtraBodyField[T]:
@@ -46,6 +46,21 @@ class ExtraBodyField[T]:
         self.description = description
 
 
+SchemaSource = Literal["json_schema_type", "registered_schema", "dynamic_schema"]
+
+
+@dataclass(frozen=True)
+class SchemaInfo:
+    """Metadata describing a schema entry exposed to OpenAPI generation."""
+
+    name: str
+    type: Any
+    source: SchemaSource
+
+
+_json_schema_types: dict[type, SchemaInfo] = {}
+
+
 def json_schema_type(cls):
     """
     Decorator to mark a Pydantic model for top-level component registration.
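Aside (not part of the diff): `frozen=True` gives each SchemaInfo value-style semantics, so once an entry lands in a registry its metadata cannot be mutated in place, only replaced. A small illustration, assuming SchemaInfo as defined above:

    from dataclasses import FrozenInstanceError

    info = SchemaInfo(name="Example", type=int, source="registered_schema")
    try:
        info.name = "Renamed"  # rejected: SchemaInfo is a frozen dataclass
    except FrozenInstanceError:
        print("SchemaInfo entries are immutable once registered")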
@@ -57,11 +72,15 @@ def json_schema_type(cls):
     for simple one-off types while keeping complex reusable types as components.
     """
     cls._llama_stack_schema_type = True
+    schema_name = getattr(cls, "__name__", f"Anonymous_{id(cls)}")
+    cls._llama_stack_schema_name = schema_name
+    _json_schema_types.setdefault(cls, SchemaInfo(name=schema_name, type=cls, source="json_schema_type"))
     return cls
 
 
-# Global registry for registered schemas
-_registered_schemas = {}
+# Global registries for schemas discoverable by the generator
+_registered_schemas: dict[Any, SchemaInfo] = {}
+_dynamic_schema_types: dict[type, SchemaInfo] = {}
 
 
 def register_schema(schema_type, name: str | None = None):
@@ -82,11 +101,43 @@ def register_schema(schema_type, name: str | None = None):
 
     # Store the registration information in a global registry
     # since union types don't allow setting attributes
-    _registered_schemas[schema_type] = {"name": name, "type": schema_type}
+    _registered_schemas[schema_type] = SchemaInfo(name=name, type=schema_type, source="registered_schema")
 
     return schema_type
 
 
+def get_registered_schema_info(schema_type: Any) -> SchemaInfo | None:
+    """Return the registration metadata for a schema type if present."""
+    return _registered_schemas.get(schema_type)
+
+
+def iter_registered_schema_types() -> Iterable[SchemaInfo]:
+    """Iterate over all explicitly registered schema entries."""
+    return tuple(_registered_schemas.values())
+
+
+def iter_json_schema_types() -> Iterable[type]:
+    """Iterate over all Pydantic models decorated with @json_schema_type."""
+    return tuple(info.type for info in _json_schema_types.values())
+
+
+def iter_dynamic_schema_types() -> Iterable[type]:
+    """Iterate over dynamic models registered at generation time."""
+    return tuple(info.type for info in _dynamic_schema_types.values())
+
+
+def register_dynamic_schema_type(schema_type: type, name: str | None = None) -> type:
+    """Register a dynamic model generated at runtime for schema inclusion."""
+    schema_name = name if name is not None else getattr(schema_type, "__name__", f"Anonymous_{id(schema_type)}")
+    _dynamic_schema_types[schema_type] = SchemaInfo(name=schema_name, type=schema_type, source="dynamic_schema")
+    return schema_type
+
+
+def clear_dynamic_schema_types() -> None:
+    """Clear dynamic schema registrations."""
+    _dynamic_schema_types.clear()
+
+
 @dataclass
 class WebMethod:
     level: str | None = None
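Aside (not part of the diff): taken together, the decorator path, `register_schema`, and the dynamic registry give the generator one uniform SchemaInfo view over all three schema sources, replacing the module-reflection scan deleted above. A short round-trip sketch using only the functions added in this hunk (the `Example` model and union are made up):

    from pydantic import BaseModel
    from llama_stack_api.schema_utils import (
        SchemaInfo,
        get_registered_schema_info,
        iter_json_schema_types,
        json_schema_type,
        register_schema,
    )

    @json_schema_type
    class Example(BaseModel):
        value: int

    assert Example in iter_json_schema_types()

    # Unions cannot carry attributes, so they go through register_schema instead.
    ExampleUnion = register_schema(int | str, name="ExampleUnion")
    info = get_registered_schema_info(ExampleUnion)
    assert isinstance(info, SchemaInfo) and info.source == "registered_schema"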
48
tests/unit/server/test_schema_registry.py
Normal file
@@ -0,0 +1,48 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from pydantic import BaseModel
+
+from llama_stack_api import Conversation, SamplingStrategy
+from llama_stack_api.schema_utils import (
+    clear_dynamic_schema_types,
+    get_registered_schema_info,
+    iter_dynamic_schema_types,
+    iter_json_schema_types,
+    iter_registered_schema_types,
+    register_dynamic_schema_type,
+)
+
+
+def test_json_schema_registry_contains_known_model() -> None:
+    assert Conversation in iter_json_schema_types()
+
+
+def test_registered_schema_registry_contains_sampling_strategy() -> None:
+    registered_names = {info.name for info in iter_registered_schema_types()}
+    assert "SamplingStrategy" in registered_names
+
+    schema_info = get_registered_schema_info(SamplingStrategy)
+    assert schema_info is not None
+    assert schema_info.name == "SamplingStrategy"
+
+
+def test_dynamic_schema_registration_round_trip() -> None:
+    existing_models = tuple(iter_dynamic_schema_types())
+    clear_dynamic_schema_types()
+    try:
+
+        class TemporaryModel(BaseModel):
+            foo: str
+
+        register_dynamic_schema_type(TemporaryModel)
+        assert TemporaryModel in iter_dynamic_schema_types()
+
+        clear_dynamic_schema_types()
+        assert TemporaryModel not in iter_dynamic_schema_types()
+    finally:
+        for model in existing_models:
+            register_dynamic_schema_type(model)