diff --git a/client-sdks/stainless/openapi.yml b/client-sdks/stainless/openapi.yml index 118a887d6..22f8d3d5d 100644 --- a/client-sdks/stainless/openapi.yml +++ b/client-sdks/stainless/openapi.yml @@ -3476,7 +3476,7 @@ paths: post: tags: - V1Beta - summary: Append Rows + summary: Append rows to a dataset. description: Generic endpoint - this would be replaced with actual implementation. operationId: append_rows_v1beta_datasetio_append_rows__dataset_id__post parameters: @@ -3518,16 +3518,10 @@ paths: get: tags: - V1Beta - summary: Iterrows + summary: Get a paginated list of rows from a dataset. description: Query endpoint for proper schema generation. operationId: iterrows_v1beta_datasetio_iterrows__dataset_id__get parameters: - - name: dataset_id - in: path - required: true - schema: - type: string - title: Dataset Id - name: limit in: query required: true @@ -3540,6 +3534,12 @@ paths: schema: type: integer title: Start Index + - name: dataset_id + in: path + required: true + schema: + type: string + title: Dataset Id responses: '200': description: A PaginatedResponse. @@ -3563,7 +3563,7 @@ paths: get: tags: - V1Beta - summary: List Datasets + summary: List all datasets. description: Response-only endpoint for proper schema generation. operationId: list_datasets_v1beta_datasets_get responses: @@ -3588,7 +3588,7 @@ paths: post: tags: - V1Beta - summary: Register Dataset + summary: Register a new dataset. description: Typed endpoint for proper schema generation. operationId: register_dataset_v1beta_datasets_post requestBody: @@ -3620,7 +3620,7 @@ paths: delete: tags: - V1Beta - summary: Unregister Dataset + summary: Unregister a dataset by its ID. description: Generic endpoint - this would be replaced with actual implementation. operationId: unregister_dataset_v1beta_datasets__dataset_id__delete parameters: @@ -3661,7 +3661,7 @@ paths: get: tags: - V1Beta - summary: Get Dataset + summary: Get a dataset by its ID. description: Query endpoint for proper schema generation. operationId: get_dataset_v1beta_datasets__dataset_id__get parameters: @@ -3694,7 +3694,7 @@ paths: get: tags: - V1Alpha - summary: List Agents + summary: List all agents. description: Query endpoint for proper schema generation. operationId: list_agents_v1alpha_agents_get parameters: @@ -3732,7 +3732,7 @@ paths: post: tags: - V1Alpha - summary: Create Agent + summary: Create an agent with the given configuration. description: Typed endpoint for proper schema generation. operationId: create_agent_v1alpha_agents_post requestBody: @@ -3764,7 +3764,7 @@ paths: delete: tags: - V1Alpha - summary: Delete Agent + summary: Delete an agent by its ID and its associated sessions and turns. description: Generic endpoint - this would be replaced with actual implementation. operationId: delete_agent_v1alpha_agents__agent_id__delete parameters: @@ -3783,7 +3783,7 @@ paths: required: true schema: type: string - description: 'Path parameter: agent_id' + description: The ID of the agent to delete. responses: '200': description: Successful Response @@ -3805,7 +3805,7 @@ paths: get: tags: - V1Alpha - summary: Get Agent + summary: Describe an agent by its ID. description: Query endpoint for proper schema generation. operationId: get_agent_v1alpha_agents__agent_id__get parameters: @@ -3815,6 +3815,7 @@ paths: schema: type: string title: Agent Id + description: ID of the agent. responses: '200': description: An Agent of the agent. 
@@ -3838,7 +3839,7 @@ paths: post: tags: - V1Alpha - summary: Create Agent Session + summary: Create a new session for an agent. description: Typed endpoint for proper schema generation. operationId: create_agent_session_v1alpha_agents__agent_id__session_post requestBody: @@ -3872,12 +3873,12 @@ paths: required: true schema: type: string - description: 'Path parameter: agent_id' + description: The ID of the agent to create the session for. /v1alpha/agents/{agent_id}/session/{session_id}: delete: tags: - V1Alpha - summary: Delete Agents Session + summary: Delete an agent session by its ID and its associated turns. description: Generic endpoint - this would be replaced with actual implementation. operationId: delete_agents_session_v1alpha_agents__agent_id__session__session_id__delete parameters: @@ -3891,18 +3892,18 @@ paths: required: true schema: title: Kwargs - - name: agent_id - in: path - required: true - schema: - type: string - description: 'Path parameter: agent_id' - name: session_id in: path required: true schema: type: string - description: 'Path parameter: session_id' + description: The ID of the session to delete. + - name: agent_id + in: path + required: true + schema: + type: string + description: The ID of the agent to delete the session for. responses: '200': description: Successful Response @@ -3924,28 +3925,30 @@ paths: get: tags: - V1Alpha - summary: Get Agents Session + summary: Retrieve an agent session by its ID. description: Query endpoint for proper schema generation. operationId: get_agents_session_v1alpha_agents__agent_id__session__session_id__get parameters: - - name: agent_id - in: path - required: true - schema: - type: string - title: Agent Id - - name: session_id - in: path - required: true - schema: - type: string - title: Session Id - name: turn_ids in: query required: true schema: type: string title: Turn Ids + - name: session_id + in: path + required: true + schema: + type: string + title: Session Id + description: The ID of the session to get. + - name: agent_id + in: path + required: true + schema: + type: string + title: Agent Id + description: The ID of the agent to get the session for. responses: '200': description: A Session. @@ -3969,7 +3972,7 @@ paths: post: tags: - V1Alpha - summary: Create Agent Turn + summary: Create a new turn for an agent. description: Typed endpoint for proper schema generation. operationId: create_agent_turn_v1alpha_agents__agent_id__session__session_id__turn_post requestBody: @@ -4003,18 +4006,18 @@ paths: required: true schema: type: string - description: 'Path parameter: agent_id' + description: The ID of the agent to create the turn for. - name: session_id in: path required: true schema: type: string - description: 'Path parameter: session_id' + description: The ID of the session to create the turn for. /v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}: get: tags: - V1Alpha - summary: Get Agents Turn + summary: Retrieve an agent turn by its ID. description: Query endpoint for proper schema generation. operationId: get_agents_turn_v1alpha_agents__agent_id__session__session_id__turn__turn_id__get parameters: @@ -4024,18 +4027,21 @@ paths: schema: type: string title: Agent Id + description: The ID of the agent to get the turn for. - name: session_id in: path required: true schema: type: string title: Session Id + description: The ID of the session to get the turn for. - name: turn_id in: path required: true schema: type: string title: Turn Id + description: The ID of the turn to get. 
responses: '200': description: A Turn. @@ -4059,7 +4065,7 @@ paths: post: tags: - V1Alpha - summary: Resume Agent Turn + summary: Resume an agent turn with executed tool call responses. description: Typed endpoint for proper schema generation. operationId: resume_agent_turn_v1alpha_agents__agent_id__session__session_id__turn__turn_id__resume_post requestBody: @@ -4093,24 +4099,24 @@ paths: required: true schema: type: string - description: 'Path parameter: agent_id' + description: The ID of the agent to resume. - name: session_id in: path required: true schema: type: string - description: 'Path parameter: session_id' + description: The ID of the session to resume. - name: turn_id in: path required: true schema: type: string - description: 'Path parameter: turn_id' + description: The ID of the turn to resume. /v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}/step/{step_id}: get: tags: - V1Alpha - summary: Get Agents Step + summary: Retrieve an agent step by its ID. description: Query endpoint for proper schema generation. operationId: get_agents_step_v1alpha_agents__agent_id__session__session_id__turn__turn_id__step__step_id__get parameters: @@ -4120,24 +4126,28 @@ paths: schema: type: string title: Agent Id + description: The ID of the agent to get the step for. - name: session_id in: path required: true schema: type: string title: Session Id - - name: step_id - in: path - required: true - schema: - type: string - title: Step Id + description: The ID of the session to get the step for. - name: turn_id in: path required: true schema: type: string title: Turn Id + description: The ID of the turn to get the step for. + - name: step_id + in: path + required: true + schema: + type: string + title: Step Id + description: The ID of the step to get. responses: '200': description: An AgentStepResponse. @@ -4161,16 +4171,10 @@ paths: get: tags: - V1Alpha - summary: List Agent Sessions + summary: List all session(s) of a given agent. description: Query endpoint for proper schema generation. operationId: list_agent_sessions_v1alpha_agents__agent_id__sessions_get parameters: - - name: agent_id - in: path - required: true - schema: - type: string - title: Agent Id - name: limit in: query required: true @@ -4183,6 +4187,13 @@ paths: schema: type: integer title: Start Index + - name: agent_id + in: path + required: true + schema: + type: string + title: Agent Id + description: The ID of the agent to list sessions for. responses: '200': description: A PaginatedResponse. @@ -4206,7 +4217,7 @@ paths: get: tags: - V1Alpha - summary: List Benchmarks + summary: List all benchmarks. description: Response-only endpoint for proper schema generation. operationId: list_benchmarks_v1alpha_eval_benchmarks_get responses: @@ -4231,7 +4242,7 @@ paths: post: tags: - V1Alpha - summary: Register Benchmark + summary: Register a benchmark. description: Generic endpoint - this would be replaced with actual implementation. operationId: register_benchmark_v1alpha_eval_benchmarks_post parameters: @@ -4267,7 +4278,7 @@ paths: delete: tags: - V1Alpha - summary: Unregister Benchmark + summary: Unregister a benchmark. description: Generic endpoint - this would be replaced with actual implementation. operationId: unregister_benchmark_v1alpha_eval_benchmarks__benchmark_id__delete parameters: @@ -4286,7 +4297,7 @@ paths: required: true schema: type: string - description: 'Path parameter: benchmark_id' + description: The ID of the benchmark to unregister. 
responses: '200': description: Successful Response @@ -4308,7 +4319,7 @@ paths: get: tags: - V1Alpha - summary: Get Benchmark + summary: Get a benchmark by its ID. description: Query endpoint for proper schema generation. operationId: get_benchmark_v1alpha_eval_benchmarks__benchmark_id__get parameters: @@ -4318,6 +4329,7 @@ paths: schema: type: string title: Benchmark Id + description: The ID of the benchmark to get. responses: '200': description: A Benchmark. @@ -4341,7 +4353,7 @@ paths: post: tags: - V1Alpha - summary: Evaluate Rows + summary: Evaluate a list of rows on a benchmark. description: Typed endpoint for proper schema generation. operationId: evaluate_rows_v1alpha_eval_benchmarks__benchmark_id__evaluations_post requestBody: @@ -4375,12 +4387,12 @@ paths: required: true schema: type: string - description: 'Path parameter: benchmark_id' + description: The ID of the benchmark to run the evaluation on. /v1alpha/eval/benchmarks/{benchmark_id}/jobs: post: tags: - V1Alpha - summary: Run Eval + summary: Run an evaluation on a benchmark. description: Typed endpoint for proper schema generation. operationId: run_eval_v1alpha_eval_benchmarks__benchmark_id__jobs_post requestBody: @@ -4414,12 +4426,12 @@ paths: required: true schema: type: string - description: 'Path parameter: benchmark_id' + description: The ID of the benchmark to run the evaluation on. /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}: delete: tags: - V1Alpha - summary: Job Cancel + summary: Cancel a job. description: Generic endpoint - this would be replaced with actual implementation. operationId: job_cancel_v1alpha_eval_benchmarks__benchmark_id__jobs__job_id__delete parameters: @@ -4438,13 +4450,13 @@ paths: required: true schema: type: string - description: 'Path parameter: benchmark_id' + description: The ID of the benchmark to run the evaluation on. - name: job_id in: path required: true schema: type: string - description: 'Path parameter: job_id' + description: The ID of the job to cancel. responses: '200': description: Successful Response @@ -4466,7 +4478,7 @@ paths: get: tags: - V1Alpha - summary: Job Status + summary: Get the status of a job. description: Query endpoint for proper schema generation. operationId: job_status_v1alpha_eval_benchmarks__benchmark_id__jobs__job_id__get parameters: @@ -4476,12 +4488,14 @@ paths: schema: type: string title: Benchmark Id + description: The ID of the benchmark to run the evaluation on. - name: job_id in: path required: true schema: type: string title: Job Id + description: The ID of the job to get the status of. responses: '200': description: The status of the evaluation job. @@ -4505,7 +4519,7 @@ paths: get: tags: - V1Alpha - summary: Job Result + summary: Get the result of a job. description: Query endpoint for proper schema generation. operationId: job_result_v1alpha_eval_benchmarks__benchmark_id__jobs__job_id__result_get parameters: @@ -4515,12 +4529,14 @@ paths: schema: type: string title: Benchmark Id + description: The ID of the benchmark to run the evaluation on. - name: job_id in: path required: true schema: type: string title: Job Id + description: The ID of the job to get the result of. responses: '200': description: The result of the job. @@ -4544,7 +4560,7 @@ paths: post: tags: - V1Alpha - summary: Rerank + summary: Rerank a list of documents based on their relevance to a query. description: Typed endpoint for proper schema generation. 
operationId: rerank_v1alpha_inference_rerank_post requestBody: @@ -4576,7 +4592,7 @@ paths: get: tags: - V1Alpha - summary: Get Training Job Artifacts + summary: Get the artifacts of a training job. description: Query endpoint for proper schema generation. operationId: get_training_job_artifacts_v1alpha_post_training_job_artifacts_get parameters: @@ -4609,7 +4625,7 @@ paths: post: tags: - V1Alpha - summary: Cancel Training Job + summary: Cancel a training job. description: Generic endpoint - this would be replaced with actual implementation. operationId: cancel_training_job_v1alpha_post_training_job_cancel_post parameters: @@ -4645,7 +4661,7 @@ paths: get: tags: - V1Alpha - summary: Get Training Job Status + summary: Get the status of a training job. description: Query endpoint for proper schema generation. operationId: get_training_job_status_v1alpha_post_training_job_status_get parameters: @@ -4678,7 +4694,7 @@ paths: get: tags: - V1Alpha - summary: Get Training Jobs + summary: Get all training jobs. description: Response-only endpoint for proper schema generation. operationId: get_training_jobs_v1alpha_post_training_jobs_get responses: @@ -4704,7 +4720,7 @@ paths: post: tags: - V1Alpha - summary: Preference Optimize + summary: Run preference optimization of a model. description: Typed endpoint for proper schema generation. operationId: preference_optimize_v1alpha_post_training_preference_optimize_post requestBody: @@ -4736,7 +4752,7 @@ paths: post: tags: - V1Alpha - summary: Supervised Fine Tune + summary: Run supervised fine-tuning of a model. description: Typed endpoint for proper schema generation. operationId: supervised_fine_tune_v1alpha_post_training_supervised_fine_tune_post requestBody: @@ -4768,7 +4784,7 @@ paths: get: tags: - V1 - summary: List Batches + summary: List all batches for the current user. description: Query endpoint for proper schema generation. operationId: list_batches_v1_batches_get parameters: @@ -4807,7 +4823,7 @@ paths: post: tags: - V1 - summary: Create Batch + summary: Create a new batch for processing multiple API requests. description: Typed endpoint for proper schema generation. operationId: create_batch_v1_batches_post requestBody: @@ -4839,7 +4855,7 @@ paths: get: tags: - V1 - summary: Retrieve Batch + summary: Retrieve information about a specific batch. description: Query endpoint for proper schema generation. operationId: retrieve_batch_v1_batches__batch_id__get parameters: @@ -4849,6 +4865,7 @@ paths: schema: type: string title: Batch Id + description: The ID of the batch to retrieve. responses: '200': description: The batch object. @@ -4872,7 +4889,7 @@ paths: post: tags: - V1 - summary: Cancel Batch + summary: Cancel a batch that is in progress. description: Typed endpoint for proper schema generation. operationId: cancel_batch_v1_batches__batch_id__cancel_post requestBody: @@ -4906,12 +4923,12 @@ paths: required: true schema: type: string - description: 'Path parameter: batch_id' + description: The ID of the batch to cancel. /v1/chat/completions: get: tags: - V1 - summary: List Chat Completions + summary: List chat completions. description: Query endpoint for proper schema generation. operationId: list_chat_completions_v1_chat_completions_get parameters: @@ -4962,7 +4979,7 @@ paths: post: tags: - V1 - summary: Openai Chat Completion + summary: Create chat completions. description: Typed endpoint for proper schema generation. 
operationId: openai_chat_completion_v1_chat_completions_post requestBody: @@ -4994,7 +5011,7 @@ paths: get: tags: - V1 - summary: Get Chat Completion + summary: Get chat completion. description: Query endpoint for proper schema generation. operationId: get_chat_completion_v1_chat_completions__completion_id__get parameters: @@ -5004,6 +5021,7 @@ paths: schema: type: string title: Completion Id + description: ID of the chat completion. responses: '200': description: A OpenAICompletionWithInputMessages. @@ -5027,7 +5045,7 @@ paths: post: tags: - V1 - summary: Openai Completion + summary: Create completion. description: Typed endpoint for proper schema generation. operationId: openai_completion_v1_completions_post requestBody: @@ -5059,7 +5077,7 @@ paths: post: tags: - V1 - summary: Create Conversation + summary: Create a conversation. description: Typed endpoint for proper schema generation. operationId: create_conversation_v1_conversations_post requestBody: @@ -5091,7 +5109,7 @@ paths: delete: tags: - V1 - summary: Openai Delete Conversation + summary: Delete a conversation. description: Query endpoint for proper schema generation. operationId: openai_delete_conversation_v1_conversations__conversation_id__delete parameters: @@ -5101,6 +5119,7 @@ paths: schema: type: string title: Conversation Id + description: The conversation identifier. responses: '200': description: The deleted conversation resource. @@ -5123,7 +5142,7 @@ paths: get: tags: - V1 - summary: Get Conversation + summary: Retrieve a conversation. description: Query endpoint for proper schema generation. operationId: get_conversation_v1_conversations__conversation_id__get parameters: @@ -5133,6 +5152,7 @@ paths: schema: type: string title: Conversation Id + description: The conversation identifier. responses: '200': description: The conversation object. @@ -5155,7 +5175,7 @@ paths: post: tags: - V1 - summary: Update Conversation + summary: Update a conversation. description: Typed endpoint for proper schema generation. operationId: update_conversation_v1_conversations__conversation_id__post requestBody: @@ -5189,21 +5209,15 @@ paths: required: true schema: type: string - description: 'Path parameter: conversation_id' + description: The conversation identifier. /v1/conversations/{conversation_id}/items: get: tags: - V1 - summary: List Items + summary: List items. description: Query endpoint for proper schema generation. operationId: list_items_v1_conversations__conversation_id__items_get parameters: - - name: conversation_id - in: path - required: true - schema: - type: string - title: Conversation Id - name: after in: query required: true @@ -5227,6 +5241,13 @@ paths: schema: type: string title: Order + - name: conversation_id + in: path + required: true + schema: + type: string + title: Conversation Id + description: The conversation identifier. responses: '200': description: List of conversation items. @@ -5249,7 +5270,7 @@ paths: post: tags: - V1 - summary: Add Items + summary: Create items. description: Typed endpoint for proper schema generation. operationId: add_items_v1_conversations__conversation_id__items_post requestBody: @@ -5283,12 +5304,12 @@ paths: required: true schema: type: string - description: 'Path parameter: conversation_id' + description: The conversation identifier. /v1/conversations/{conversation_id}/items/{item_id}: delete: tags: - V1 - summary: Openai Delete Conversation Item + summary: Delete an item. description: Query endpoint for proper schema generation. 
operationId: openai_delete_conversation_item_v1_conversations__conversation_id__items__item_id__delete parameters: @@ -5298,12 +5319,14 @@ paths: schema: type: string title: Conversation Id + description: The conversation identifier. - name: item_id in: path required: true schema: type: string title: Item Id + description: The item identifier. responses: '200': description: The deleted item resource. @@ -5326,7 +5349,7 @@ paths: get: tags: - V1 - summary: Retrieve + summary: Retrieve an item. description: Query endpoint for proper schema generation. operationId: retrieve_v1_conversations__conversation_id__items__item_id__get parameters: @@ -5336,12 +5359,14 @@ paths: schema: type: string title: Conversation Id + description: The conversation identifier. - name: item_id in: path required: true schema: type: string title: Item Id + description: The item identifier. responses: '200': description: The conversation item. @@ -5365,7 +5390,7 @@ paths: post: tags: - V1 - summary: Openai Embeddings + summary: Create embeddings. description: Typed endpoint for proper schema generation. operationId: openai_embeddings_v1_embeddings_post requestBody: @@ -5397,7 +5422,7 @@ paths: get: tags: - V1 - summary: Openai List Files + summary: List files. description: Query endpoint for proper schema generation. operationId: openai_list_files_v1_files_get parameters: @@ -5447,7 +5472,7 @@ paths: post: tags: - V1 - summary: Openai Upload File + summary: Upload file. description: Response-only endpoint for proper schema generation. operationId: openai_upload_file_v1_files_post responses: @@ -5473,7 +5498,7 @@ paths: delete: tags: - V1 - summary: Openai Delete File + summary: Delete file. description: Query endpoint for proper schema generation. operationId: openai_delete_file_v1_files__file_id__delete parameters: @@ -5483,6 +5508,7 @@ paths: schema: type: string title: File Id + description: The ID of the file to use for this request. responses: '200': description: An OpenAIFileDeleteResponse indicating successful deletion. @@ -5505,7 +5531,7 @@ paths: get: tags: - V1 - summary: Openai Retrieve File + summary: Retrieve file. description: Query endpoint for proper schema generation. operationId: openai_retrieve_file_v1_files__file_id__get parameters: @@ -5515,6 +5541,7 @@ paths: schema: type: string title: File Id + description: The ID of the file to use for this request. responses: '200': description: An OpenAIFileObject containing file information. @@ -5538,7 +5565,7 @@ paths: get: tags: - V1 - summary: Openai Retrieve File Content + summary: Retrieve file content. description: Generic endpoint - this would be replaced with actual implementation. operationId: openai_retrieve_file_content_v1_files__file_id__content_get parameters: @@ -5557,7 +5584,7 @@ paths: required: true schema: type: string - description: 'Path parameter: file_id' + description: The ID of the file to use for this request. responses: '200': description: The raw file content as a binary response. @@ -5580,7 +5607,7 @@ paths: get: tags: - V1 - summary: Health + summary: Get health status. description: Response-only endpoint for proper schema generation. operationId: health_v1_health_get responses: @@ -5606,7 +5633,7 @@ paths: get: tags: - V1 - summary: List Routes + summary: List routes. description: Response-only endpoint for proper schema generation. operationId: list_routes_v1_inspect_routes_get responses: @@ -5632,7 +5659,7 @@ paths: get: tags: - V1 - summary: List Models + summary: List all models. 
description: Response-only endpoint for proper schema generation. operationId: list_models_v1_models_get responses: @@ -5657,7 +5684,7 @@ paths: post: tags: - V1 - summary: Register Model + summary: Register model. description: Typed endpoint for proper schema generation. operationId: register_model_v1_models_post requestBody: @@ -5689,7 +5716,7 @@ paths: delete: tags: - V1 - summary: Unregister Model + summary: Unregister model. description: Generic endpoint - this would be replaced with actual implementation. operationId: unregister_model_v1_models__model_id__delete parameters: @@ -5730,7 +5757,7 @@ paths: get: tags: - V1 - summary: Get Model + summary: Get model. description: Query endpoint for proper schema generation. operationId: get_model_v1_models__model_id__get parameters: @@ -5763,7 +5790,7 @@ paths: post: tags: - V1 - summary: Run Moderation + summary: Create moderation. description: Typed endpoint for proper schema generation. operationId: run_moderation_v1_moderations_post requestBody: @@ -5795,7 +5822,7 @@ paths: get: tags: - V1 - summary: List Prompts + summary: List all prompts. description: Response-only endpoint for proper schema generation. operationId: list_prompts_v1_prompts_get responses: @@ -5820,7 +5847,7 @@ paths: post: tags: - V1 - summary: Create Prompt + summary: Create prompt. description: Typed endpoint for proper schema generation. operationId: create_prompt_v1_prompts_post requestBody: @@ -5852,7 +5879,7 @@ paths: delete: tags: - V1 - summary: Delete Prompt + summary: Delete prompt. description: Generic endpoint - this would be replaced with actual implementation. operationId: delete_prompt_v1_prompts__prompt_id__delete parameters: @@ -5866,13 +5893,12 @@ paths: required: true schema: title: Kwargs - - &id001 - name: prompt_id + - name: prompt_id in: path required: true schema: type: string - description: 'Path parameter: prompt_id' + description: The identifier of the prompt to delete. responses: '200': description: Successful Response @@ -5894,22 +5920,23 @@ paths: get: tags: - V1 - summary: Get Prompt + summary: Get prompt. description: Query endpoint for proper schema generation. operationId: get_prompt_v1_prompts__prompt_id__get parameters: - - name: prompt_id - in: path - required: true - schema: - type: string - title: Prompt Id - name: version in: query required: true schema: type: integer title: Version + - name: prompt_id + in: path + required: true + schema: + type: string + title: Prompt Id + description: The identifier of the prompt to get. responses: '200': description: A Prompt resource. @@ -5932,7 +5959,7 @@ paths: post: tags: - V1 - summary: Update Prompt + summary: Update prompt. description: Typed endpoint for proper schema generation. operationId: update_prompt_v1_prompts__prompt_id__post requestBody: @@ -5961,12 +5988,17 @@ paths: $ref: '#/components/responses/DefaultError' description: Default Response parameters: - - *id001 + - name: prompt_id + in: path + required: true + schema: + type: string + description: The identifier of the prompt to update. /v1/prompts/{prompt_id}/set-default-version: post: tags: - V1 - summary: Set Default Version + summary: Set prompt version. description: Typed endpoint for proper schema generation. operationId: set_default_version_v1_prompts__prompt_id__set_default_version_post requestBody: @@ -6000,12 +6032,12 @@ paths: required: true schema: type: string - description: 'Path parameter: prompt_id' + description: The identifier of the prompt. 
/v1/prompts/{prompt_id}/versions: get: tags: - V1 - summary: List Prompt Versions + summary: List prompt versions. description: Query endpoint for proper schema generation. operationId: list_prompt_versions_v1_prompts__prompt_id__versions_get parameters: @@ -6015,6 +6047,7 @@ paths: schema: type: string title: Prompt Id + description: The identifier of the prompt to list versions for. responses: '200': description: A ListPromptsResponse containing all versions of the prompt. @@ -6038,7 +6071,7 @@ paths: get: tags: - V1 - summary: List Providers + summary: List providers. description: Response-only endpoint for proper schema generation. operationId: list_providers_v1_providers_get responses: @@ -6064,7 +6097,7 @@ paths: get: tags: - V1 - summary: Inspect Provider + summary: Get provider. description: Query endpoint for proper schema generation. operationId: inspect_provider_v1_providers__provider_id__get parameters: @@ -6074,6 +6107,7 @@ paths: schema: type: string title: Provider Id + description: The ID of the provider to inspect. responses: '200': description: A ProviderInfo object containing the provider's details. @@ -6097,7 +6131,7 @@ paths: get: tags: - V1 - summary: List Openai Responses + summary: List all responses. description: Query endpoint for proper schema generation. operationId: list_openai_responses_v1_responses_get parameters: @@ -6148,7 +6182,7 @@ paths: post: tags: - V1 - summary: Create Openai Response + summary: Create a model response. description: Typed endpoint for proper schema generation. operationId: create_openai_response_v1_responses_post requestBody: @@ -6180,7 +6214,7 @@ paths: delete: tags: - V1 - summary: Delete Openai Response + summary: Delete a response. description: Query endpoint for proper schema generation. operationId: delete_openai_response_v1_responses__response_id__delete parameters: @@ -6190,6 +6224,7 @@ paths: schema: type: string title: Response Id + description: The ID of the OpenAI response to delete. responses: '200': description: An OpenAIDeleteResponseObject @@ -6212,7 +6247,7 @@ paths: get: tags: - V1 - summary: Get Openai Response + summary: Get a model response. description: Query endpoint for proper schema generation. operationId: get_openai_response_v1_responses__response_id__get parameters: @@ -6222,6 +6257,7 @@ paths: schema: type: string title: Response Id + description: The ID of the OpenAI response to retrieve. responses: '200': description: An OpenAIResponseObject. @@ -6245,16 +6281,10 @@ paths: get: tags: - V1 - summary: List Openai Response Input Items + summary: List input items. description: Query endpoint for proper schema generation. operationId: list_openai_response_input_items_v1_responses__response_id__input_items_get parameters: - - name: response_id - in: path - required: true - schema: - type: string - title: Response Id - name: after in: query required: true @@ -6286,6 +6316,13 @@ paths: schema: $ref: '#/components/schemas/Order' default: desc + - name: response_id + in: path + required: true + schema: + type: string + title: Response Id + description: The ID of the response to retrieve input items for. responses: '200': description: An ListOpenAIResponseInputItem. @@ -6309,7 +6346,7 @@ paths: post: tags: - V1 - summary: Run Shield + summary: Run shield. description: Typed endpoint for proper schema generation. operationId: run_shield_v1_safety_run_shield_post requestBody: @@ -6341,7 +6378,7 @@ paths: get: tags: - V1 - summary: List Scoring Functions + summary: List all scoring functions. 
description: Response-only endpoint for proper schema generation. operationId: list_scoring_functions_v1_scoring_functions_get responses: @@ -6366,7 +6403,7 @@ paths: post: tags: - V1 - summary: Register Scoring Function + summary: Register a scoring function. description: Generic endpoint - this would be replaced with actual implementation. operationId: register_scoring_function_v1_scoring_functions_post parameters: @@ -6402,7 +6439,7 @@ paths: delete: tags: - V1 - summary: Unregister Scoring Function + summary: Unregister a scoring function. description: Generic endpoint - this would be replaced with actual implementation. operationId: unregister_scoring_function_v1_scoring_functions__scoring_fn_id__delete parameters: @@ -6443,7 +6480,7 @@ paths: get: tags: - V1 - summary: Get Scoring Function + summary: Get a scoring function by its ID. description: Query endpoint for proper schema generation. operationId: get_scoring_function_v1_scoring_functions__scoring_fn_id__get parameters: @@ -6476,7 +6513,7 @@ paths: post: tags: - V1 - summary: Score + summary: Score a list of rows. description: Typed endpoint for proper schema generation. operationId: score_v1_scoring_score_post requestBody: @@ -6508,7 +6545,7 @@ paths: post: tags: - V1 - summary: Score Batch + summary: Score a batch of rows. description: Typed endpoint for proper schema generation. operationId: score_batch_v1_scoring_score_batch_post requestBody: @@ -6540,7 +6577,7 @@ paths: get: tags: - V1 - summary: List Shields + summary: List all shields. description: Response-only endpoint for proper schema generation. operationId: list_shields_v1_shields_get responses: @@ -6565,7 +6602,7 @@ paths: post: tags: - V1 - summary: Register Shield + summary: Register a shield. description: Typed endpoint for proper schema generation. operationId: register_shield_v1_shields_post requestBody: @@ -6597,7 +6634,7 @@ paths: delete: tags: - V1 - summary: Unregister Shield + summary: Unregister a shield. description: Generic endpoint - this would be replaced with actual implementation. operationId: unregister_shield_v1_shields__identifier__delete parameters: @@ -6638,7 +6675,7 @@ paths: get: tags: - V1 - summary: Get Shield + summary: Get a shield by its identifier. description: Query endpoint for proper schema generation. operationId: get_shield_v1_shields__identifier__get parameters: @@ -6671,7 +6708,7 @@ paths: post: tags: - V1 - summary: Invoke Tool + summary: Run a tool with the given arguments. description: Typed endpoint for proper schema generation. operationId: invoke_tool_v1_tool_runtime_invoke_post requestBody: @@ -6703,7 +6740,7 @@ paths: get: tags: - V1 - summary: List Runtime Tools + summary: List all tools in the runtime. description: Query endpoint for proper schema generation. operationId: list_runtime_tools_v1_tool_runtime_list_tools_get parameters: @@ -6742,7 +6779,7 @@ paths: post: tags: - V1 - summary: Rag Tool.Insert + summary: Index documents so they can be used by the RAG system. description: Generic endpoint - this would be replaced with actual implementation. operationId: rag_tool_insert_v1_tool_runtime_rag_tool_insert_post parameters: @@ -6778,7 +6815,7 @@ paths: post: tags: - V1 - summary: Rag Tool.Query + summary: Query the RAG system for context; typically invoked by the agent. description: Typed endpoint for proper schema generation. 
operationId: rag_tool_query_v1_tool_runtime_rag_tool_query_post requestBody: @@ -6810,7 +6847,7 @@ paths: get: tags: - V1 - summary: List Tool Groups + summary: List tool groups with optional provider. description: Response-only endpoint for proper schema generation. operationId: list_tool_groups_v1_toolgroups_get responses: @@ -6835,7 +6872,7 @@ paths: post: tags: - V1 - summary: Register Tool Group + summary: Register a tool group. description: Generic endpoint - this would be replaced with actual implementation. operationId: register_tool_group_v1_toolgroups_post parameters: @@ -6871,7 +6908,7 @@ paths: delete: tags: - V1 - summary: Unregister Toolgroup + summary: Unregister a tool group. description: Generic endpoint - this would be replaced with actual implementation. operationId: unregister_toolgroup_v1_toolgroups__toolgroup_id__delete parameters: @@ -6912,7 +6949,7 @@ paths: get: tags: - V1 - summary: Get Tool Group + summary: Get a tool group by its ID. description: Query endpoint for proper schema generation. operationId: get_tool_group_v1_toolgroups__toolgroup_id__get parameters: @@ -6945,7 +6982,7 @@ paths: get: tags: - V1 - summary: List Tools + summary: List tools with optional tool group. description: Query endpoint for proper schema generation. operationId: list_tools_v1_tools_get parameters: @@ -6978,7 +7015,7 @@ paths: get: tags: - V1 - summary: Get Tool + summary: Get a tool by its name. description: Query endpoint for proper schema generation. operationId: get_tool_v1_tools__tool_name__get parameters: @@ -7011,7 +7048,7 @@ paths: post: tags: - V1 - summary: Insert Chunks + summary: Insert chunks into a vector database. description: Generic endpoint - this would be replaced with actual implementation. operationId: insert_chunks_v1_vector_io_insert_post parameters: @@ -7047,7 +7084,7 @@ paths: post: tags: - V1 - summary: Query Chunks + summary: Query chunks from a vector database. description: Typed endpoint for proper schema generation. operationId: query_chunks_v1_vector_io_query_post requestBody: @@ -7079,7 +7116,7 @@ paths: get: tags: - V1 - summary: Openai List Vector Stores + summary: Returns a list of vector stores. description: Query endpoint for proper schema generation. operationId: openai_list_vector_stores_v1_vector_stores_get parameters: @@ -7131,7 +7168,7 @@ paths: post: tags: - V1 - summary: Openai Create Vector Store + summary: Creates a vector store. description: Typed endpoint for proper schema generation. operationId: openai_create_vector_store_v1_vector_stores_post requestBody: @@ -7163,7 +7200,7 @@ paths: delete: tags: - V1 - summary: Openai Delete Vector Store + summary: Delete a vector store. description: Query endpoint for proper schema generation. operationId: openai_delete_vector_store_v1_vector_stores__vector_store_id__delete parameters: @@ -7173,6 +7210,7 @@ paths: schema: type: string title: Vector Store Id + description: The ID of the vector store to delete. responses: '200': description: A VectorStoreDeleteResponse indicating the deletion status. @@ -7195,7 +7233,7 @@ paths: get: tags: - V1 - summary: Openai Retrieve Vector Store + summary: Retrieves a vector store. description: Query endpoint for proper schema generation. operationId: openai_retrieve_vector_store_v1_vector_stores__vector_store_id__get parameters: @@ -7205,6 +7243,7 @@ paths: schema: type: string title: Vector Store Id + description: The ID of the vector store to retrieve. responses: '200': description: A VectorStoreObject representing the vector store. 
@@ -7227,7 +7266,7 @@ paths: post: tags: - V1 - summary: Openai Update Vector Store + summary: Updates a vector store. description: Typed endpoint for proper schema generation. operationId: openai_update_vector_store_v1_vector_stores__vector_store_id__post requestBody: @@ -7261,12 +7300,12 @@ paths: required: true schema: type: string - description: 'Path parameter: vector_store_id' + description: The ID of the vector store to update. /v1/vector_stores/{vector_store_id}/file_batches: post: tags: - V1 - summary: Openai Create Vector Store File Batch + summary: Create a vector store file batch. description: Typed endpoint for proper schema generation. operationId: openai_create_vector_store_file_batch_v1_vector_stores__vector_store_id__file_batches_post requestBody: @@ -7300,12 +7339,12 @@ paths: required: true schema: type: string - description: 'Path parameter: vector_store_id' + description: The ID of the vector store to create the file batch for. /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}: get: tags: - V1 - summary: Openai Retrieve Vector Store File Batch + summary: Retrieve a vector store file batch. description: Query endpoint for proper schema generation. operationId: openai_retrieve_vector_store_file_batch_v1_vector_stores__vector_store_id__file_batches__batch_id__get parameters: @@ -7315,12 +7354,14 @@ paths: schema: type: string title: Batch Id + description: The ID of the file batch to retrieve. - name: vector_store_id in: path required: true schema: type: string title: Vector Store Id + description: The ID of the vector store containing the file batch. responses: '200': description: A VectorStoreFileBatchObject representing the file batch. @@ -7344,7 +7385,7 @@ paths: post: tags: - V1 - summary: Openai Cancel Vector Store File Batch + summary: Cancels a vector store file batch. description: Typed endpoint for proper schema generation. operationId: openai_cancel_vector_store_file_batch_v1_vector_stores__vector_store_id__file_batches__batch_id__cancel_post requestBody: @@ -7373,38 +7414,26 @@ paths: description: Default Response $ref: '#/components/responses/DefaultError' parameters: - - name: vector_store_id - in: path - required: true - schema: - type: string - description: 'Path parameter: vector_store_id' - name: batch_id in: path required: true schema: type: string - description: 'Path parameter: batch_id' + description: The ID of the file batch to cancel. + - name: vector_store_id + in: path + required: true + schema: + type: string + description: The ID of the vector store containing the file batch. /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/files: get: tags: - V1 - summary: Openai List Files In Vector Store File Batch + summary: Returns a list of vector store files in a batch. description: Query endpoint for proper schema generation. operationId: openai_list_files_in_vector_store_file_batch_v1_vector_stores__vector_store_id__file_batches__batch_id__files_get parameters: - - name: batch_id - in: path - required: true - schema: - type: string - title: Batch Id - - name: vector_store_id - in: path - required: true - schema: - type: string - title: Vector Store Id - name: after in: query required: true @@ -7437,6 +7466,20 @@ paths: type: string default: desc title: Order + - name: batch_id + in: path + required: true + schema: + type: string + title: Batch Id + description: The ID of the file batch to list files from. 
+ - name: vector_store_id + in: path + required: true + schema: + type: string + title: Vector Store Id + description: The ID of the vector store containing the file batch. responses: '200': description: A VectorStoreFilesListInBatchResponse containing the list of files in the batch. @@ -7460,16 +7503,10 @@ paths: get: tags: - V1 - summary: Openai List Files In Vector Store + summary: List files in a vector store. description: Query endpoint for proper schema generation. operationId: openai_list_files_in_vector_store_v1_vector_stores__vector_store_id__files_get parameters: - - name: vector_store_id - in: path - required: true - schema: - type: string - title: Vector Store Id - name: after in: query required: true @@ -7502,6 +7539,13 @@ paths: type: string default: desc title: Order + - name: vector_store_id + in: path + required: true + schema: + type: string + title: Vector Store Id + description: The ID of the vector store to list files from. responses: '200': description: A VectorStoreListFilesResponse containing the list of files. @@ -7524,7 +7568,7 @@ paths: post: tags: - V1 - summary: Openai Attach File To Vector Store + summary: Attach a file to a vector store. description: Typed endpoint for proper schema generation. operationId: openai_attach_file_to_vector_store_v1_vector_stores__vector_store_id__files_post requestBody: @@ -7558,27 +7602,29 @@ paths: required: true schema: type: string - description: 'Path parameter: vector_store_id' + description: The ID of the vector store to attach the file to. /v1/vector_stores/{vector_store_id}/files/{file_id}: delete: tags: - V1 - summary: Openai Delete Vector Store File + summary: Delete a vector store file. description: Query endpoint for proper schema generation. operationId: openai_delete_vector_store_file_v1_vector_stores__vector_store_id__files__file_id__delete parameters: - - name: file_id - in: path - required: true - schema: - type: string - title: File Id - name: vector_store_id in: path required: true schema: type: string title: Vector Store Id + description: The ID of the vector store containing the file to delete. + - name: file_id + in: path + required: true + schema: + type: string + title: File Id + description: The ID of the file to delete. responses: '200': description: A VectorStoreFileDeleteResponse indicating the deletion status. @@ -7601,22 +7647,24 @@ paths: get: tags: - V1 - summary: Openai Retrieve Vector Store File + summary: Retrieves a vector store file. description: Query endpoint for proper schema generation. operationId: openai_retrieve_vector_store_file_v1_vector_stores__vector_store_id__files__file_id__get parameters: - - name: file_id - in: path - required: true - schema: - type: string - title: File Id - name: vector_store_id in: path required: true schema: type: string title: Vector Store Id + description: The ID of the vector store containing the file to retrieve. + - name: file_id + in: path + required: true + schema: + type: string + title: File Id + description: The ID of the file to retrieve. responses: '200': description: A VectorStoreFileObject representing the file. @@ -7639,7 +7687,7 @@ paths: post: tags: - V1 - summary: Openai Update Vector Store File + summary: Updates a vector store file. description: Typed endpoint for proper schema generation. 
operationId: openai_update_vector_store_file_v1_vector_stores__vector_store_id__files__file_id__post requestBody: @@ -7673,33 +7721,35 @@ paths: required: true schema: type: string - description: 'Path parameter: vector_store_id' + description: The ID of the vector store containing the file to update. - name: file_id in: path required: true schema: type: string - description: 'Path parameter: file_id' + description: The ID of the file to update. /v1/vector_stores/{vector_store_id}/files/{file_id}/content: get: tags: - V1 - summary: Openai Retrieve Vector Store File Contents + summary: Retrieves the contents of a vector store file. description: Query endpoint for proper schema generation. operationId: openai_retrieve_vector_store_file_contents_v1_vector_stores__vector_store_id__files__file_id__content_get parameters: - - name: file_id - in: path - required: true - schema: - type: string - title: File Id - name: vector_store_id in: path required: true schema: type: string title: Vector Store Id + description: The ID of the vector store containing the file to retrieve. + - name: file_id + in: path + required: true + schema: + type: string + title: File Id + description: The ID of the file to retrieve. responses: '200': description: A list of InterleavedContent representing the file contents. @@ -7723,7 +7773,7 @@ paths: post: tags: - V1 - summary: Openai Search Vector Store + summary: Search for chunks in a vector store. description: Typed endpoint for proper schema generation. operationId: openai_search_vector_store_v1_vector_stores__vector_store_id__search_post requestBody: @@ -7757,12 +7807,12 @@ paths: required: true schema: type: string - description: 'Path parameter: vector_store_id' + description: The ID of the vector store to search. /v1/version: get: tags: - V1 - summary: Version + summary: Get version. description: Response-only endpoint for proper schema generation. operationId: version_v1_version_get responses: @@ -7799,7 +7849,7 @@ components: required: - config title: AgentCandidate - description: "An agent candidate for evaluation.\n\n:param config: The configuration for the agent candidate." + description: An agent candidate for evaluation. AgentConfig: properties: sampling_params: @@ -7866,7 +7916,7 @@ components: - model - instructions title: AgentConfig - description: "Configuration for an agent.\n\n:param model: The model identifier to use for the agent\n:param instructions: The system instructions for the agent\n:param name: Optional name for the agent, used in telemetry and identification\n:param enable_session_persistence: Optional flag indicating whether session data has to be persisted\n:param response_format: Optional response format configuration" + description: Configuration for an agent. AgentCreateResponse: properties: agent_id: @@ -7876,7 +7926,7 @@ components: required: - agent_id title: AgentCreateResponse - description: "Response returned when creating a new agent.\n\n:param agent_id: Unique identifier for the created agent" + description: Response returned when creating a new agent. AgentSessionCreateResponse: properties: session_id: @@ -7886,7 +7936,7 @@ components: required: - session_id title: AgentSessionCreateResponse - description: "Response returned when creating a new agent session.\n\n:param session_id: Unique identifier for the created session" + description: Response returned when creating a new agent session. 
AgentToolGroupWithArgs: properties: name: @@ -7910,7 +7960,7 @@ components: default: agent_turn_input type: object title: AgentTurnInputType - description: "Parameter type for agent turn input.\n\n:param type: Discriminator type. Always \"agent_turn_input\"" + description: Parameter type for agent turn input. AggregationFunctionType: type: string enum: @@ -7920,7 +7970,7 @@ components: - categorical_count - accuracy title: AggregationFunctionType - description: "Types of aggregation functions for scoring results.\n:cvar average: Calculate the arithmetic mean of scores\n:cvar weighted_average: Calculate a weighted average of scores\n:cvar median: Calculate the median value of scores\n:cvar categorical_count: Count occurrences of categorical values\n:cvar accuracy: Calculate accuracy as the proportion of correct answers" + description: Types of aggregation functions for scoring results. AllowedToolsFilter: properties: tool_names: @@ -7930,7 +7980,7 @@ components: type: array type: object title: AllowedToolsFilter - description: "Filter configuration for restricting which MCP tools can be used.\n\n:param tool_names: (Optional) List of specific tool names that are allowed" + description: Filter configuration for restricting which MCP tools can be used. ApprovalFilter: properties: always: @@ -7945,7 +7995,7 @@ components: type: array type: object title: ApprovalFilter - description: "Filter configuration for MCP tool approval requirements.\n\n:param always: (Optional) List of tool names that always require approval\n:param never: (Optional) List of tool names that never require approval" + description: Filter configuration for MCP tool approval requirements. ArrayType: properties: type: @@ -7955,7 +8005,7 @@ components: default: array type: object title: ArrayType - description: "Parameter type for array values.\n\n:param type: Discriminator type. Always \"array\"" + description: Parameter type for array values. Attachment-Output: properties: content: @@ -7989,7 +8039,7 @@ components: - content - mime_type title: Attachment - description: "An attachment to an agent turn.\n\n:param content: The content of the attachment.\n:param mime_type: The MIME type of the attachment." + description: An attachment to an agent turn. BasicScoringFnParams: properties: type: @@ -8005,7 +8055,7 @@ components: description: Aggregation functions to apply to the scores of each row type: object title: BasicScoringFnParams - description: "Parameters for basic scoring function configuration.\n:param type: The type of scoring function parameters, always basic\n:param aggregation_functions: Aggregation functions to apply to the scores of each row" + description: Parameters for basic scoring function configuration. Batch: properties: id: @@ -8192,7 +8242,7 @@ components: - dataset_id - scoring_functions title: Benchmark - description: "A benchmark resource for evaluating model performance.\n\n:param dataset_id: Identifier of the dataset to use for the benchmark evaluation\n:param scoring_functions: List of scoring function identifiers to apply during evaluation\n:param metadata: Metadata for this evaluation task\n:param type: The resource type, always benchmark" + description: A benchmark resource for evaluating model performance. 
BenchmarkConfig: properties: eval_candidate: @@ -8228,7 +8278,7 @@ components: required: - eval_candidate title: BenchmarkConfig - description: "A benchmark configuration for evaluation.\n\n:param eval_candidate: The candidate to evaluate.\n:param scoring_params: Map between scoring function id and parameters for each scoring function you want to run\n:param num_examples: (Optional) The number of examples to evaluate. If not provided, all examples in the dataset will be evaluated" + description: A benchmark configuration for evaluation. BooleanType: properties: type: @@ -8238,7 +8288,7 @@ components: default: boolean type: object title: BooleanType - description: "Parameter type for boolean values.\n\n:param type: Discriminator type. Always \"boolean\"" + description: Parameter type for boolean values. BuiltinTool: type: string enum: @@ -8256,7 +8306,7 @@ components: default: chat_completion_input type: object title: ChatCompletionInputType - description: "Parameter type for chat completion input.\n\n:param type: Discriminator type. Always \"chat_completion_input\"" + description: Parameter type for chat completion input. Chunk-Output: properties: content: @@ -8300,7 +8350,7 @@ components: - content - chunk_id title: Chunk - description: "A chunk of content that can be inserted into a vector database.\n:param content: The content of the chunk, which can be interleaved text, images, or other types.\n:param chunk_id: Unique identifier for the chunk. Must be provided explicitly.\n:param metadata: Metadata associated with the chunk that will be used in the model context during inference.\n:param embedding: Optional embedding for the chunk. If not provided, it will be computed later.\n:param chunk_metadata: Metadata for the chunk that will NOT be used in the context during inference.\n The `chunk_metadata` is required backend functionality." + description: A chunk of content that can be inserted into a vector database. ChunkMetadata: properties: chunk_id: @@ -8338,7 +8388,7 @@ components: type: integer type: object title: ChunkMetadata - description: "`ChunkMetadata` is backend metadata for a `Chunk` that is used to store additional information about the chunk that\n will not be used in the context during inference, but is required for backend functionality. The `ChunkMetadata`\n is set during chunk creation in `MemoryToolRuntimeImpl().insert()`and is not expected to change after.\n Use `Chunk.metadata` for metadata that will be used in the context during inference.\n:param chunk_id: The ID of the chunk. If not set, it will be generated based on the document ID and content.\n:param document_id: The ID of the document this chunk belongs to.\n:param source: The source of the content, such as a URL, file path, or other identifier.\n:param created_timestamp: An optional timestamp indicating when the chunk was created.\n:param updated_timestamp: An optional timestamp indicating when the chunk was last updated.\n:param chunk_window: The window of the chunk, which can be used to group related chunks together.\n:param chunk_tokenizer: The tokenizer used to create the chunk. Default is Tiktoken.\n:param chunk_embedding_model: The embedding model used to create the chunk's embedding.\n:param chunk_embedding_dimension: The dimension of the embedding vector for the chunk.\n:param content_token_count: The number of tokens in the content of the chunk.\n:param metadata_token_count: The number of tokens in the metadata of the chunk." 
+ description: "`ChunkMetadata` is backend metadata for a `Chunk` that is used to store additional information about the chunk that\n will not be used in the context during inference, but is required for backend functionality. The `ChunkMetadata`\n is set during chunk creation in `MemoryToolRuntimeImpl().insert()`and is not expected to change after.\n Use `Chunk.metadata` for metadata that will be used in the context during inference." CompletionInputType: properties: type: @@ -8348,7 +8398,7 @@ components: default: completion_input type: object title: CompletionInputType - description: "Parameter type for completion input.\n\n:param type: Discriminator type. Always \"completion_input\"" + description: Parameter type for completion input. CompletionMessage-Output: properties: role: @@ -8390,7 +8440,7 @@ components: - content - stop_reason title: CompletionMessage - description: "A message containing the model's (assistant) response in a chat conversation.\n\n:param role: Must be \"assistant\" to identify this as the model's response\n:param content: The content of the model's response\n:param stop_reason: Reason why the model stopped generating. Options are:\n - `StopReason.end_of_turn`: The model finished generating the entire response.\n - `StopReason.end_of_message`: The model finished generating but generated a partial response -- usually, a tool call. The user may call the tool and continue the conversation with the tool's response.\n - `StopReason.out_of_tokens`: The model ran out of token budget.\n:param tool_calls: List of tool calls. Each tool call is a ToolCall object." + description: A message containing the model's (assistant) response in a chat conversation. Conversation: properties: id: @@ -8502,7 +8552,7 @@ components: required: - beta title: DPOAlignmentConfig - description: "Configuration for Direct Preference Optimization (DPO) alignment.\n\n:param beta: Temperature parameter for the DPO loss\n:param loss_type: The type of loss function to use for DPO" + description: Configuration for Direct Preference Optimization (DPO) alignment. DPOLossType: type: string enum: @@ -8542,7 +8592,7 @@ components: - shuffle - data_format title: DataConfig - description: "Configuration for training data and data loading.\n\n:param dataset_id: Unique identifier for the training dataset\n:param batch_size: Number of samples per training batch\n:param shuffle: Whether to shuffle the dataset during training\n:param data_format: Format of the dataset (instruct or dialog)\n:param validation_dataset_id: (Optional) Unique identifier for the validation dataset\n:param packed: (Optional) Whether to pack multiple samples into a single sequence for efficiency\n:param train_on_input: (Optional) Whether to compute loss on input tokens as well as output tokens" + description: Configuration for training data and data loading. Dataset: properties: identifier: @@ -8586,14 +8636,14 @@ components: - purpose - source title: Dataset - description: "Dataset resource for storing and accessing training or evaluation data.\n\n:param type: Type of resource, always 'dataset' for datasets" + description: Dataset resource for storing and accessing training or evaluation data. DatasetFormat: type: string enum: - instruct - dialog title: DatasetFormat - description: "Format of the training dataset.\n:cvar instruct: Instruction-following format with prompt and completion\n:cvar dialog: Multi-turn conversation format with messages" + description: Format of the training dataset. 
DatasetPurpose: type: string enum: @@ -8601,7 +8651,7 @@ components: - eval/question-answer - eval/messages-answer title: DatasetPurpose - description: "Purpose of the dataset. Each purpose has a required input data schema.\n\n:cvar post-training/messages: The dataset contains messages used for post-training.\n {\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Hello, world!\"},\n {\"role\": \"assistant\", \"content\": \"Hello, world!\"},\n ]\n }\n:cvar eval/question-answer: The dataset contains a question column and an answer column.\n {\n \"question\": \"What is the capital of France?\",\n \"answer\": \"Paris\"\n }\n:cvar eval/messages-answer: The dataset contains a messages column with list of messages and an answer column.\n {\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Hello, my name is John Doe.\"},\n {\"role\": \"assistant\", \"content\": \"Hello, John Doe. How can I help you today?\"},\n {\"role\": \"user\", \"content\": \"What's my name?\"},\n ],\n \"answer\": \"John Doe\"\n }" + description: Purpose of the dataset. Each purpose has a required input data schema. DefaultRAGQueryGeneratorConfig: properties: type: @@ -8615,7 +8665,7 @@ components: default: ' ' type: object title: DefaultRAGQueryGeneratorConfig - description: "Configuration for the default RAG query generator.\n\n:param type: Type of query generator, always 'default'\n:param separator: String separator used to join query terms" + description: Configuration for the default RAG query generator. Document: properties: content: @@ -8649,7 +8699,7 @@ components: - content - mime_type title: Document - description: "A document to be used by an agent.\n\n:param content: The content of the document.\n:param mime_type: The MIME type of the document." + description: A document to be used by an agent. EfficiencyConfig: properties: enable_activation_checkpointing: @@ -8670,7 +8720,7 @@ components: type: boolean type: object title: EfficiencyConfig - description: "Configuration for memory and compute efficiency optimizations.\n\n:param enable_activation_checkpointing: (Optional) Whether to use activation checkpointing to reduce memory usage\n:param enable_activation_offloading: (Optional) Whether to offload activations to CPU to save GPU memory\n:param memory_efficient_fsdp_wrap: (Optional) Whether to use memory-efficient FSDP wrapping\n:param fsdp_cpu_offload: (Optional) Whether to offload FSDP parameters to CPU" + description: Configuration for memory and compute efficiency optimizations. Errors: properties: data: @@ -8702,7 +8752,7 @@ components: - generations - scores title: EvaluateResponse - description: "The response from an evaluation.\n\n:param generations: The generations from the evaluation.\n:param scores: The scores from the evaluation." + description: The response from an evaluation. GrammarResponseFormat: properties: type: @@ -8718,7 +8768,7 @@ components: required: - bnf title: GrammarResponseFormat - description: "Configuration for grammar-guided response generation.\n\n:param type: Must be \"grammar\" to identify this format type\n:param bnf: The BNF grammar specification the response should conform to" + description: Configuration for grammar-guided response generation. 
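The DatasetPurpose docstring being trimmed above spelled out the expected row shape for each purpose; as a compact reminder, the eval/question-answer and eval/messages-answer rows it showed look roughly like this in YAML:

    # eval/question-answer row (values taken from the original docstring)
    question: What is the capital of France?
    answer: Paris

    # eval/messages-answer row
    messages:
      - role: user
        content: Hello, my name is John Doe.
      - role: assistant
        content: Hello, John Doe. How can I help you today?
      - role: user
        content: "What's my name?"
    answer: John Doe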
GreedySamplingStrategy: properties: type: @@ -8728,7 +8778,7 @@ components: default: greedy type: object title: GreedySamplingStrategy - description: "Greedy sampling strategy that selects the highest probability token at each step.\n\n:param type: Must be \"greedy\" to identify this sampling strategy" + description: Greedy sampling strategy that selects the highest probability token at each step. HealthInfo: properties: status: @@ -8737,7 +8787,7 @@ components: required: - status title: HealthInfo - description: "Health status information for the service.\n\n:param status: Current health status of the service" + description: Health status information for the service. HealthStatus: type: string enum: @@ -8758,7 +8808,7 @@ components: required: - image title: ImageContentItem - description: "A image content item\n\n:param type: Discriminator type of the content item. Always \"image\"\n:param image: Image as a base64 encoded string or an URL" + description: An image content item. ImageContentItem-Output: properties: type: @@ -8772,7 +8822,7 @@ components: required: - image title: ImageContentItem - description: "A image content item\n\n:param type: Discriminator type of the content item. Always \"image\"\n:param image: Image as a base64 encoded string or an URL" + description: An image content item. InferenceStep-Output: properties: turn_id: @@ -8802,7 +8852,7 @@ components: - step_id - model_response title: InferenceStep - description: "An inference step in an agent turn.\n\n:param model_response: The response from the LLM." + description: An inference step in an agent turn. InputTokensDetails: properties: cached_tokens: @@ -8825,7 +8875,7 @@ components: - job_id - status title: Job - description: "A job execution instance with status tracking.\n\n:param job_id: Unique identifier for the job\n:param status: Current execution status of the job" + description: A job execution instance with status tracking. JobStatus: type: string enum: @@ -8835,7 +8885,7 @@ components: - scheduled - cancelled title: JobStatus - description: "Status of a job execution.\n:cvar completed: Job has finished successfully\n:cvar in_progress: Job is currently running\n:cvar failed: Job has failed during execution\n:cvar scheduled: Job is scheduled but not yet started\n:cvar cancelled: Job was cancelled before completion" + description: Status of a job execution. JsonSchemaResponseFormat: properties: type: @@ -8851,7 +8901,7 @@ components: required: - json_schema title: JsonSchemaResponseFormat - description: "Configuration for JSON schema-guided response generation.\n\n:param type: Must be \"json_schema\" to identify this format type\n:param json_schema: The JSON schema the response should conform to. In a Python SDK, this is often a `pydantic` model." + description: Configuration for JSON schema-guided response generation. JsonType: properties: type: @@ -8861,7 +8911,7 @@ components: default: json type: object title: JsonType - description: "Parameter type for JSON values.\n\n:param type: Discriminator type. Always \"json\"" + description: Parameter type for JSON values.
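A hypothetical JsonSchemaResponseFormat value, sketched from the two fields shown above (the schema body itself is invented for illustration):

    response_format:
      type: json_schema
      json_schema:                 # the JSON schema the response should conform to
        type: object
        properties:
          answer:
            type: string
        required:
          - answer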
LLMAsJudgeScoringFnParams: properties: type: @@ -8891,7 +8941,7 @@ components: required: - judge_model title: LLMAsJudgeScoringFnParams - description: "Parameters for LLM-as-judge scoring function configuration.\n:param type: The type of scoring function parameters, always llm_as_judge\n:param judge_model: Identifier of the LLM model to use as a judge for scoring\n:param prompt_template: (Optional) Custom prompt template for the judge model\n:param judge_score_regexes: Regexes to extract the answer from generated response\n:param aggregation_functions: Aggregation functions to apply to the scores of each row" + description: Parameters for LLM-as-judge scoring function configuration. LLMRAGQueryGeneratorConfig: properties: type: @@ -8910,7 +8960,7 @@ components: - model - template title: LLMRAGQueryGeneratorConfig - description: "Configuration for the LLM-based RAG query generator.\n\n:param type: Type of query generator, always 'llm'\n:param model: Name of the language model to use for query generation\n:param template: Template string for formatting the query generation prompt" + description: Configuration for the LLM-based RAG query generator. ListBenchmarksResponse: properties: data: @@ -8933,7 +8983,7 @@ components: required: - data title: ListDatasetsResponse - description: "Response from listing datasets.\n\n:param data: List of datasets" + description: Response from listing datasets. ListModelsResponse: properties: data: @@ -8979,7 +9029,7 @@ components: required: - data title: ListProvidersResponse - description: "Response containing a list of all available providers.\n\n:param data: List of provider information objects" + description: Response containing a list of all available providers. ListRoutesResponse: properties: data: @@ -8991,7 +9041,7 @@ components: required: - data title: ListRoutesResponse - description: "Response containing a list of all available API routes.\n\n:param data: List of available route information objects" + description: Response containing a list of all available API routes. ListScoringFunctionsResponse: properties: data: @@ -9025,7 +9075,7 @@ components: required: - data title: ListToolGroupsResponse - description: "Response containing a list of tool groups.\n\n:param data: List of tool groups" + description: Response containing a list of tool groups. MCPListToolsTool: properties: input_schema: @@ -9043,7 +9093,7 @@ components: - input_schema - name title: MCPListToolsTool - description: "Tool definition returned by MCP list tools operation.\n\n:param input_schema: JSON schema defining the tool's input parameters\n:param name: Name of the tool\n:param description: (Optional) Description of what the tool does" + description: Tool definition returned by MCP list tools operation. MemoryRetrievalStep-Output: properties: turn_id: @@ -9097,7 +9147,7 @@ components: - vector_store_ids - inserted_context title: MemoryRetrievalStep - description: "A memory retrieval step in an agent turn.\n\n:param vector_store_ids: The IDs of the vector databases to retrieve context from.\n:param inserted_context: The context retrieved from the vector databases." + description: A memory retrieval step in an agent turn. 
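A hypothetical LLMAsJudgeScoringFnParams value, sketched from the field names above (the judge model, prompt template, regex, and aggregation function name are invented placeholders):

    type: llm_as_judge
    judge_model: example-judge-model                          # placeholder model identifier
    prompt_template: "Rate the answer from 0 to 5: {generated_answer}"   # placeholder template
    judge_score_regexes:
      - 'Score: (\d)'                                         # placeholder extraction regex
    aggregation_functions:
      - average                                               # placeholder aggregation function name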
Model: properties: identifier: @@ -9130,7 +9180,7 @@ components: - identifier - provider_id title: Model - description: "A model resource representing an AI model registered in Llama Stack.\n\n:param type: The resource type, always 'model' for model resources\n:param model_type: The type of model (LLM or embedding model)\n:param metadata: Any additional metadata for this model\n:param identifier: Unique identifier for this resource in llama stack\n:param provider_resource_id: Unique identifier for this resource in the provider\n:param provider_id: ID of the provider that owns this resource" + description: A model resource representing an AI model registered in Llama Stack. ModelCandidate: properties: type: @@ -9150,7 +9200,7 @@ components: - model - sampling_params title: ModelCandidate - description: "A model candidate for evaluation.\n\n:param model: The model ID to evaluate.\n:param sampling_params: The sampling parameters for the model.\n:param system_message: (Optional) The system message providing instructions or context to the model." + description: A model candidate for evaluation. ModelType: type: string enum: @@ -9158,7 +9208,7 @@ components: - embedding - rerank title: ModelType - description: "Enumeration of supported model types in Llama Stack.\n:cvar llm: Large language model for text generation and completion\n:cvar embedding: Embedding model for converting text to vector representations\n:cvar rerank: Reranking model for reordering documents based on their relevance to a query" + description: Enumeration of supported model types in Llama Stack. ModerationObject: properties: id: @@ -9178,7 +9228,7 @@ components: - model - results title: ModerationObject - description: "A moderation object.\n:param id: The unique identifier for the moderation request.\n:param model: The model used to generate the moderation results.\n:param results: A list of moderation objects" + description: A moderation object. ModerationObjectResults: properties: flagged: @@ -9212,7 +9262,7 @@ components: required: - flagged title: ModerationObjectResults - description: "A moderation object.\n:param flagged: Whether any of the below categories are flagged.\n:param categories: A list of the categories, and whether they are flagged or not.\n:param category_applied_input_types: A list of the categories along with the input type(s) that the score applies to.\n:param category_scores: A list of the categories along with their scores as predicted by model." + description: A moderation object. NumberType: properties: type: @@ -9222,7 +9272,7 @@ components: default: number type: object title: NumberType - description: "Parameter type for numeric values.\n\n:param type: Discriminator type. Always \"number\"" + description: Parameter type for numeric values. ObjectType: properties: type: @@ -9232,7 +9282,7 @@ components: default: object type: object title: ObjectType - description: "Parameter type for object values.\n\n:param type: Discriminator type. Always \"object\"" + description: Parameter type for object values. 
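A hypothetical Model entry, sketched from the fields described above (identifier and provider values are invented):

    type: model
    identifier: my-llm                      # unique id within Llama Stack (invented)
    provider_id: example-provider           # provider that owns the resource (invented)
    provider_resource_id: example/model-id  # id of the model on the provider side (invented)
    model_type: llm                         # llm, embedding, or rerank
    metadata: {}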
OpenAIAssistantMessageParam-Input: properties: role: @@ -9257,7 +9307,7 @@ components: type: array type: object title: OpenAIAssistantMessageParam - description: "A message containing the model's (assistant) response in an OpenAI-compatible chat completion request.\n\n:param role: Must be \"assistant\" to identify this as the model's response\n:param content: The content of the model's response\n:param name: (Optional) The name of the assistant message participant.\n:param tool_calls: List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object." + description: A message containing the model's (assistant) response in an OpenAI-compatible chat completion request. OpenAIAssistantMessageParam-Output: properties: role: @@ -9282,7 +9332,7 @@ components: type: array type: object title: OpenAIAssistantMessageParam - description: "A message containing the model's (assistant) response in an OpenAI-compatible chat completion request.\n\n:param role: Must be \"assistant\" to identify this as the model's response\n:param content: The content of the model's response\n:param name: (Optional) The name of the assistant message participant.\n:param tool_calls: List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object." + description: A message containing the model's (assistant) response in an OpenAI-compatible chat completion request. OpenAIChatCompletion: properties: id: @@ -9313,7 +9363,7 @@ components: - created - model title: OpenAIChatCompletion - description: "Response from an OpenAI-compatible chat completion request.\n\n:param id: The ID of the chat completion\n:param choices: List of choices\n:param object: The object type, which will be \"chat.completion\"\n:param created: The Unix timestamp in seconds when the chat completion was created\n:param model: The model that was used to generate the chat completion\n:param usage: Token usage information for the completion" + description: Response from an OpenAI-compatible chat completion request. OpenAIChatCompletionContentPartImageParam: properties: type: @@ -9327,7 +9377,7 @@ components: required: - image_url title: OpenAIChatCompletionContentPartImageParam - description: "Image content part for OpenAI-compatible chat completion messages.\n\n:param type: Must be \"image_url\" to identify this as image content\n:param image_url: Image URL specification and processing details" + description: Image content part for OpenAI-compatible chat completion messages. OpenAIChatCompletionContentPartTextParam: properties: type: @@ -9342,7 +9392,7 @@ components: required: - text title: OpenAIChatCompletionContentPartTextParam - description: "Text content part for OpenAI-compatible chat completion messages.\n\n:param type: Must be \"text\" to identify this as text content\n:param text: The text content of the message" + description: Text content part for OpenAI-compatible chat completion messages. OpenAIChatCompletionRequestWithExtraBody: properties: model: @@ -9464,7 +9514,7 @@ components: - model - messages title: OpenAIChatCompletionRequestWithExtraBody - description: "Request parameters for OpenAI-compatible chat completion endpoint.\n\n:param model: The identifier of the model to use. 
The model must be registered with Llama Stack and available via the /models endpoint.\n:param messages: List of messages in the conversation.\n:param frequency_penalty: (Optional) The penalty for repeated tokens.\n:param function_call: (Optional) The function call to use.\n:param functions: (Optional) List of functions to use.\n:param logit_bias: (Optional) The logit bias to use.\n:param logprobs: (Optional) The log probabilities to use.\n:param max_completion_tokens: (Optional) The maximum number of tokens to generate.\n:param max_tokens: (Optional) The maximum number of tokens to generate.\n:param n: (Optional) The number of completions to generate.\n:param parallel_tool_calls: (Optional) Whether to parallelize tool calls.\n:param presence_penalty: (Optional) The penalty for repeated tokens.\n:param response_format: (Optional) The response format to use.\n:param seed: (Optional) The seed to use.\n:param stop: (Optional) The stop tokens to use.\n:param stream: (Optional) Whether to stream the response.\n:param stream_options: (Optional) The stream options to use.\n:param temperature: (Optional) The temperature to use.\n:param tool_choice: (Optional) The tool choice to use.\n:param tools: (Optional) The tools to use.\n:param top_logprobs: (Optional) The top log probabilities to use.\n:param top_p: (Optional) The top p to use.\n:param user: (Optional) The user to use." + description: Request parameters for OpenAI-compatible chat completion endpoint. OpenAIChatCompletionToolCall: properties: index: @@ -9482,7 +9532,7 @@ components: $ref: '#/components/schemas/OpenAIChatCompletionToolCallFunction' type: object title: OpenAIChatCompletionToolCall - description: "Tool call specification for OpenAI-compatible chat completion responses.\n\n:param index: (Optional) Index of the tool call in the list\n:param id: (Optional) Unique identifier for the tool call\n:param type: Must be \"function\" to identify this as a function call\n:param function: (Optional) Function call details" + description: Tool call specification for OpenAI-compatible chat completion responses. OpenAIChatCompletionToolCallFunction: properties: name: @@ -9493,7 +9543,7 @@ components: type: string type: object title: OpenAIChatCompletionToolCallFunction - description: "Function call details for OpenAI-compatible tool calls.\n\n:param name: (Optional) Name of the function to call\n:param arguments: (Optional) Arguments to pass to the function as a JSON string" + description: Function call details for OpenAI-compatible tool calls. OpenAIChatCompletionUsage: properties: prompt_tokens: @@ -9515,7 +9565,7 @@ components: - completion_tokens - total_tokens title: OpenAIChatCompletionUsage - description: "Usage information for OpenAI chat completion.\n\n:param prompt_tokens: Number of tokens in the prompt\n:param completion_tokens: Number of tokens in the completion\n:param total_tokens: Total tokens used (prompt + completion)\n:param input_tokens_details: Detailed breakdown of input token usage\n:param output_tokens_details: Detailed breakdown of output token usage" + description: Usage information for OpenAI chat completion. 
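A hypothetical request body for the OpenAI-compatible chat completion endpoint, sketched from the parameter names above (the model identifier is invented and would have to be registered with the stack):

    model: example/chat-model        # invented identifier; must be registered and visible via /models
    messages:
      - role: user
        content: Hello!
    temperature: 0.7                 # optional
    max_completion_tokens: 128       # optional
    stream: false                    # optional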
OpenAIChatCompletionUsageCompletionTokensDetails: properties: reasoning_tokens: @@ -9523,7 +9573,7 @@ components: type: integer type: object title: OpenAIChatCompletionUsageCompletionTokensDetails - description: "Token details for output tokens in OpenAI chat completion usage.\n\n:param reasoning_tokens: Number of tokens used for reasoning (o1/o3 models)" + description: Token details for output tokens in OpenAI chat completion usage. OpenAIChatCompletionUsagePromptTokensDetails: properties: cached_tokens: @@ -9531,7 +9581,7 @@ components: type: integer type: object title: OpenAIChatCompletionUsagePromptTokensDetails - description: "Token details for prompt tokens in OpenAI chat completion usage.\n\n:param cached_tokens: Number of tokens retrieved from cache" + description: Token details for prompt tokens in OpenAI chat completion usage. OpenAIChoice-Output: properties: message: @@ -9564,7 +9614,7 @@ components: - finish_reason - index title: OpenAIChoice - description: "A choice from an OpenAI-compatible chat completion response.\n\n:param message: The message from the model\n:param finish_reason: The reason the model stopped generating\n:param index: The index of the choice\n:param logprobs: (Optional) The log probabilities for the tokens in the message" + description: A choice from an OpenAI-compatible chat completion response. OpenAIChoiceLogprobs-Output: properties: content: @@ -9579,7 +9629,7 @@ components: type: array type: object title: OpenAIChoiceLogprobs - description: "The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response.\n\n:param content: (Optional) The log probabilities for the tokens in the message\n:param refusal: (Optional) The log probabilities for the tokens in the message" + description: The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response. OpenAICompletion: properties: id: @@ -9608,7 +9658,7 @@ components: - created - model title: OpenAICompletion - description: "Response from an OpenAI-compatible completion request.\n\n:id: The ID of the completion\n:choices: List of choices\n:created: The Unix timestamp in seconds when the completion was created\n:model: The model that was used to generate the completion\n:object: The object type, which will be \"text_completion\"" + description: Response from an OpenAI-compatible completion request. OpenAICompletionChoice-Output: properties: finish_reason: @@ -9628,7 +9678,7 @@ components: - text - index title: OpenAICompletionChoice - description: "A choice from an OpenAI-compatible completion response.\n\n:finish_reason: The reason the model stopped generating\n:text: The text of the choice\n:index: The index of the choice\n:logprobs: (Optional) The log probabilities for the tokens in the choice" + description: A choice from an OpenAI-compatible completion response. OpenAICompletionRequestWithExtraBody: properties: model: @@ -9710,7 +9760,7 @@ components: - model - prompt title: OpenAICompletionRequestWithExtraBody - description: "Request parameters for OpenAI-compatible completion endpoint.\n\n:param model: The identifier of the model to use. 
The model must be registered with Llama Stack and available via the /models endpoint.\n:param prompt: The prompt to generate a completion for.\n:param best_of: (Optional) The number of completions to generate.\n:param echo: (Optional) Whether to echo the prompt.\n:param frequency_penalty: (Optional) The penalty for repeated tokens.\n:param logit_bias: (Optional) The logit bias to use.\n:param logprobs: (Optional) The log probabilities to use.\n:param max_tokens: (Optional) The maximum number of tokens to generate.\n:param n: (Optional) The number of completions to generate.\n:param presence_penalty: (Optional) The penalty for repeated tokens.\n:param seed: (Optional) The seed to use.\n:param stop: (Optional) The stop tokens to use.\n:param stream: (Optional) Whether to stream the response.\n:param stream_options: (Optional) The stream options to use.\n:param temperature: (Optional) The temperature to use.\n:param top_p: (Optional) The top p to use.\n:param user: (Optional) The user to use.\n:param suffix: (Optional) The suffix that should be appended to the completion." + description: Request parameters for OpenAI-compatible completion endpoint. OpenAICreateVectorStoreFileBatchRequestWithExtraBody: properties: file_ids: @@ -9737,7 +9787,7 @@ components: required: - file_ids title: OpenAICreateVectorStoreFileBatchRequestWithExtraBody - description: "Request to create a vector store file batch with extra_body support.\n\n:param file_ids: A list of File IDs that the vector store should use\n:param attributes: (Optional) Key-value attributes to store with the files\n:param chunking_strategy: (Optional) The chunking strategy used to chunk the file(s). Defaults to auto" + description: Request to create a vector store file batch with extra_body support. OpenAICreateVectorStoreRequestWithExtraBody: properties: name: @@ -9763,7 +9813,7 @@ components: additionalProperties: true type: object title: OpenAICreateVectorStoreRequestWithExtraBody - description: "Request to create a vector store with extra_body support.\n\n:param name: (Optional) A name for the vector store\n:param file_ids: List of file IDs to include in the vector store\n:param expires_after: (Optional) Expiration policy for the vector store\n:param chunking_strategy: (Optional) Strategy for splitting files into chunks\n:param metadata: Set of key-value pairs that can be attached to the vector store" + description: Request to create a vector store with extra_body support. OpenAIDeveloperMessageParam: properties: role: @@ -9785,7 +9835,7 @@ components: required: - content title: OpenAIDeveloperMessageParam - description: "A message from the developer in an OpenAI-compatible chat completion request.\n\n:param role: Must be \"developer\" to identify this as a developer message\n:param content: The content of the developer message\n:param name: (Optional) The name of the developer message participant." + description: A message from the developer in an OpenAI-compatible chat completion request. 
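A hypothetical OpenAICreateVectorStoreRequestWithExtraBody payload, sketched from the fields above (file ids and metadata are invented; the chunking strategy shown assumes the "auto" default mentioned in the docstring):

    name: example-store              # optional
    file_ids:
      - file-abc123                  # invented file id
    chunking_strategy:
      type: auto                     # assumed default per the docstring
    metadata:
      project: demo                  # invented key-value pair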
OpenAIEmbeddingData: properties: object: @@ -9808,7 +9858,7 @@ components: - embedding - index title: OpenAIEmbeddingData - description: "A single embedding data object from an OpenAI-compatible embeddings response.\n\n:param object: The object type, which will be \"embedding\"\n:param embedding: The embedding vector as a list of floats (when encoding_format=\"float\") or as a base64-encoded string (when encoding_format=\"base64\")\n:param index: The index of the embedding in the input list" + description: A single embedding data object from an OpenAI-compatible embeddings response. OpenAIEmbeddingUsage: properties: prompt_tokens: @@ -9822,7 +9872,7 @@ components: - prompt_tokens - total_tokens title: OpenAIEmbeddingUsage - description: "Usage information for an OpenAI-compatible embeddings response.\n\n:param prompt_tokens: The number of tokens in the input\n:param total_tokens: The total number of tokens used" + description: Usage information for an OpenAI-compatible embeddings response. OpenAIEmbeddingsRequestWithExtraBody: properties: model: @@ -9851,7 +9901,7 @@ components: - model - input title: OpenAIEmbeddingsRequestWithExtraBody - description: "Request parameters for OpenAI-compatible embeddings endpoint.\n\n:param model: The identifier of the model to use. The model must be an embedding model registered with Llama Stack and available via the /models endpoint.\n:param input: Input text to embed, encoded as a string or array of strings. To embed multiple inputs in a single request, pass an array of strings.\n:param encoding_format: (Optional) The format to return the embeddings in. Can be either \"float\" or \"base64\". Defaults to \"float\".\n:param dimensions: (Optional) The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models.\n:param user: (Optional) A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse." + description: Request parameters for OpenAI-compatible embeddings endpoint. OpenAIEmbeddingsResponse: properties: object: @@ -9875,7 +9925,7 @@ components: - model - usage title: OpenAIEmbeddingsResponse - description: "Response from an OpenAI-compatible embeddings request.\n\n:param object: The object type, which will be \"list\"\n:param data: List of embedding data objects\n:param model: The model that was used to generate the embeddings\n:param usage: Usage information" + description: Response from an OpenAI-compatible embeddings request. OpenAIFile: properties: type: @@ -9935,7 +9985,7 @@ components: - filename - purpose title: OpenAIFileObject - description: "OpenAI File object as defined in the OpenAI Files API.\n\n:param object: The object type, which is always \"file\"\n:param id: The file identifier, which can be referenced in the API endpoints\n:param bytes: The size of the file, in bytes\n:param created_at: The Unix timestamp (in seconds) for when the file was created\n:param expires_at: The Unix timestamp (in seconds) for when the file expires\n:param filename: The name of the file\n:param purpose: The intended purpose of the file" + description: OpenAI File object as defined in the OpenAI Files API. OpenAIFilePurpose: type: string enum: @@ -9955,7 +10005,7 @@ components: required: - url title: OpenAIImageURL - description: "Image URL specification for OpenAI-compatible chat completion messages.\n\n:param url: URL of the image to include in the message\n:param detail: (Optional) Level of detail for image processing. 
Can be \"low\", \"high\", or \"auto\"" + description: Image URL specification for OpenAI-compatible chat completion messages. OpenAIJSONSchema: properties: name: @@ -9973,7 +10023,7 @@ components: type: object type: object title: OpenAIJSONSchema - description: "JSON schema specification for OpenAI-compatible structured response format.\n\n:param name: Name of the schema\n:param description: (Optional) Description of the schema\n:param strict: (Optional) Whether to enforce strict adherence to the schema\n:param schema: (Optional) The JSON schema definition" + description: JSON schema specification for OpenAI-compatible structured response format. OpenAIResponseAnnotationCitation: properties: type: @@ -10000,7 +10050,7 @@ components: - title - url title: OpenAIResponseAnnotationCitation - description: "URL citation annotation for referencing external web resources.\n\n:param type: Annotation type identifier, always \"url_citation\"\n:param end_index: End position of the citation span in the content\n:param start_index: Start position of the citation span in the content\n:param title: Title of the referenced web resource\n:param url: URL of the referenced web resource" + description: URL citation annotation for referencing external web resources. OpenAIResponseAnnotationContainerFileCitation: properties: type: @@ -10053,7 +10103,7 @@ components: - filename - index title: OpenAIResponseAnnotationFileCitation - description: "File citation annotation for referencing specific files in response content.\n\n:param type: Annotation type identifier, always \"file_citation\"\n:param file_id: Unique identifier of the referenced file\n:param filename: Name of the referenced file\n:param index: Position index of the citation within the content" + description: File citation annotation for referencing specific files in response content. OpenAIResponseAnnotationFilePath: properties: type: @@ -10086,7 +10136,7 @@ components: required: - refusal title: OpenAIResponseContentPartRefusal - description: "Refusal content within a streamed response part.\n\n:param type: Content part type identifier, always \"refusal\"\n:param refusal: Refusal text supplied by the model" + description: Refusal content within a streamed response part. OpenAIResponseError: properties: code: @@ -10100,7 +10150,7 @@ components: - code - message title: OpenAIResponseError - description: "Error details for failed OpenAI response requests.\n\n:param code: Error code identifying the type of failure\n:param message: Human-readable error message describing the failure" + description: Error details for failed OpenAI response requests. OpenAIResponseFormatJSONObject: properties: type: @@ -10110,7 +10160,7 @@ components: default: json_object type: object title: OpenAIResponseFormatJSONObject - description: "JSON object response format for OpenAI-compatible chat completion requests.\n\n:param type: Must be \"json_object\" to indicate generic JSON object response format" + description: JSON object response format for OpenAI-compatible chat completion requests. OpenAIResponseFormatJSONSchema: properties: type: @@ -10124,7 +10174,7 @@ components: required: - json_schema title: OpenAIResponseFormatJSONSchema - description: "JSON schema response format for OpenAI-compatible chat completion requests.\n\n:param type: Must be \"json_schema\" to indicate structured JSON response format\n:param json_schema: The JSON schema specification for the response" + description: JSON schema response format for OpenAI-compatible chat completion requests. 
OpenAIResponseFormatText: properties: type: @@ -10134,7 +10184,7 @@ components: default: text type: object title: OpenAIResponseFormatText - description: "Text response format for OpenAI-compatible chat completion requests.\n\n:param type: Must be \"text\" to indicate plain text response format" + description: Text response format for OpenAI-compatible chat completion requests. OpenAIResponseInputFunctionToolCallOutput: properties: call_id: @@ -10181,7 +10231,7 @@ components: type: string type: object title: OpenAIResponseInputMessageContentFile - description: "File content for input messages in OpenAI response format.\n\n:param type: The type of the input item. Always `input_file`.\n:param file_data: The data of the file to be sent to the model.\n:param file_id: (Optional) The ID of the file to be sent to the model.\n:param file_url: The URL of the file to be sent to the model.\n:param filename: The name of the file to be sent to the model." + description: File content for input messages in OpenAI response format. OpenAIResponseInputMessageContentImage: properties: detail: @@ -10207,7 +10257,7 @@ components: type: string type: object title: OpenAIResponseInputMessageContentImage - description: "Image content for input messages in OpenAI response format.\n\n:param detail: Level of detail for image processing, can be \"low\", \"high\", or \"auto\"\n:param type: Content type identifier, always \"input_image\"\n:param file_id: (Optional) The ID of the file to be sent to the model.\n:param image_url: (Optional) URL of the image content" + description: Image content for input messages in OpenAI response format. OpenAIResponseInputMessageContentText: properties: text: @@ -10222,7 +10272,7 @@ components: required: - text title: OpenAIResponseInputMessageContentText - description: "Text content for input messages in OpenAI response format.\n\n:param text: The text content of the input message\n:param type: Content type identifier, always \"input_text\"" + description: Text content for input messages in OpenAI response format. OpenAIResponseInputToolFileSearch: properties: type: @@ -10251,7 +10301,7 @@ components: required: - vector_store_ids title: OpenAIResponseInputToolFileSearch - description: "File search tool configuration for OpenAI response inputs.\n\n:param type: Tool type identifier, always \"file_search\"\n:param vector_store_ids: List of vector store identifiers to search within\n:param filters: (Optional) Additional filters to apply to the search\n:param max_num_results: (Optional) Maximum number of search results to return (1-50)\n:param ranking_options: (Optional) Options for ranking and scoring search results" + description: File search tool configuration for OpenAI response inputs. OpenAIResponseInputToolFunction: properties: type: @@ -10277,7 +10327,7 @@ components: - name - parameters title: OpenAIResponseInputToolFunction - description: "Function tool configuration for OpenAI response inputs.\n\n:param type: Tool type identifier, always \"function\"\n:param name: Name of the function that can be called\n:param description: (Optional) Description of what the function does\n:param parameters: (Optional) JSON schema defining the function's parameters\n:param strict: (Optional) Whether to enforce strict parameter validation" + description: Function tool configuration for OpenAI response inputs. 
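A hypothetical file_search tool entry for a response request, sketched from the OpenAIResponseInputToolFileSearch fields above (the vector store id and threshold are invented):

    - type: file_search
      vector_store_ids:
        - vs_abc123                  # invented vector store id
      max_num_results: 5             # optional, 1-50 per the docstring
      ranking_options:               # optional
        score_threshold: 0.5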
OpenAIResponseInputToolMCP: properties: type: @@ -10316,7 +10366,7 @@ components: - server_label - server_url title: OpenAIResponseInputToolMCP - description: "Model Context Protocol (MCP) tool configuration for OpenAI response inputs.\n\n:param type: Tool type identifier, always \"mcp\"\n:param server_label: Label to identify this MCP server\n:param server_url: URL endpoint of the MCP server\n:param headers: (Optional) HTTP headers to include when connecting to the server\n:param require_approval: Approval requirement for tool calls (\"always\", \"never\", or filter)\n:param allowed_tools: (Optional) Restriction on which tools can be used from this server" + description: Model Context Protocol (MCP) tool configuration for OpenAI response inputs. OpenAIResponseInputToolWebSearch: properties: type: @@ -10336,7 +10386,7 @@ components: pattern: ^low|medium|high$ type: object title: OpenAIResponseInputToolWebSearch - description: "Web search tool configuration for OpenAI response inputs.\n\n:param type: Web search tool type variant to use\n:param search_context_size: (Optional) Size of search context, must be \"low\", \"medium\", or \"high\"" + description: Web search tool configuration for OpenAI response inputs. OpenAIResponseMCPApprovalRequest: properties: arguments: @@ -10598,7 +10648,7 @@ components: - output - status title: OpenAIResponseObject - description: "Complete OpenAI response object containing generation results and metadata.\n\n:param created_at: Unix timestamp when the response was created\n:param error: (Optional) Error details if the response generation failed\n:param id: Unique identifier for this response\n:param model: Model identifier used for generation\n:param object: Object type identifier, always \"response\"\n:param output: List of generated output items (messages, tool calls, etc.)\n:param parallel_tool_calls: Whether tool calls can be executed in parallel\n:param previous_response_id: (Optional) ID of the previous response in a conversation\n:param prompt: (Optional) Reference to a prompt template and its variables.\n:param status: Current status of the response generation\n:param temperature: (Optional) Sampling temperature used for generation\n:param text: Text formatting configuration for the response\n:param top_p: (Optional) Nucleus sampling parameter used for generation\n:param tools: (Optional) An array of tools the model may call while generating a response.\n:param truncation: (Optional) Truncation strategy applied to the response\n:param usage: (Optional) Token usage information for the response\n:param instructions: (Optional) System message inserted into the model's context" + description: Complete OpenAI response object containing generation results and metadata. OpenAIResponseOutputMessageContentOutputText: properties: text: @@ -10658,7 +10708,7 @@ components: - queries - status title: OpenAIResponseOutputMessageFileSearchToolCall - description: "File search tool call output message for OpenAI responses.\n\n:param id: Unique identifier for this tool call\n:param queries: List of search queries executed\n:param status: Current status of the file search operation\n:param type: Tool call type identifier, always \"file_search_call\"\n:param results: (Optional) Search results returned by the file search operation" + description: File search tool call output message for OpenAI responses. 
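A hypothetical mcp tool entry, sketched from the OpenAIResponseInputToolMCP fields above (the server label, URL, and tool name are invented):

    - type: mcp
      server_label: internal-tools             # invented label
      server_url: https://mcp.example.com/sse  # invented URL
      require_approval: never                  # "always", "never", or a filter
      allowed_tools:                           # optional restriction
        - get_weather                          # invented tool name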
OpenAIResponseOutputMessageFileSearchToolCallResults: properties: attributes: @@ -10685,7 +10735,7 @@ components: - score - text title: OpenAIResponseOutputMessageFileSearchToolCallResults - description: "Search results returned by the file search operation.\n\n:param attributes: (Optional) Key-value attributes associated with the file\n:param file_id: Unique identifier of the file containing the result\n:param filename: Name of the file containing the result\n:param score: Relevance score for this search result (between 0 and 1)\n:param text: Text content of the search result" + description: Search results returned by the file search operation. OpenAIResponseOutputMessageFunctionToolCall: properties: call_id: @@ -10714,7 +10764,7 @@ components: - name - arguments title: OpenAIResponseOutputMessageFunctionToolCall - description: "Function tool call output message for OpenAI responses.\n\n:param call_id: Unique identifier for the function call\n:param name: Name of the function being called\n:param arguments: JSON string containing the function arguments\n:param type: Tool call type identifier, always \"function_call\"\n:param id: (Optional) Additional identifier for the tool call\n:param status: (Optional) Current status of the function call execution" + description: Function tool call output message for OpenAI responses. OpenAIResponseOutputMessageMCPCall: properties: id: @@ -10747,7 +10797,7 @@ components: - name - server_label title: OpenAIResponseOutputMessageMCPCall - description: "Model Context Protocol (MCP) call output message for OpenAI responses.\n\n:param id: Unique identifier for this MCP call\n:param type: Tool call type identifier, always \"mcp_call\"\n:param arguments: JSON string containing the MCP call arguments\n:param name: Name of the MCP method being called\n:param server_label: Label identifying the MCP server handling the call\n:param error: (Optional) Error message if the MCP call failed\n:param output: (Optional) Output result from the successful MCP call" + description: Model Context Protocol (MCP) call output message for OpenAI responses. OpenAIResponseOutputMessageMCPListTools: properties: id: @@ -10772,7 +10822,7 @@ components: - server_label - tools title: OpenAIResponseOutputMessageMCPListTools - description: "MCP list tools output message containing available tools from an MCP server.\n\n:param id: Unique identifier for this MCP list tools operation\n:param type: Tool call type identifier, always \"mcp_list_tools\"\n:param server_label: Label identifying the MCP server providing the tools\n:param tools: List of available tools provided by the MCP server" + description: MCP list tools output message containing available tools from an MCP server. OpenAIResponseOutputMessageWebSearchToolCall: properties: id: @@ -10791,7 +10841,7 @@ components: - id - status title: OpenAIResponseOutputMessageWebSearchToolCall - description: "Web search tool call output message for OpenAI responses.\n\n:param id: Unique identifier for this tool call\n:param status: Current status of the web search operation\n:param type: Tool call type identifier, always \"web_search_call\"" + description: Web search tool call output message for OpenAI responses. 
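A hypothetical function_call output item as it might appear in a response's output list, sketched from the OpenAIResponseOutputMessageFunctionToolCall fields above (ids, name, and arguments are invented):

    type: function_call
    call_id: call_abc123             # invented call id
    name: get_weather                # invented function name
    arguments: '{"city": "Paris"}'   # JSON string, per the docstring
    status: completed                # optional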
OpenAIResponsePrompt: properties: id: @@ -10818,14 +10868,14 @@ components: required: - id title: OpenAIResponsePrompt - description: "OpenAI compatible Prompt object that is used in OpenAI responses.\n\n:param id: Unique identifier of the prompt template\n:param variables: Dictionary of variable names to OpenAIResponseInputMessageContent structure for template substitution. The substitution values can either be strings, or other Response input types\nlike images or files.\n:param version: Version number of the prompt to use (defaults to latest if not specified)" + description: OpenAI compatible Prompt object that is used in OpenAI responses. OpenAIResponseText: properties: format: $ref: '#/components/schemas/OpenAIResponseTextFormat' type: object title: OpenAIResponseText - description: "Text response configuration for OpenAI responses.\n\n:param format: (Optional) Text format configuration specifying output format requirements" + description: Text response configuration for OpenAI responses. OpenAIResponseTextFormat: properties: type: @@ -10852,7 +10902,7 @@ components: type: boolean type: object title: OpenAIResponseTextFormat - description: "Configuration for Responses API text format.\n\n:param type: Must be \"text\", \"json_schema\", or \"json_object\" to identify the format type\n:param name: The name of the response format. Only used for json_schema.\n:param schema: The JSON schema the response should conform to. In a Python SDK, this is often a `pydantic` model. Only used for json_schema.\n:param description: (Optional) A description of the response format. Only used for json_schema.\n:param strict: (Optional) Whether to strictly enforce the JSON schema. If true, the response must match the schema exactly. Only used for json_schema." + description: Configuration for Responses API text format. OpenAIResponseToolMCP: properties: type: @@ -10874,7 +10924,7 @@ components: required: - server_label title: OpenAIResponseToolMCP - description: "Model Context Protocol (MCP) tool configuration for OpenAI response object.\n\n:param type: Tool type identifier, always \"mcp\"\n:param server_label: Label to identify this MCP server\n:param allowed_tools: (Optional) Restriction on which tools can be used from this server" + description: Model Context Protocol (MCP) tool configuration for OpenAI response object. OpenAIResponseUsage: properties: input_tokens: @@ -10896,7 +10946,7 @@ components: - output_tokens - total_tokens title: OpenAIResponseUsage - description: "Usage information for OpenAI response.\n\n:param input_tokens: Number of tokens in the input\n:param output_tokens: Number of tokens in the output\n:param total_tokens: Total tokens used (input + output)\n:param input_tokens_details: Detailed breakdown of input token usage\n:param output_tokens_details: Detailed breakdown of output token usage" + description: Usage information for OpenAI response. OpenAIResponseUsageInputTokensDetails: properties: cached_tokens: @@ -10904,7 +10954,7 @@ components: type: integer type: object title: OpenAIResponseUsageInputTokensDetails - description: "Token details for input tokens in OpenAI response usage.\n\n:param cached_tokens: Number of tokens retrieved from cache" + description: Token details for input tokens in OpenAI response usage. 
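A hypothetical OpenAIResponsePrompt reference, sketched from the fields above (the id and variable value are invented; per the docstring, variable values may be plain strings or other response input content types):

    prompt:
      id: pmpt_...                   # invented; see the Prompt schema for the id format
      version: "2"                   # optional; defaults to the latest version
      variables:
        city: Paris                  # invented plain-string variable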
OpenAIResponseUsageOutputTokensDetails: properties: reasoning_tokens: @@ -10912,7 +10962,7 @@ components: type: integer type: object title: OpenAIResponseUsageOutputTokensDetails - description: "Token details for output tokens in OpenAI response usage.\n\n:param reasoning_tokens: Number of tokens used for reasoning (o1/o3 models)" + description: Token details for output tokens in OpenAI response usage. OpenAISystemMessageParam: properties: role: @@ -10934,7 +10984,7 @@ components: required: - content title: OpenAISystemMessageParam - description: "A system message providing instructions or context to the model.\n\n:param role: Must be \"system\" to identify this as a system message\n:param content: The content of the \"system prompt\". If multiple system messages are provided, they are concatenated. The underlying Llama Stack code may also add other system messages (for example, for formatting tool definitions).\n:param name: (Optional) The name of the system message participant." + description: A system message providing instructions or context to the model. OpenAITokenLogProb: properties: token: @@ -10959,7 +11009,7 @@ components: - logprob - top_logprobs title: OpenAITokenLogProb - description: "The log probability for a token from an OpenAI-compatible chat completion response.\n\n:token: The token\n:bytes: (Optional) The bytes for the token\n:logprob: The log probability of the token\n:top_logprobs: The top log probabilities for the token" + description: The log probability for a token from an OpenAI-compatible chat completion response. OpenAIToolMessageParam: properties: role: @@ -10982,7 +11032,7 @@ components: - tool_call_id - content title: OpenAIToolMessageParam - description: "A message representing the result of a tool invocation in an OpenAI-compatible chat completion request.\n\n:param role: Must be \"tool\" to identify this as a tool response\n:param tool_call_id: Unique identifier for the tool call this response is for\n:param content: The response content from the tool" + description: A message representing the result of a tool invocation in an OpenAI-compatible chat completion request. OpenAITopLogProb: properties: token: @@ -11001,7 +11051,7 @@ components: - token - logprob title: OpenAITopLogProb - description: "The top log probability for a token from an OpenAI-compatible chat completion response.\n\n:token: The token\n:bytes: (Optional) The bytes for the token\n:logprob: The log probability of the token" + description: The top log probability for a token from an OpenAI-compatible chat completion response. OpenAIUserMessageParam-Input: properties: role: @@ -11032,7 +11082,7 @@ components: required: - content title: OpenAIUserMessageParam - description: "A message from the user in an OpenAI-compatible chat completion request.\n\n:param role: Must be \"user\" to identify this as a user message\n:param content: The content of the message, which can include text and other media\n:param name: (Optional) The name of the user message participant." + description: A message from the user in an OpenAI-compatible chat completion request. OpenAIUserMessageParam-Output: properties: role: @@ -11063,7 +11113,7 @@ components: required: - content title: OpenAIUserMessageParam - description: "A message from the user in an OpenAI-compatible chat completion request.\n\n:param role: Must be \"user\" to identify this as a user message\n:param content: The content of the message, which can include text and other media\n:param name: (Optional) The name of the user message participant." 
+ description: A message from the user in an OpenAI-compatible chat completion request. OptimizerConfig: properties: optimizer_type: @@ -11084,7 +11134,7 @@ components: - weight_decay - num_warmup_steps title: OptimizerConfig - description: "Configuration parameters for the optimization algorithm.\n\n:param optimizer_type: Type of optimizer to use (adam, adamw, or sgd)\n:param lr: Learning rate for the optimizer\n:param weight_decay: Weight decay coefficient for regularization\n:param num_warmup_steps: Number of steps for learning rate warmup" + description: Configuration parameters for the optimization algorithm. OptimizerType: type: string enum: @@ -11092,14 +11142,14 @@ components: - adamw - sgd title: OptimizerType - description: "Available optimizer algorithms for training.\n:cvar adam: Adaptive Moment Estimation optimizer\n:cvar adamw: AdamW optimizer with weight decay\n:cvar sgd: Stochastic Gradient Descent optimizer" + description: Available optimizer algorithms for training. Order: type: string enum: - asc - desc title: Order - description: "Sort order for paginated responses.\n:cvar asc: Ascending order\n:cvar desc: Descending order" + description: Sort order for paginated responses. OutputTokensDetails: properties: reasoning_tokens: @@ -11150,7 +11200,7 @@ components: - version - prompt_id title: Prompt - description: "A prompt resource representing a stored OpenAI Compatible prompt template in Llama Stack.\n\n:param prompt: The system prompt text with variable placeholders. Variables are only supported when using the Responses API.\n:param version: Version (integer starting at 1, incremented on save)\n:param prompt_id: Unique identifier formatted as 'pmpt_<48-digit-hash>'\n:param variables: List of prompt variable names that can be used in the prompt template\n:param is_default: Boolean indicating whether this version is the default version for this prompt" + description: A prompt resource representing a stored OpenAI Compatible prompt template in Llama Stack. ProviderInfo: properties: api: @@ -11178,7 +11228,7 @@ components: - config - health title: ProviderInfo - description: "Information about a registered provider including its configuration and health status.\n\n:param api: The API name this provider implements\n:param provider_id: Unique identifier for the provider\n:param provider_type: The type of provider implementation\n:param config: Configuration parameters for the provider\n:param health: Current health status of the provider" + description: Information about a registered provider including its configuration and health status. QueryChunksResponse: properties: chunks: @@ -11196,7 +11246,7 @@ components: - chunks - scores title: QueryChunksResponse - description: "Response from querying chunks in a vector database.\n\n:param chunks: List of content chunks returned from the query\n:param scores: Relevance scores corresponding to each returned chunk" + description: Response from querying chunks in a vector database. 
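A hypothetical OptimizerConfig value, sketched from the fields above (the numeric values are invented):

    optimizer_type: adamw            # adam, adamw, or sgd
    lr: 0.0001
    weight_decay: 0.01
    num_warmup_steps: 100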
RAGQueryConfig: properties: query_generator_config: @@ -11239,7 +11289,7 @@ components: weighted: '#/components/schemas/WeightedRanker' type: object title: RAGQueryConfig - description: "Configuration for the RAG query generation.\n\n:param query_generator_config: Configuration for the query generator.\n:param max_tokens_in_context: Maximum number of tokens in the context.\n:param max_chunks: Maximum number of chunks to retrieve.\n:param chunk_template: Template for formatting each retrieved chunk in the context.\n Available placeholders: {index} (1-based chunk ordinal), {chunk.content} (chunk content string), {metadata} (chunk metadata dict).\n Default: \"Result {index}\\nContent: {chunk.content}\\nMetadata: {metadata}\\n\"\n:param mode: Search mode for retrieval—either \"vector\", \"keyword\", or \"hybrid\". Default \"vector\".\n:param ranker: Configuration for the ranker to use in hybrid search. Defaults to RRF ranker." + description: Configuration for the RAG query generation. RAGQueryResult: properties: content: @@ -11270,7 +11320,7 @@ components: title: Metadata type: object title: RAGQueryResult - description: "Result of a RAG query containing retrieved content and metadata.\n\n:param content: (Optional) The retrieved content from the query\n:param metadata: Additional metadata about the query result" + description: Result of a RAG query containing retrieved content and metadata. RAGSearchMode: type: string enum: @@ -11293,7 +11343,7 @@ components: minimum: 0.0 type: object title: RRFRanker - description: "Reciprocal Rank Fusion (RRF) ranker configuration.\n\n:param type: The type of ranker, always \"rrf\"\n:param impact_factor: The impact factor for RRF scoring. Higher values give more weight to higher-ranked results.\n Must be greater than 0" + description: Reciprocal Rank Fusion (RRF) ranker configuration. RegexParserScoringFnParams: properties: type: @@ -11315,7 +11365,7 @@ components: description: Aggregation functions to apply to the scores of each row type: object title: RegexParserScoringFnParams - description: "Parameters for regex parser scoring function configuration.\n:param type: The type of scoring function parameters, always regex_parser\n:param parsing_regexes: Regex to extract the answer from generated response\n:param aggregation_functions: Aggregation functions to apply to the scores of each row" + description: Parameters for regex parser scoring function configuration. RerankData: properties: index: @@ -11329,7 +11379,7 @@ components: - index - relevance_score title: RerankData - description: "A single rerank result from a reranking response.\n\n:param index: The original index of the document in the input list\n:param relevance_score: The relevance score from the model output. Values are inverted when applicable so that higher scores indicate greater relevance." + description: A single rerank result from a reranking response. RerankResponse: properties: data: @@ -11341,7 +11391,7 @@ components: required: - data title: RerankResponse - description: "Response from a reranking request.\n\n:param data: List of rerank result objects, sorted by relevance score (descending)" + description: Response from a reranking request. 
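A hypothetical RAGQueryConfig value, sketched from the fields above (numeric values are invented; the chunk_template shown is the default quoted in the removed docstring):

    query_generator_config:
      type: default
      separator: " "
    max_tokens_in_context: 4096
    max_chunks: 5
    chunk_template: "Result {index}\nContent: {chunk.content}\nMetadata: {metadata}\n"
    mode: vector                     # vector, keyword, or hybrid
    ranker:                          # used for hybrid search; RRF shown as an example
      type: rrf
      impact_factor: 60.0            # invented value; must be greater than 0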
RouteInfo: properties: route: @@ -11361,7 +11411,7 @@ components: - method - provider_types title: RouteInfo - description: "Information about an API route including its path, method, and implementing providers.\n\n:param route: The API endpoint path\n:param method: HTTP method for the route\n:param provider_types: List of provider types that implement this route" + description: Information about an API route including its path, method, and implementing providers. RowsDataSource: properties: type: @@ -11379,14 +11429,14 @@ components: required: - rows title: RowsDataSource - description: "A dataset stored in rows.\n:param rows: The dataset is stored in rows. E.g.\n - [\n {\"messages\": [{\"role\": \"user\", \"content\": \"Hello, world!\"}, {\"role\": \"assistant\", \"content\": \"Hello, world!\"}]}\n ]" + description: A dataset stored in rows. RunShieldResponse: properties: violation: $ref: '#/components/schemas/SafetyViolation' type: object title: RunShieldResponse - description: "Response from running a safety shield.\n\n:param violation: (Optional) Safety violation detected by the shield, if any" + description: Response from running a safety shield. SafetyViolation: properties: violation_level: @@ -11402,7 +11452,7 @@ components: required: - violation_level title: SafetyViolation - description: "Details of a safety violation detected by content moderation.\n\n:param violation_level: Severity level of the violation\n:param user_message: (Optional) Message to convey to the user about the violation\n:param metadata: Additional metadata including specific violation codes for debugging and telemetry" + description: Details of a safety violation detected by content moderation. SamplingParams: properties: strategy: @@ -11431,7 +11481,7 @@ components: type: array type: object title: SamplingParams - description: "Sampling parameters.\n\n:param strategy: The sampling strategy.\n:param max_tokens: The maximum number of tokens that can be generated in the completion. The token count of\n your prompt plus max_tokens cannot exceed the model's context length.\n:param repetition_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens\n based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\n:param stop: Up to 4 sequences where the API will stop generating further tokens.\n The returned text will not contain the stop sequence." + description: Sampling parameters. ScoreBatchResponse: properties: dataset_id: @@ -11446,7 +11496,7 @@ components: required: - results title: ScoreBatchResponse - description: "Response from batch scoring operations on datasets.\n\n:param dataset_id: (Optional) The identifier of the dataset that was scored\n:param results: A map of scoring function name to ScoringResult" + description: Response from batch scoring operations on datasets. ScoreResponse: properties: results: @@ -11458,7 +11508,7 @@ components: required: - results title: ScoreResponse - description: "The response from scoring.\n\n:param results: A map of scoring function name to ScoringResult." + description: The response from scoring. ScoringFn-Output: properties: identifier: @@ -11532,7 +11582,7 @@ components: - provider_id - return_type title: ScoringFn - description: "A scoring function resource for evaluating model outputs.\n:param type: The resource type, always scoring_function" + description: A scoring function resource for evaluating model outputs. 
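A hypothetical SamplingParams value, sketched from the fields above and the GreedySamplingStrategy defined earlier (numeric values and the stop sequence are invented):

    strategy:
      type: greedy
    max_tokens: 256                  # prompt tokens plus max_tokens must fit the model's context length
    repetition_penalty: 1.0          # between -2.0 and 2.0 per the docstring
    stop:                            # up to 4 stop sequences
      - "###"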
ScoringResult: properties: score_rows: @@ -11550,7 +11600,7 @@ components: - score_rows - aggregated_results title: ScoringResult - description: "A scoring result for a single row.\n\n:param score_rows: The scoring result for each row. Each row is a map of column name to value.\n:param aggregated_results: Map of metric name to aggregated value" + description: A scoring result for a single row. SearchRankingOptions: properties: ranker: @@ -11562,7 +11612,7 @@ components: type: number type: object title: SearchRankingOptions - description: "Options for ranking and filtering search results.\n\n:param ranker: (Optional) Name of the ranking algorithm to use\n:param score_threshold: (Optional) Minimum relevance score threshold for results" + description: Options for ranking and filtering search results. Shield: properties: identifier: @@ -11591,7 +11641,7 @@ components: - identifier - provider_id title: Shield - description: "A safety shield resource that can be used to check content.\n\n:param params: (Optional) Configuration parameters for the shield\n:param type: The resource type, always shield" + description: A safety shield resource that can be used to check content. ShieldCallStep-Output: properties: turn_id: @@ -11621,7 +11671,7 @@ components: - step_id - violation title: ShieldCallStep - description: "A shield call step in an agent turn.\n\n:param violation: The violation from the shield call." + description: A shield call step in an agent turn. StopReason: type: string enum: @@ -11638,7 +11688,7 @@ components: default: string type: object title: StringType - description: "Parameter type for string values.\n\n:param type: Discriminator type. Always \"string\"" + description: Parameter type for string values. SystemMessage: properties: role: @@ -11672,14 +11722,14 @@ components: required: - content title: SystemMessage - description: "A system message providing instructions or context to the model.\n\n:param role: Must be \"system\" to identify this as a system message\n:param content: The content of the \"system prompt\". If multiple system messages are provided, they are concatenated. The underlying Llama Stack code may also add other system messages (for example, for formatting tool definitions)." + description: A system message providing instructions or context to the model. SystemMessageBehavior: type: string enum: - append - replace title: SystemMessageBehavior - description: "Config for how to override the default system prompt.\n\n:cvar append: Appends the provided system message to the default system prompt:\n https://www.llama.com/docs/model-cards-and-prompt-formats/llama3_2/#-function-definitions-in-the-system-prompt-\n:cvar replace: Replaces the default system prompt with the provided system message. The system message can include the string\n '{{function_definitions}}' to indicate where the function definitions should be inserted." + description: Config for how to override the default system prompt. TextContentItem: properties: type: @@ -11694,7 +11744,7 @@ components: required: - text title: TextContentItem - description: "A text content item\n\n:param type: Discriminator type of the content item. Always \"text\"\n:param text: Text content" + description: A text content item ToolCall: properties: call_id: @@ -11721,7 +11771,7 @@ components: - required - none title: ToolChoice - description: "Whether tool use is required or automatic. This is a hint to the model which may not be followed. 
It depends on the Instruction Following capabilities of the model.\n\n:cvar auto: The model may use tools if it determines that is appropriate.\n:cvar required: The model must use tools.\n:cvar none: The model must not use tools." + description: Whether tool use is required or automatic. This is a hint to the model which may not be followed. It depends on the Instruction Following capabilities of the model. ToolConfig: properties: tool_choice: @@ -11737,7 +11787,7 @@ components: $ref: '#/components/schemas/SystemMessageBehavior' type: object title: ToolConfig - description: "Configuration for tool use.\n\n:param tool_choice: (Optional) Whether tool use is automatic, required, or none. Can also specify a tool name to use a specific tool. Defaults to ToolChoice.auto.\n:param tool_prompt_format: (Optional) Instructs the model how to format tool calls. By default, Llama Stack will attempt to use a format that is best adapted to the model.\n - `ToolPromptFormat.json`: The tool calls are formatted as a JSON object.\n - `ToolPromptFormat.function_tag`: The tool calls are enclosed in a tag.\n - `ToolPromptFormat.python_list`: The tool calls are output as Python syntax -- a list of function calls.\n:param system_message_behavior: (Optional) Config for how to override the default system prompt.\n - `SystemMessageBehavior.append`: Appends the provided system message to the default system prompt.\n - `SystemMessageBehavior.replace`: Replaces the default system prompt with the provided system message. The system message can include the string\n '{{function_definitions}}' to indicate where the function definitions should be inserted." + description: Configuration for tool use. ToolDef: properties: toolgroup_id: @@ -11765,7 +11815,7 @@ components: required: - name title: ToolDef - description: "Tool definition used in runtime contexts.\n\n:param name: Name of the tool\n:param description: (Optional) Human-readable description of what the tool does\n:param input_schema: (Optional) JSON Schema for tool inputs (MCP inputSchema)\n:param output_schema: (Optional) JSON Schema for tool outputs (MCP outputSchema)\n:param metadata: (Optional) Additional metadata about the tool\n:param toolgroup_id: (Optional) ID of the tool group this tool belongs to" + description: Tool definition used in runtime contexts. ToolExecutionStep-Output: properties: turn_id: @@ -11804,7 +11854,7 @@ components: - tool_calls - tool_responses title: ToolExecutionStep - description: "A tool execution step in an agent turn.\n\n:param tool_calls: The tool calls to execute.\n:param tool_responses: The tool responses from the tool calls." + description: A tool execution step in an agent turn. ToolGroup: properties: identifier: @@ -11835,7 +11885,7 @@ components: - identifier - provider_id title: ToolGroup - description: "A group of related tools managed together.\n\n:param type: Type of resource, always 'tool_group'\n:param mcp_endpoint: (Optional) Model Context Protocol endpoint for remote tools\n:param args: (Optional) Additional arguments for the tool group" + description: A group of related tools managed together. 
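An illustrative ToolConfig value combining the defaults described above with enum spellings taken from the ToolChoice, ToolPromptFormat, and SystemMessageBehavior schemas in this spec:

# Sketch of a ToolConfig; all three fields are optional per the description above.
tool_choice: auto                 # or "required", "none", or a specific tool name
tool_prompt_format: json          # or "function_tag" / "python_list"
system_message_behavior: append   # or "replace"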
ToolInvocationResult: properties: content: @@ -11872,7 +11922,7 @@ components: type: object type: object title: ToolInvocationResult - description: "Result of a tool invocation.\n\n:param content: (Optional) The output content from the tool execution\n:param error_message: (Optional) Error message if the tool execution failed\n:param error_code: (Optional) Numeric error code if the tool execution failed\n:param metadata: (Optional) Additional metadata about the tool execution" + description: Result of a tool invocation. ToolPromptFormat: type: string enum: @@ -11880,7 +11930,7 @@ components: - function_tag - python_list title: ToolPromptFormat - description: "Prompt format for calling custom / zero shot tools.\n\n:cvar json: JSON format for calling tools. It takes the form:\n {\n \"type\": \"function\",\n \"function\" : {\n \"name\": \"function_name\",\n \"description\": \"function_description\",\n \"parameters\": {...}\n }\n }\n:cvar function_tag: Function tag format, pseudo-XML. This looks like:\n (parameters)\n\n:cvar python_list: Python list. The output is a valid Python expression that can be\n evaluated to a list. Each element in the list is a function call. Example:\n [\"function_name(param1, param2)\", \"function_name(param1, param2)\"]" + description: Prompt format for calling custom / zero shot tools. ToolResponse-Input: properties: call_id: @@ -11923,7 +11973,7 @@ components: - tool_name - content title: ToolResponse - description: "Response from a tool invocation.\n\n:param call_id: Unique identifier for the tool call this response is for\n:param tool_name: Name of the tool that was invoked\n:param content: The response content from the tool\n:param metadata: (Optional) Additional metadata about the tool response" + description: Response from a tool invocation. ToolResponse-Output: properties: call_id: @@ -11966,7 +12016,7 @@ components: - tool_name - content title: ToolResponse - description: "Response from a tool invocation.\n\n:param call_id: Unique identifier for the tool call this response is for\n:param tool_name: Name of the tool that was invoked\n:param content: The response content from the tool\n:param metadata: (Optional) Additional metadata about the tool response" + description: Response from a tool invocation. ToolResponseMessage-Output: properties: role: @@ -12004,7 +12054,7 @@ components: - call_id - content title: ToolResponseMessage - description: "A message representing the result of a tool invocation.\n\n:param role: Must be \"tool\" to identify this as a tool response\n:param call_id: Unique identifier for the tool call this response is for\n:param content: The response content from the tool" + description: A message representing the result of a tool invocation. TopKSamplingStrategy: properties: type: @@ -12020,7 +12070,7 @@ components: required: - top_k title: TopKSamplingStrategy - description: "Top-k sampling strategy that restricts sampling to the k most likely tokens.\n\n:param type: Must be \"top_k\" to identify this sampling strategy\n:param top_k: Number of top tokens to consider for sampling. Must be at least 1" + description: Top-k sampling strategy that restricts sampling to the k most likely tokens. 
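A sketch of a ToolResponse object; the call ID, tool name, and content are hypothetical, and content is shown as a plain string for brevity:

# Illustrative ToolResponse; identifiers below are made up.
call_id: call_123
tool_name: get_weather
content: "72 degrees and sunny"
metadata:
  source: example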
TopPSamplingStrategy: properties: type: @@ -12040,7 +12090,7 @@ components: required: - temperature title: TopPSamplingStrategy - description: "Top-p (nucleus) sampling strategy that samples from the smallest set of tokens with cumulative probability >= p.\n\n:param type: Must be \"top_p\" to identify this sampling strategy\n:param temperature: Controls randomness in sampling. Higher values increase randomness\n:param top_p: Cumulative probability threshold for nucleus sampling. Defaults to 0.95" + description: Top-p (nucleus) sampling strategy that samples from the smallest set of tokens with cumulative probability >= p. TrainingConfig: properties: n_epochs: @@ -12072,7 +12122,7 @@ components: required: - n_epochs title: TrainingConfig - description: "Comprehensive configuration for the training process.\n\n:param n_epochs: Number of training epochs to run\n:param max_steps_per_epoch: Maximum number of steps to run per epoch\n:param gradient_accumulation_steps: Number of steps to accumulate gradients before updating\n:param max_validation_steps: (Optional) Maximum number of validation steps per epoch\n:param data_config: (Optional) Configuration for data loading and formatting\n:param optimizer_config: (Optional) Configuration for the optimization algorithm\n:param efficiency_config: (Optional) Configuration for memory and compute optimizations\n:param dtype: (Optional) Data type for model parameters (bf16, fp16, fp32)" + description: Comprehensive configuration for the training process. Turn: properties: turn_id: @@ -12128,7 +12178,7 @@ components: - output_message - started_at title: Turn - description: "A single turn in an interaction with an Agentic System.\n\n:param turn_id: Unique identifier for the turn within a session\n:param session_id: Unique identifier for the conversation session\n:param input_messages: List of messages that initiated this turn\n:param steps: Ordered list of processing steps executed during this turn\n:param output_message: The model's generated response containing content and metadata\n:param output_attachments: (Optional) Files or media attached to the agent's response\n:param started_at: Timestamp when the turn began\n:param completed_at: (Optional) Timestamp when the turn finished, if completed" + description: A single turn in an interaction with an Agentic System. URIDataSource: properties: type: @@ -12143,7 +12193,7 @@ components: required: - uri title: URIDataSource - description: "A dataset that can be obtained from a URI.\n:param uri: The dataset can be obtained from a URI. E.g.\n - \"https://mywebsite.com/mydata.jsonl\"\n - \"lsfs://mydata.jsonl\"\n - \"data:csv;base64,{base64_content}\"" + description: A dataset that can be obtained from a URI. URL: properties: uri: @@ -12153,7 +12203,7 @@ components: required: - uri title: URL - description: "A URL reference to external content.\n\n:param uri: The URL string pointing to the resource" + description: A URL reference to external content. UnionType: properties: type: @@ -12163,7 +12213,7 @@ components: default: union type: object title: UnionType - description: "Parameter type for union values.\n\n:param type: Discriminator type. Always \"union\"" + description: Parameter type for union values. 
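An illustrative TopPSamplingStrategy value as it might appear in SamplingParams.strategy; the temperature is arbitrary and top_p uses the documented default of 0.95:

# Sketch of a top-p strategy; temperature chosen arbitrarily for illustration.
type: top_p
temperature: 0.7
top_p: 0.95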
UserMessage-Input: properties: role: @@ -12219,7 +12269,7 @@ components: required: - content title: UserMessage - description: "A message from the user in a chat conversation.\n\n:param role: Must be \"user\" to identify this as a user message\n:param content: The content of the message, which can include text and other media\n:param context: (Optional) This field is used internally by Llama Stack to pass RAG context. This field may be removed in the API in the future." + description: A message from the user in a chat conversation. UserMessage-Output: properties: role: @@ -12275,7 +12325,7 @@ components: required: - content title: UserMessage - description: "A message from the user in a chat conversation.\n\n:param role: Must be \"user\" to identify this as a user message\n:param content: The content of the message, which can include text and other media\n:param context: (Optional) This field is used internally by Llama Stack to pass RAG context. This field may be removed in the API in the future." + description: A message from the user in a chat conversation. VectorStoreChunkingStrategyAuto: properties: type: @@ -12285,7 +12335,7 @@ components: default: auto type: object title: VectorStoreChunkingStrategyAuto - description: "Automatic chunking strategy for vector store files.\n\n:param type: Strategy type, always \"auto\" for automatic chunking" + description: Automatic chunking strategy for vector store files. VectorStoreChunkingStrategyStatic: properties: type: @@ -12299,7 +12349,7 @@ components: required: - static title: VectorStoreChunkingStrategyStatic - description: "Static chunking strategy with configurable parameters.\n\n:param type: Strategy type, always \"static\" for static chunking\n:param static: Configuration parameters for the static chunking strategy" + description: Static chunking strategy with configurable parameters. VectorStoreChunkingStrategyStaticConfig: properties: chunk_overlap_tokens: @@ -12314,7 +12364,7 @@ components: default: 800 type: object title: VectorStoreChunkingStrategyStaticConfig - description: "Configuration for static chunking strategy.\n\n:param chunk_overlap_tokens: Number of tokens to overlap between adjacent chunks\n:param max_chunk_size_tokens: Maximum number of tokens per chunk, must be between 100 and 4096" + description: Configuration for static chunking strategy. VectorStoreContent: properties: type: @@ -12329,7 +12379,7 @@ components: - type - text title: VectorStoreContent - description: "Content item from a vector store file or search result.\n\n:param type: Content type, currently only \"text\" is supported\n:param text: The actual text content" + description: Content item from a vector store file or search result. VectorStoreFileBatchObject: properties: id: @@ -12366,7 +12416,7 @@ components: - status - file_counts title: VectorStoreFileBatchObject - description: "OpenAI Vector Store File Batch object.\n\n:param id: Unique identifier for the file batch\n:param object: Object type identifier, always \"vector_store.file_batch\"\n:param created_at: Timestamp when the file batch was created\n:param vector_store_id: ID of the vector store containing the file batch\n:param status: Current processing status of the file batch\n:param file_counts: File processing status counts for the batch" + description: OpenAI Vector Store File Batch object. 
VectorStoreFileCounts: properties: completed: @@ -12392,7 +12442,7 @@ components: - in_progress - total title: VectorStoreFileCounts - description: "File processing status counts for a vector store.\n\n:param completed: Number of files that have been successfully processed\n:param cancelled: Number of files that had their processing cancelled\n:param failed: Number of files that failed to process\n:param in_progress: Number of files currently being processed\n:param total: Total number of files in the vector store" + description: File processing status counts for a vector store. VectorStoreFileLastError: properties: code: @@ -12410,7 +12460,7 @@ components: - code - message title: VectorStoreFileLastError - description: "Error information for failed vector store file processing.\n\n:param code: Error code indicating the type of failure\n:param message: Human-readable error message describing the failure" + description: Error information for failed vector store file processing. VectorStoreFileObject: properties: id: @@ -12465,7 +12515,7 @@ components: - status - vector_store_id title: VectorStoreFileObject - description: "OpenAI Vector Store File object.\n\n:param id: Unique identifier for the file\n:param object: Object type identifier, always \"vector_store.file\"\n:param attributes: Key-value attributes associated with the file\n:param chunking_strategy: Strategy used for splitting the file into chunks\n:param created_at: Timestamp when the file was added to the vector store\n:param last_error: (Optional) Error information if file processing failed\n:param status: Current processing status of the file\n:param usage_bytes: Storage space used by this file in bytes\n:param vector_store_id: ID of the vector store containing this file" + description: OpenAI Vector Store File object. VectorStoreObject: properties: id: @@ -12511,7 +12561,7 @@ components: - created_at - file_counts title: VectorStoreObject - description: "OpenAI Vector Store object.\n\n:param id: Unique identifier for the vector store\n:param object: Object type identifier, always \"vector_store\"\n:param created_at: Timestamp when the vector store was created\n:param name: (Optional) Name of the vector store\n:param usage_bytes: Storage space used by the vector store in bytes\n:param file_counts: File processing status counts for the vector store\n:param status: Current status of the vector store\n:param expires_after: (Optional) Expiration policy for the vector store\n:param expires_at: (Optional) Timestamp when the vector store will expire\n:param last_active_at: (Optional) Timestamp of last activity on the vector store\n:param metadata: Set of key-value pairs that can be attached to the vector store" + description: OpenAI Vector Store object. VectorStoreSearchResponse: properties: file_id: @@ -12543,7 +12593,7 @@ components: - score - content title: VectorStoreSearchResponse - description: "Response from searching a vector store.\n\n:param file_id: Unique identifier of the file containing the result\n:param filename: Name of the file containing the result\n:param score: Relevance score for this search result\n:param attributes: (Optional) Key-value attributes associated with the file\n:param content: List of content items matching the search query" + description: Response from searching a vector store. 
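A non-normative VectorStoreSearchResponse result with invented values, using the VectorStoreContent "text" item shape defined earlier:

# Illustrative search result; file ID, filename, score, and text are made up.
file_id: file_abc123
filename: product-notes.txt
score: 0.87
attributes:
  author: example
content:
  - type: text
    text: "Relevant passage from the source document ..."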
VectorStoreSearchResponsePage: properties: object: @@ -12570,7 +12620,7 @@ components: - search_query - data title: VectorStoreSearchResponsePage - description: "Paginated response from searching a vector store.\n\n:param object: Object type identifier for the search results page\n:param search_query: The original search query that was executed\n:param data: List of search result objects\n:param has_more: Whether there are more results available beyond this page\n:param next_page: (Optional) Token for retrieving the next page of results" + description: Paginated response from searching a vector store. VersionInfo: properties: version: @@ -12580,7 +12630,7 @@ components: required: - version title: VersionInfo - description: "Version information for the service.\n\n:param version: Version number of the service" + description: Version information for the service. ViolationLevel: type: string enum: @@ -12588,7 +12638,7 @@ components: - warn - error title: ViolationLevel - description: "Severity level of a safety violation.\n\n:cvar INFO: Informational level violation that does not require action\n:cvar WARN: Warning level violation that suggests caution but allows continuation\n:cvar ERROR: Error level violation that requires blocking or intervention" + description: Severity level of a safety violation. WeightedRanker: properties: type: @@ -12605,7 +12655,7 @@ components: default: 0.5 type: object title: WeightedRanker - description: "Weighted ranker configuration that combines vector and keyword scores.\n\n:param type: The type of ranker, always \"weighted\"\n:param alpha: Weight factor between 0 and 1.\n 0 means only use keyword scores,\n 1 means only use vector scores,\n values in between blend both scores." + description: Weighted ranker configuration that combines vector and keyword scores. _URLOrData: properties: url: @@ -12616,7 +12666,7 @@ components: type: string type: object title: _URLOrData - description: "A URL or a base64 encoded string\n\n:param url: A URL of the image or data URL in the format of data:image/{type};base64,{data}. Note that URL could have length limits.\n:param data: base64 encoded image data as string" + description: A URL or a base64 encoded string __main_____agents_agent_id_session_Request: properties: agent_id: @@ -13186,7 +13236,7 @@ components: - ranking_options title: _vector_stores_vector_store_id_search_Request Error: - description: "Error response from the API. Roughly follows RFC 7807.\n\n:param status: HTTP status code\n:param title: Error title, a short summary of the error which is invariant for an error type\n:param detail: Error detail, a longer human-readable description of the error\n:param instance: (Optional) A URL which can be used to retrieve more information about the specific occurrence of the error" + description: Error response from the API. Roughly follows RFC 7807. properties: status: title: Status @@ -13208,7 +13258,7 @@ components: title: Error type: object Agent: - description: "An agent instance with configuration and metadata.\n\n:param agent_id: Unique identifier for the agent\n:param agent_config: Configuration settings for the agent\n:param created_at: Timestamp when the agent was created" + description: An agent instance with configuration and metadata. 
properties: agent_id: title: Agent Id @@ -13226,7 +13276,7 @@ components: title: Agent type: object AgentStepResponse: - description: "Response containing details of a specific agent step.\n\n:param step: The complete step data and execution details" + description: Response containing details of a specific agent step. properties: step: discriminator: @@ -13247,7 +13297,7 @@ components: title: AgentStepResponse type: object CompletionMessage: - description: "A message containing the model's (assistant) response in a chat conversation.\n\n:param role: Must be \"assistant\" to identify this as the model's response\n:param content: The content of the model's response\n:param stop_reason: Reason why the model stopped generating. Options are:\n - `StopReason.end_of_turn`: The model finished generating the entire response.\n - `StopReason.end_of_message`: The model finished generating but generated a partial response -- usually, a tool call. The user may call the tool and continue the conversation with the tool's response.\n - `StopReason.out_of_tokens`: The model ran out of token budget.\n:param tool_calls: List of tool calls. Each tool call is a ToolCall object." + description: A message containing the model's (assistant) response in a chat conversation. properties: role: const: assistant @@ -13289,7 +13339,7 @@ components: title: CompletionMessage type: object InferenceStep: - description: "An inference step in an agent turn.\n\n:param model_response: The response from the LLM." + description: An inference step in an agent turn. properties: turn_id: title: Turn Id @@ -13321,7 +13371,7 @@ components: title: InferenceStep type: object ListOpenAIResponseInputItem: - description: "List container for OpenAI response input items.\n\n:param data: List of input items\n:param object: Object type identifier, always \"list\"" + description: List container for OpenAI response input items. properties: data: items: @@ -13359,7 +13409,7 @@ components: title: ListOpenAIResponseInputItem type: object ListOpenAIResponseObject: - description: "Paginated list of OpenAI response objects with navigation metadata.\n\n:param data: List of response objects with their input context\n:param has_more: Whether there are more results available beyond this page\n:param first_id: Identifier of the first item in this page\n:param last_id: Identifier of the last item in this page\n:param object: Object type identifier, always \"list\"" + description: Paginated list of OpenAI response objects with navigation metadata. properties: data: items: @@ -13388,7 +13438,7 @@ components: title: ListOpenAIResponseObject type: object MemoryRetrievalStep: - description: "A memory retrieval step in an agent turn.\n\n:param vector_store_ids: The IDs of the vector databases to retrieve context from.\n:param inserted_context: The context retrieved from the vector databases." + description: A memory retrieval step in an agent turn. properties: turn_id: title: Turn Id @@ -13444,7 +13494,7 @@ components: title: MemoryRetrievalStep type: object OpenAIDeleteResponseObject: - description: "Response object confirming deletion of an OpenAI response.\n\n:param id: Unique identifier of the deleted response\n:param object: Object type identifier, always \"response\"\n:param deleted: Deletion confirmation flag, always True" + description: Response object confirming deletion of an OpenAI response. 
properties: id: title: Id @@ -13463,7 +13513,7 @@ components: title: OpenAIDeleteResponseObject type: object PaginatedResponse: - description: "A generic paginated response that follows a simple format.\n\n:param data: The list of items for the current page\n:param has_more: Whether there are more items available after this set\n:param url: The URL for accessing this list" + description: A generic paginated response that follows a simple format. properties: data: items: @@ -13484,7 +13534,7 @@ components: title: PaginatedResponse type: object Session: - description: "A single session of an interaction with an Agentic System.\n\n:param session_id: Unique identifier for the conversation session\n:param session_name: Human-readable name for the session\n:param turns: List of all turns that have occurred in this session\n:param started_at: Timestamp when the session was created" + description: A single session of an interaction with an Agentic System. properties: session_id: title: Session Id @@ -13509,7 +13559,7 @@ components: title: Session type: object ShieldCallStep: - description: "A shield call step in an agent turn.\n\n:param violation: The violation from the shield call." + description: A shield call step in an agent turn. properties: turn_id: title: Turn Id @@ -13541,7 +13591,7 @@ components: title: ShieldCallStep type: object ToolExecutionStep: - description: "A tool execution step in an agent turn.\n\n:param tool_calls: The tool calls to execute.\n:param tool_responses: The tool responses from the tool calls." + description: A tool execution step in an agent turn. properties: turn_id: title: Turn Id @@ -13582,7 +13632,7 @@ components: title: ToolExecutionStep type: object ToolResponse: - description: "Response from a tool invocation.\n\n:param call_id: Unique identifier for the tool call this response is for\n:param tool_name: Name of the tool that was invoked\n:param content: The response content from the tool\n:param metadata: (Optional) Additional metadata about the tool response" + description: Response from a tool invocation. properties: call_id: title: Call Id @@ -13701,7 +13751,7 @@ components: title: ConversationItemDeletedResource type: object ListOpenAIFileResponse: - description: "Response for listing files in OpenAI Files API.\n\n:param data: List of file objects\n:param has_more: Whether there are more files available beyond this page\n:param first_id: ID of the first file in the list for pagination\n:param last_id: ID of the last file in the list for pagination\n:param object: The object type, which is always \"list\"" + description: Response for listing files in OpenAI Files API. properties: data: items: @@ -13730,7 +13780,7 @@ components: title: ListOpenAIFileResponse type: object OpenAIFileDeleteResponse: - description: "Response for deleting a file in OpenAI Files API.\n\n:param id: The file identifier that was deleted\n:param object: The object type, which is always \"file\"\n:param deleted: Whether the file was successfully deleted" + description: Response for deleting a file in OpenAI Files API. 
properties: id: title: Id @@ -13749,7 +13799,7 @@ components: title: OpenAIFileDeleteResponse type: object ListOpenAIChatCompletionResponse: - description: "Response from listing OpenAI-compatible chat completions.\n\n:param data: List of chat completion objects with their input messages\n:param has_more: Whether there are more completions available beyond this list\n:param first_id: ID of the first completion in this list\n:param last_id: ID of the last completion in this list\n:param object: Must be \"list\" to identify this as a list response" + description: Response from listing OpenAI-compatible chat completions. properties: data: items: @@ -15440,7 +15490,7 @@ components: title: ListOpenAIChatCompletionResponse type: object OpenAIAssistantMessageParam: - description: "A message containing the model's (assistant) response in an OpenAI-compatible chat completion request.\n\n:param role: Must be \"assistant\" to identify this as the model's response\n:param content: The content of the model's response\n:param name: (Optional) The name of the assistant message participant.\n:param tool_calls: List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object." + description: A message containing the model's (assistant) response in an OpenAI-compatible chat completion request. properties: role: const: assistant @@ -15468,7 +15518,7 @@ components: title: OpenAIAssistantMessageParam type: object OpenAIChoice: - description: "A choice from an OpenAI-compatible chat completion response.\n\n:param message: The message from the model\n:param finish_reason: The reason the model stopped generating\n:param index: The index of the choice\n:param logprobs: (Optional) The log probabilities for the tokens in the message" + description: A choice from an OpenAI-compatible chat completion response. properties: message: discriminator: @@ -15502,7 +15552,7 @@ components: title: OpenAIChoice type: object OpenAIChoiceLogprobs: - description: "The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response.\n\n:param content: (Optional) The log probabilities for the tokens in the message\n:param refusal: (Optional) The log probabilities for the tokens in the message" + description: The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response. properties: content: title: Content @@ -15569,7 +15619,7 @@ components: title: OpenAICompletionWithInputMessages type: object OpenAIUserMessageParam: - description: "A message from the user in an OpenAI-compatible chat completion request.\n\n:param role: Must be \"user\" to identify this as a user message\n:param content: The content of the message, which can include text and other media\n:param name: (Optional) The name of the user message participant." + description: A message from the user in an OpenAI-compatible chat completion request. properties: role: const: user @@ -15601,7 +15651,7 @@ components: title: OpenAIUserMessageParam type: object Checkpoint: - description: "Checkpoint created during training runs.\n\n:param identifier: Unique identifier for the checkpoint\n:param created_at: Timestamp when the checkpoint was created\n:param epoch: Training epoch when the checkpoint was saved\n:param post_training_job_id: Identifier of the training job that created this checkpoint\n:param path: File system path where the checkpoint is stored\n:param training_metrics: (Optional) Training metrics associated with this checkpoint" + description: Checkpoint created during training runs. 
properties: identifier: title: Identifier @@ -15631,7 +15681,7 @@ components: title: Checkpoint type: object PostTrainingJobArtifactsResponse: - description: "Artifacts of a finetuning job.\n\n:param job_uuid: Unique identifier for the training job\n:param checkpoints: List of model checkpoints created during training" + description: Artifacts of a finetuning job. properties: job_uuid: title: Job Uuid @@ -15646,7 +15696,7 @@ components: title: PostTrainingJobArtifactsResponse type: object PostTrainingJobStatusResponse: - description: "Status of a finetuning job.\n\n:param job_uuid: Unique identifier for the training job\n:param status: Current status of the training job\n:param scheduled_at: (Optional) Timestamp when the job was scheduled\n:param started_at: (Optional) Timestamp when the job execution began\n:param completed_at: (Optional) Timestamp when the job finished, if completed\n:param resources_allocated: (Optional) Information about computational resources allocated to the job\n:param checkpoints: List of model checkpoints created during training" + description: Status of a finetuning job. properties: job_uuid: title: Job Uuid @@ -15684,7 +15734,7 @@ components: title: PostTrainingJobStatusResponse type: object ScoringFn: - description: "A scoring function resource for evaluating model outputs.\n:param type: The resource type, always scoring_function" + description: A scoring function resource for evaluating model outputs. properties: identifier: description: Unique identifier for this resource in llama stack @@ -16484,7 +16534,7 @@ components: title: URL type: object ListToolDefsResponse: - description: "Response containing a list of tool definitions.\n\n:param data: List of tool definitions" + description: Response containing a list of tool definitions. properties: data: items: @@ -16496,7 +16546,7 @@ components: title: ListToolDefsResponse type: object VectorStoreDeleteResponse: - description: "Response from deleting a vector store.\n\n:param id: Unique identifier of the deleted vector store\n:param object: Object type identifier for the deletion response\n:param deleted: Whether the deletion operation was successful" + description: Response from deleting a vector store. properties: id: title: Id @@ -16514,7 +16564,7 @@ components: title: VectorStoreDeleteResponse type: object VectorStoreFileContentsResponse: - description: "Response from retrieving the contents of a vector store file.\n\n:param file_id: Unique identifier for the file\n:param filename: Name of the file\n:param attributes: Key-value attributes associated with the file\n:param content: List of content items from the file" + description: Response from retrieving the contents of a vector store file. properties: file_id: title: File Id @@ -16539,7 +16589,7 @@ components: title: VectorStoreFileContentsResponse type: object VectorStoreFileDeleteResponse: - description: "Response from deleting a vector store file.\n\n:param id: Unique identifier of the deleted file\n:param object: Object type identifier for the deletion response\n:param deleted: Whether the deletion operation was successful" + description: Response from deleting a vector store file. 
properties: id: title: Id @@ -16557,7 +16607,7 @@ components: title: VectorStoreFileDeleteResponse type: object VectorStoreFilesListInBatchResponse: - description: "Response from listing files in a vector store file batch.\n\n:param object: Object type identifier, always \"list\"\n:param data: List of vector store file objects in the batch\n:param first_id: (Optional) ID of the first file in the list for pagination\n:param last_id: (Optional) ID of the last file in the list for pagination\n:param has_more: Whether there are more files available beyond this page" + description: Response from listing files in a vector store file batch. properties: object: default: list @@ -16585,7 +16635,7 @@ components: title: VectorStoreFilesListInBatchResponse type: object VectorStoreListFilesResponse: - description: "Response from listing files in a vector store.\n\n:param object: Object type identifier, always \"list\"\n:param data: List of vector store file objects\n:param first_id: (Optional) ID of the first file in the list for pagination\n:param last_id: (Optional) ID of the last file in the list for pagination\n:param has_more: Whether there are more files available beyond this page" + description: Response from listing files in a vector store. properties: object: default: list @@ -16613,7 +16663,7 @@ components: title: VectorStoreListFilesResponse type: object VectorStoreListResponse: - description: "Response from listing vector stores.\n\n:param object: Object type identifier, always \"list\"\n:param data: List of vector store objects\n:param first_id: (Optional) ID of the first vector store in the list for pagination\n:param last_id: (Optional) ID of the last vector store in the list for pagination\n:param has_more: Whether there are more vector stores available beyond this page" + description: Response from listing vector stores. properties: object: default: list @@ -16699,7 +16749,7 @@ components: title: OpenAIResponseMessage type: object OpenAIResponseObjectWithInput: - description: "OpenAI response object extended with input context information.\n\n:param input: List of input items that led to this response" + description: OpenAI response object extended with input context information. properties: created_at: title: Created At @@ -18473,7 +18523,7 @@ components: title: OpenAIResponseObjectWithInput type: object ImageContentItem: - description: "A image content item\n\n:param type: Discriminator type of the content item. Always \"image\"\n:param image: Image as a base64 encoded string or an URL" + description: An image content item properties: type: const: image @@ -18688,9 +18738,9 @@ components: example: status: 500 title: Internal Server Error - detail: An unexpected error occurred + detail: An unexpected error occurred. Our team has been notified. 
DefaultError: - description: An error occurred + description: An unexpected error occurred content: application/json: schema: diff --git a/docs/static/deprecated-llama-stack-spec.json b/docs/static/deprecated-llama-stack-spec.json new file mode 100644 index 000000000..8614bd0b7 --- /dev/null +++ b/docs/static/deprecated-llama-stack-spec.json @@ -0,0 +1,18640 @@ +{ + "openapi": "3.1.0", + "info": { + "title": "Llama Stack API", + "description": "A comprehensive API for building and deploying AI applications", + "version": "1.0.0" + }, + "servers": [ + { + "url": "https://api.llamastack.com", + "description": "Production server" + }, + { + "url": "https://staging-api.llamastack.com", + "description": "Staging server" + } + ], + "paths": { + "/v1/agents": { + "get": { + "tags": [ + "V1" + ], + "summary": "List all agents.", + "description": "Query endpoint for proper schema generation.", + "operationId": "list_agents_v1_agents_get", + "deprecated": true, + "parameters": [ + { + "name": "limit", + "in": "query", + "required": true, + "schema": { + "type": "integer", + "title": "Limit" + } + }, + { + "name": "start_index", + "in": "query", + "required": true, + "schema": { + "type": "integer", + "title": "Start Index" + } + } + ], + "responses": { + "200": { + "description": "A PaginatedResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PaginatedResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Create an agent with the given configuration.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "create_agent_v1_agents_post", + "deprecated": true, + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AgentConfig" + } + } + } + }, + "responses": { + "200": { + "description": "An AgentCreateResponse with the agent ID.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AgentCreateResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/agents/{agent_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Delete an agent by its ID and its associated sessions and turns.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "delete_agent_v1_agents__agent_id__delete", + "deprecated": true, + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + 
"description": "The ID of the agent to delete." + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Describe an agent by its ID.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_agent_v1_agents__agent_id__get", + "deprecated": true, + "parameters": [ + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Agent Id" + }, + "description": "ID of the agent." + } + ], + "responses": { + "200": { + "description": "An Agent of the agent.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Agent" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/agents/{agent_id}/session": { + "post": { + "tags": [ + "V1" + ], + "summary": "Create a new session for an agent.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "create_agent_session_v1_agents__agent_id__session_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/__main_____agents_agent_id_session_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "An AgentSessionCreateResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AgentSessionCreateResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "deprecated": true, + "parameters": [ + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the agent to create the session for." 
+ } + ] + } + }, + "/v1/agents/{agent_id}/session/{session_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Delete an agent session by its ID and its associated turns.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "delete_agents_session_v1_agents__agent_id__session__session_id__delete", + "deprecated": true, + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "session_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the session to delete." + }, + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the agent to delete the session for." + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieve an agent session by its ID.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_agents_session_v1_agents__agent_id__session__session_id__get", + "deprecated": true, + "parameters": [ + { + "name": "turn_ids", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Turn Ids" + } + }, + { + "name": "session_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Session Id" + }, + "description": "The ID of the session to get." + }, + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Agent Id" + }, + "description": "The ID of the agent to get the session for." 
+ } + ], + "responses": { + "200": { + "description": "A Session.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Session" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/agents/{agent_id}/session/{session_id}/turn": { + "post": { + "tags": [ + "V1" + ], + "summary": "Create a new turn for an agent.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "create_agent_turn_v1_agents__agent_id__session__session_id__turn_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/__main_____agents_agent_id_session_session_id_turn_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "If stream=False, returns a Turn object.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Turn" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "deprecated": true, + "parameters": [ + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the agent to create the turn for." + }, + { + "name": "session_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the session to create the turn for." + } + ] + } + }, + "/v1/agents/{agent_id}/session/{session_id}/turn/{turn_id}": { + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieve an agent turn by its ID.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_agents_turn_v1_agents__agent_id__session__session_id__turn__turn_id__get", + "deprecated": true, + "parameters": [ + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Agent Id" + }, + "description": "The ID of the agent to get the turn for." + }, + { + "name": "session_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Session Id" + }, + "description": "The ID of the session to get the turn for." + }, + { + "name": "turn_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Turn Id" + }, + "description": "The ID of the turn to get." 
+ } + ], + "responses": { + "200": { + "description": "A Turn.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Turn" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/agents/{agent_id}/session/{session_id}/turn/{turn_id}/resume": { + "post": { + "tags": [ + "V1" + ], + "summary": "Resume an agent turn with executed tool call responses.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "resume_agent_turn_v1_agents__agent_id__session__session_id__turn__turn_id__resume_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/__main_____agents_agent_id_session_session_id_turn_turn_id_resume_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A Turn object if stream is False, otherwise an AsyncIterator of AgentTurnResponseStreamChunk objects.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Turn" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "deprecated": true, + "parameters": [ + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the agent to resume." + }, + { + "name": "session_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the session to resume." + }, + { + "name": "turn_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the turn to resume." + } + ] + } + }, + "/v1/agents/{agent_id}/session/{session_id}/turn/{turn_id}/step/{step_id}": { + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieve an agent step by its ID.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_agents_step_v1_agents__agent_id__session__session_id__turn__turn_id__step__step_id__get", + "deprecated": true, + "parameters": [ + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Agent Id" + }, + "description": "The ID of the agent to get the step for." + }, + { + "name": "session_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Session Id" + }, + "description": "The ID of the session to get the step for." + }, + { + "name": "turn_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Turn Id" + }, + "description": "The ID of the turn to get the step for." + }, + { + "name": "step_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Step Id" + }, + "description": "The ID of the step to get." 
+ } + ], + "responses": { + "200": { + "description": "An AgentStepResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AgentStepResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/agents/{agent_id}/sessions": { + "get": { + "tags": [ + "V1" + ], + "summary": "List all session(s) of a given agent.", + "description": "Query endpoint for proper schema generation.", + "operationId": "list_agent_sessions_v1_agents__agent_id__sessions_get", + "deprecated": true, + "parameters": [ + { + "name": "limit", + "in": "query", + "required": true, + "schema": { + "type": "integer", + "title": "Limit" + } + }, + { + "name": "start_index", + "in": "query", + "required": true, + "schema": { + "type": "integer", + "title": "Start Index" + } + }, + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Agent Id" + }, + "description": "The ID of the agent to list sessions for." + } + ], + "responses": { + "200": { + "description": "A PaginatedResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PaginatedResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/datasetio/append-rows/{dataset_id}": { + "post": { + "tags": [ + "V1" + ], + "summary": "Append rows to a dataset.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "append_rows_v1_datasetio_append_rows__dataset_id__post", + "deprecated": true, + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "dataset_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "Path parameter: dataset_id" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/datasetio/iterrows/{dataset_id}": { + "get": { + "tags": [ + "V1" + ], + "summary": "Get a paginated list of rows from a dataset.", + "description": "Query endpoint for proper schema generation.", + "operationId": 
"iterrows_v1_datasetio_iterrows__dataset_id__get", + "deprecated": true, + "parameters": [ + { + "name": "limit", + "in": "query", + "required": true, + "schema": { + "type": "integer", + "title": "Limit" + } + }, + { + "name": "start_index", + "in": "query", + "required": true, + "schema": { + "type": "integer", + "title": "Start Index" + } + }, + { + "name": "dataset_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Dataset Id" + } + } + ], + "responses": { + "200": { + "description": "A PaginatedResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PaginatedResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/datasets": { + "get": { + "tags": [ + "V1" + ], + "summary": "List all datasets.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "list_datasets_v1_datasets_get", + "responses": { + "200": { + "description": "A ListDatasetsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListDatasetsResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "deprecated": true + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Register a new dataset.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "register_dataset_v1_datasets_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/__main_____datasets_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A Dataset.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Dataset" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "deprecated": true + } + }, + "/v1/datasets/{dataset_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Unregister a dataset by its ID.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "unregister_dataset_v1_datasets__dataset_id__delete", + "deprecated": true, + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "dataset_id", + "in": "path", + "required": 
true, + "schema": { + "type": "string" + }, + "description": "Path parameter: dataset_id" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Get a dataset by its ID.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_dataset_v1_datasets__dataset_id__get", + "deprecated": true, + "parameters": [ + { + "name": "dataset_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Dataset Id" + } + } + ], + "responses": { + "200": { + "description": "A Dataset.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Dataset" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/eval/benchmarks": { + "get": { + "tags": [ + "V1" + ], + "summary": "List all benchmarks.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "list_benchmarks_v1_eval_benchmarks_get", + "deprecated": true, + "responses": { + "200": { + "description": "A ListBenchmarksResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListBenchmarksResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Register a benchmark.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "register_benchmark_v1_eval_benchmarks_post", + "deprecated": true, + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": 
"Default Response" + } + } + } + }, + "/v1/eval/benchmarks/{benchmark_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Unregister a benchmark.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "unregister_benchmark_v1_eval_benchmarks__benchmark_id__delete", + "deprecated": true, + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "benchmark_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the benchmark to unregister." + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Get a benchmark by its ID.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_benchmark_v1_eval_benchmarks__benchmark_id__get", + "deprecated": true, + "parameters": [ + { + "name": "benchmark_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Benchmark Id" + }, + "description": "The ID of the benchmark to get." + } + ], + "responses": { + "200": { + "description": "A Benchmark.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Benchmark" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/eval/benchmarks/{benchmark_id}/evaluations": { + "post": { + "tags": [ + "V1" + ], + "summary": "Evaluate a list of rows on a benchmark.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "evaluate_rows_v1_eval_benchmarks__benchmark_id__evaluations_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BenchmarkConfig" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "EvaluateResponse object containing generations and scores.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/EvaluateResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "deprecated": true, + "parameters": [ + { + "name": "benchmark_id", + 
"in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the benchmark to run the evaluation on." + } + ] + } + }, + "/v1/eval/benchmarks/{benchmark_id}/jobs": { + "post": { + "tags": [ + "V1" + ], + "summary": "Run an evaluation on a benchmark.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "run_eval_v1_eval_benchmarks__benchmark_id__jobs_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BenchmarkConfig" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "The job that was created to run the evaluation.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Job" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "deprecated": true, + "parameters": [ + { + "name": "benchmark_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the benchmark to run the evaluation on." + } + ] + } + }, + "/v1/eval/benchmarks/{benchmark_id}/jobs/{job_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Cancel a job.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "job_cancel_v1_eval_benchmarks__benchmark_id__jobs__job_id__delete", + "deprecated": true, + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "benchmark_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the benchmark to run the evaluation on." + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the job to cancel." + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Get the status of a job.", + "description": "Query endpoint for proper schema generation.", + "operationId": "job_status_v1_eval_benchmarks__benchmark_id__jobs__job_id__get", + "deprecated": true, + "parameters": [ + { + "name": "benchmark_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Benchmark Id" + }, + "description": "The ID of the benchmark to run the evaluation on." + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Job Id" + }, + "description": "The ID of the job to get the status of." 
+ } + ], + "responses": { + "200": { + "description": "The status of the evaluation job.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Job" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result": { + "get": { + "tags": [ + "V1" + ], + "summary": "Get the result of a job.", + "description": "Query endpoint for proper schema generation.", + "operationId": "job_result_v1_eval_benchmarks__benchmark_id__jobs__job_id__result_get", + "deprecated": true, + "parameters": [ + { + "name": "benchmark_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Benchmark Id" + }, + "description": "The ID of the benchmark to run the evaluation on." + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Job Id" + }, + "description": "The ID of the job to get the result of." + } + ], + "responses": { + "200": { + "description": "The result of the job.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/EvaluateResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/openai/v1/batches": { + "get": { + "tags": [ + "V1" + ], + "summary": "List all batches for the current user.", + "description": "Query endpoint for proper schema generation.", + "operationId": "list_batches_v1_openai_v1_batches_get", + "deprecated": true, + "parameters": [ + { + "name": "after", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "After" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "default": 20, + "title": "Limit" + } + } + ], + "responses": { + "200": { + "description": "A list of batch objects.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListBatchesResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Create a new batch for processing multiple API requests.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "create_batch_v1_openai_v1_batches_post", + "deprecated": true, + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/_openai_v1_batches_Request" + } + } + } + }, + "responses": { + "200": { + "description": "The created batch object.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Batch" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/openai/v1/batches/{batch_id}": { + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieve information about a specific batch.", + "description": "Query endpoint for proper schema generation.", + "operationId": "retrieve_batch_v1_openai_v1_batches__batch_id__get", + "deprecated": true, + "parameters": [ + { + "name": "batch_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Batch Id" + }, + "description": "The ID of the batch to retrieve." + } + ], + "responses": { + "200": { + "description": "The batch object.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Batch" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/openai/v1/batches/{batch_id}/cancel": { + "post": { + "tags": [ + "V1" + ], + "summary": "Cancel a batch that is in progress.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "cancel_batch_v1_openai_v1_batches__batch_id__cancel_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_openai_v1_batches_batch_id_cancel_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "The updated batch object.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Batch" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "deprecated": true, + "parameters": [ + { + "name": "batch_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the batch to cancel." 
+ } + ] + } + }, + "/v1/openai/v1/chat/completions": { + "get": { + "tags": [ + "V1" + ], + "summary": "List chat completions.", + "description": "Query endpoint for proper schema generation.", + "operationId": "list_chat_completions_v1_openai_v1_chat_completions_get", + "deprecated": true, + "parameters": [ + { + "name": "after", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "After" + } + }, + { + "name": "model", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Model" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "default": 20, + "title": "Limit" + } + }, + { + "name": "order", + "in": "query", + "required": false, + "schema": { + "$ref": "#/components/schemas/Order", + "default": "desc" + } + } + ], + "responses": { + "200": { + "description": "A ListOpenAIChatCompletionResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListOpenAIChatCompletionResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Create chat completions.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_chat_completion_v1_openai_v1_chat_completions_post", + "deprecated": true, + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIChatCompletionRequestWithExtraBody" + } + } + } + }, + "responses": { + "200": { + "description": "An OpenAIChatCompletion.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIChatCompletion" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/openai/v1/chat/completions/{completion_id}": { + "get": { + "tags": [ + "V1" + ], + "summary": "Get chat completion.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_chat_completion_v1_openai_v1_chat_completions__completion_id__get", + "deprecated": true, + "parameters": [ + { + "name": "completion_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Completion Id" + }, + "description": "ID of the chat completion." 
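
The chat completions endpoint above is OpenAI-compatible (OpenAIChatCompletionRequestWithExtraBody), so a plain POST with model and messages works. A minimal sketch, assuming a local server and a hypothetical model ID:

import requests

BASE_URL = "http://localhost:8321"  # assumed local server address

resp = requests.post(
    f"{BASE_URL}/v1/openai/v1/chat/completions",
    json={
        "model": "example-model",   # hypothetical model ID
        "messages": [{"role": "user", "content": "Say hello in one sentence."}],
    },
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])
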
+ } + ], + "responses": { + "200": { + "description": "A OpenAICompletionWithInputMessages.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAICompletionWithInputMessages" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/openai/v1/completions": { + "post": { + "tags": [ + "V1" + ], + "summary": "Create completion.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_completion_v1_openai_v1_completions_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAICompletionRequestWithExtraBody" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "An OpenAICompletion.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAICompletion" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "deprecated": true + } + }, + "/v1/openai/v1/embeddings": { + "post": { + "tags": [ + "V1" + ], + "summary": "Create embeddings.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_embeddings_v1_openai_v1_embeddings_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIEmbeddingsRequestWithExtraBody" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "An OpenAIEmbeddingsResponse containing the embeddings.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIEmbeddingsResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "deprecated": true + } + }, + "/v1/openai/v1/files": { + "get": { + "tags": [ + "V1" + ], + "summary": "List files.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_list_files_v1_openai_v1_files_get", + "deprecated": true, + "parameters": [ + { + "name": "after", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "After" + } + }, + { + "name": "purpose", + "in": "query", + "required": true, + "schema": { + "$ref": "#/components/schemas/OpenAIFilePurpose" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "default": 10000, + "title": "Limit" + } + }, + { + "name": "order", + "in": "query", + "required": 
false, + "schema": { + "$ref": "#/components/schemas/Order", + "default": "desc" + } + } + ], + "responses": { + "200": { + "description": "An ListOpenAIFileResponse containing the list of files.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListOpenAIFileResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Upload file.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "openai_upload_file_v1_openai_v1_files_post", + "deprecated": true, + "responses": { + "200": { + "description": "An OpenAIFileObject representing the uploaded file.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIFileObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/openai/v1/files/{file_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Delete file.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_delete_file_v1_openai_v1_files__file_id__delete", + "deprecated": true, + "parameters": [ + { + "name": "file_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "File Id" + }, + "description": "The ID of the file to use for this request." + } + ], + "responses": { + "200": { + "description": "An OpenAIFileDeleteResponse indicating successful deletion.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIFileDeleteResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieve file.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_retrieve_file_v1_openai_v1_files__file_id__get", + "deprecated": true, + "parameters": [ + { + "name": "file_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "File Id" + }, + "description": "The ID of the file to use for this request." 
+ } + ], + "responses": { + "200": { + "description": "An OpenAIFileObject containing file information.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIFileObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/openai/v1/files/{file_id}/content": { + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieve file content.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "openai_retrieve_file_content_v1_openai_v1_files__file_id__content_get", + "deprecated": true, + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "file_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the file to use for this request." + } + ], + "responses": { + "200": { + "description": "The raw file content as a binary response.", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/openai/v1/models": { + "get": { + "tags": [ + "V1" + ], + "summary": "List models using the OpenAI API.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "openai_list_models_v1_openai_v1_models_get", + "responses": { + "200": { + "description": "A OpenAIListModelsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIListModelsResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "deprecated": true + } + }, + "/v1/openai/v1/moderations": { + "post": { + "tags": [ + "V1" + ], + "summary": "Create moderation.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "run_moderation_v1_openai_v1_moderations_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_openai_v1_moderations_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A moderation object.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ModerationObject" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": 
"#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "deprecated": true + } + }, + "/v1/openai/v1/responses": { + "get": { + "tags": [ + "V1" + ], + "summary": "List all responses.", + "description": "Query endpoint for proper schema generation.", + "operationId": "list_openai_responses_v1_openai_v1_responses_get", + "deprecated": true, + "parameters": [ + { + "name": "after", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "After" + } + }, + { + "name": "model", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Model" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "default": 50, + "title": "Limit" + } + }, + { + "name": "order", + "in": "query", + "required": false, + "schema": { + "$ref": "#/components/schemas/Order", + "default": "desc" + } + } + ], + "responses": { + "200": { + "description": "A ListOpenAIResponseObject.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListOpenAIResponseObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Create a model response.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "create_openai_response_v1_openai_v1_responses_post", + "deprecated": true, + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_openai_v1_responses_Request" + } + } + } + }, + "responses": { + "200": { + "description": "An OpenAIResponseObject.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIResponseObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/openai/v1/responses/{response_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Delete a response.", + "description": "Query endpoint for proper schema generation.", + "operationId": "delete_openai_response_v1_openai_v1_responses__response_id__delete", + "deprecated": true, + "parameters": [ + { + "name": "response_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Response Id" + }, + "description": "The ID of the OpenAI response to delete." 
+ } + ], + "responses": { + "200": { + "description": "An OpenAIDeleteResponseObject", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIDeleteResponseObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Get a model response.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_openai_response_v1_openai_v1_responses__response_id__get", + "deprecated": true, + "parameters": [ + { + "name": "response_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Response Id" + }, + "description": "The ID of the OpenAI response to retrieve." + } + ], + "responses": { + "200": { + "description": "An OpenAIResponseObject.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIResponseObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/openai/v1/responses/{response_id}/input_items": { + "get": { + "tags": [ + "V1" + ], + "summary": "List input items.", + "description": "Query endpoint for proper schema generation.", + "operationId": "list_openai_response_input_items_v1_openai_v1_responses__response_id__input_items_get", + "deprecated": true, + "parameters": [ + { + "name": "after", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "After" + } + }, + { + "name": "before", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Before" + } + }, + { + "name": "include", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Include" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "default": 20, + "title": "Limit" + } + }, + { + "name": "order", + "in": "query", + "required": false, + "schema": { + "$ref": "#/components/schemas/Order", + "default": "desc" + } + }, + { + "name": "response_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Response Id" + }, + "description": "The ID of the response to retrieve input items for." 
+ } + ], + "responses": { + "200": { + "description": "An ListOpenAIResponseInputItem.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListOpenAIResponseInputItem" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/openai/v1/vector_stores": { + "get": { + "tags": [ + "V1" + ], + "summary": "Returns a list of vector stores.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_list_vector_stores_v1_openai_v1_vector_stores_get", + "deprecated": true, + "parameters": [ + { + "name": "after", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "After" + } + }, + { + "name": "before", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Before" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "default": 20, + "title": "Limit" + } + }, + { + "name": "order", + "in": "query", + "required": false, + "schema": { + "type": "string", + "default": "desc", + "title": "Order" + } + } + ], + "responses": { + "200": { + "description": "A VectorStoreListResponse containing the list of vector stores.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreListResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Creates a vector store.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_create_vector_store_v1_openai_v1_vector_stores_post", + "deprecated": true, + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAICreateVectorStoreRequestWithExtraBody" + } + } + } + }, + "responses": { + "200": { + "description": "A VectorStoreObject representing the created vector store.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/openai/v1/vector_stores/{vector_store_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Delete a vector store.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_delete_vector_store_v1_openai_v1_vector_stores__vector_store_id__delete", 
+ "deprecated": true, + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Vector Store Id" + }, + "description": "The ID of the vector store to delete." + } + ], + "responses": { + "200": { + "description": "A VectorStoreDeleteResponse indicating the deletion status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreDeleteResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieves a vector store.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_retrieve_vector_store_v1_openai_v1_vector_stores__vector_store_id__get", + "deprecated": true, + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Vector Store Id" + }, + "description": "The ID of the vector store to retrieve." + } + ], + "responses": { + "200": { + "description": "A VectorStoreObject representing the vector store.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Updates a vector store.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_update_vector_store_v1_openai_v1_vector_stores__vector_store_id__post", + "deprecated": true, + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_openai_v1_vector_stores_vector_store_id_Request" + } + } + } + }, + "responses": { + "200": { + "description": "A VectorStoreObject representing the updated vector store.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + }, + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the vector store to update." 
+ } + ] + } + }, + "/v1/openai/v1/vector_stores/{vector_store_id}/file_batches": { + "post": { + "tags": [ + "V1" + ], + "summary": "Create a vector store file batch.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_create_vector_store_file_batch_v1_openai_v1_vector_stores__vector_store_id__file_batches_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAICreateVectorStoreFileBatchRequestWithExtraBody" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A VectorStoreFileBatchObject representing the created file batch.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileBatchObject" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "deprecated": true, + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the vector store to create the file batch for." + } + ] + } + }, + "/v1/openai/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}": { + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieve a vector store file batch.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_retrieve_vector_store_file_batch_v1_openai_v1_vector_stores__vector_store_id__file_batches__batch_id__get", + "deprecated": true, + "parameters": [ + { + "name": "batch_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Batch Id" + }, + "description": "The ID of the file batch to retrieve." + }, + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Vector Store Id" + }, + "description": "The ID of the vector store containing the file batch." 
+ } + ], + "responses": { + "200": { + "description": "A VectorStoreFileBatchObject representing the file batch.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileBatchObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/openai/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel": { + "post": { + "tags": [ + "V1" + ], + "summary": "Cancels a vector store file batch.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_cancel_vector_store_file_batch_v1_openai_v1_vector_stores__vector_store_id__file_batches__batch_id__cancel_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_openai_v1_vector_stores_vector_store_id_file_batches_batch_id_cancel_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A VectorStoreFileBatchObject representing the cancelled file batch.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileBatchObject" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "deprecated": true, + "parameters": [ + { + "name": "batch_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the file batch to cancel." + }, + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the vector store containing the file batch." 
+ } + ] + } + }, + "/v1/openai/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/files": { + "get": { + "tags": [ + "V1" + ], + "summary": "Returns a list of vector store files in a batch.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_list_files_in_vector_store_file_batch_v1_openai_v1_vector_stores__vector_store_id__file_batches__batch_id__files_get", + "deprecated": true, + "parameters": [ + { + "name": "after", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "After" + } + }, + { + "name": "before", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Before" + } + }, + { + "name": "filter", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Filter" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "default": 20, + "title": "Limit" + } + }, + { + "name": "order", + "in": "query", + "required": false, + "schema": { + "type": "string", + "default": "desc", + "title": "Order" + } + }, + { + "name": "batch_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Batch Id" + }, + "description": "The ID of the file batch to list files from." + }, + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Vector Store Id" + }, + "description": "The ID of the vector store containing the file batch." + } + ], + "responses": { + "200": { + "description": "A VectorStoreFilesListInBatchResponse containing the list of files in the batch.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFilesListInBatchResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/openai/v1/vector_stores/{vector_store_id}/files": { + "get": { + "tags": [ + "V1" + ], + "summary": "List files in a vector store.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_list_files_in_vector_store_v1_openai_v1_vector_stores__vector_store_id__files_get", + "deprecated": true, + "parameters": [ + { + "name": "after", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "After" + } + }, + { + "name": "before", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Before" + } + }, + { + "name": "filter", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Filter" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "default": 20, + "title": "Limit" + } + }, + { + "name": "order", + "in": "query", + "required": false, + "schema": { + "type": "string", + "default": "desc", + "title": "Order" + } + }, + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Vector Store Id" + }, + "description": "The ID of the vector store to list files from." 
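
A sketch of the file batch endpoints above: create a batch of files in a vector store, poll it, and list its files. The file_ids field follows the OpenAI convention and is an assumption; the OpenAICreateVectorStoreFileBatchRequestWithExtraBody schema is authoritative, the batch "id" and "status" field names and values are assumed, and note that the generated spec marks after, before, and filter as required query parameters on the listing endpoints even though servers may treat them as optional.

import time
import requests

BASE_URL = "http://localhost:8321"  # assumed local server address
VS = f"{BASE_URL}/v1/openai/v1/vector_stores"
store_id = "vs_abc123"              # hypothetical vector store ID

batch = requests.post(f"{VS}/{store_id}/file_batches",
                      json={"file_ids": ["file-abc123"]}).json()
batch_id = batch["id"]  # field name assumed from VectorStoreFileBatchObject

while requests.get(f"{VS}/{store_id}/file_batches/{batch_id}").json().get("status") == "in_progress":
    time.sleep(2)       # status value assumed to mirror the OpenAI API

files = requests.get(f"{VS}/{store_id}/file_batches/{batch_id}/files",
                     params={"limit": 20}).json()
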
+ } + ], + "responses": { + "200": { + "description": "A VectorStoreListFilesResponse containing the list of files.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreListFilesResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Attach a file to a vector store.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_attach_file_to_vector_store_v1_openai_v1_vector_stores__vector_store_id__files_post", + "deprecated": true, + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_openai_v1_vector_stores_vector_store_id_files_Request" + } + } + } + }, + "responses": { + "200": { + "description": "A VectorStoreFileObject representing the attached file.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + }, + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the vector store to attach the file to." + } + ] + } + }, + "/v1/openai/v1/vector_stores/{vector_store_id}/files/{file_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Delete a vector store file.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_delete_vector_store_file_v1_openai_v1_vector_stores__vector_store_id__files__file_id__delete", + "deprecated": true, + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Vector Store Id" + }, + "description": "The ID of the vector store containing the file to delete." + }, + { + "name": "file_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "File Id" + }, + "description": "The ID of the file to delete." 
+ } + ], + "responses": { + "200": { + "description": "A VectorStoreFileDeleteResponse indicating the deletion status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileDeleteResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieves a vector store file.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_retrieve_vector_store_file_v1_openai_v1_vector_stores__vector_store_id__files__file_id__get", + "deprecated": true, + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Vector Store Id" + }, + "description": "The ID of the vector store containing the file to retrieve." + }, + { + "name": "file_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "File Id" + }, + "description": "The ID of the file to retrieve." + } + ], + "responses": { + "200": { + "description": "A VectorStoreFileObject representing the file.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Updates a vector store file.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_update_vector_store_file_v1_openai_v1_vector_stores__vector_store_id__files__file_id__post", + "deprecated": true, + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_openai_v1_vector_stores_vector_store_id_files_file_id_Request" + } + } + } + }, + "responses": { + "200": { + "description": "A VectorStoreFileObject representing the updated file.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + }, + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the vector store containing the file to update." + }, + { + "name": "file_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the file to update." 
+ } + ] + } + }, + "/v1/openai/v1/vector_stores/{vector_store_id}/files/{file_id}/content": { + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieves the contents of a vector store file.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_retrieve_vector_store_file_contents_v1_openai_v1_vector_stores__vector_store_id__files__file_id__content_get", + "deprecated": true, + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Vector Store Id" + }, + "description": "The ID of the vector store containing the file to retrieve." + }, + { + "name": "file_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "File Id" + }, + "description": "The ID of the file to retrieve." + } + ], + "responses": { + "200": { + "description": "A list of InterleavedContent representing the file contents.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileContentsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/openai/v1/vector_stores/{vector_store_id}/search": { + "post": { + "tags": [ + "V1" + ], + "summary": "Search for chunks in a vector store.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_search_vector_store_v1_openai_v1_vector_stores__vector_store_id__search_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_openai_v1_vector_stores_vector_store_id_search_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A VectorStoreSearchResponse containing the search results.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreSearchResponsePage" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "deprecated": true, + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the vector store to search." 
+ } + ] + } + }, + "/v1/post-training/job/artifacts": { + "get": { + "tags": [ + "V1" + ], + "summary": "Get the artifacts of a training job.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_training_job_artifacts_v1_post_training_job_artifacts_get", + "deprecated": true, + "parameters": [ + { + "name": "job_uuid", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Job Uuid" + } + } + ], + "responses": { + "200": { + "description": "A PostTrainingJobArtifactsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostTrainingJobArtifactsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/post-training/job/cancel": { + "post": { + "tags": [ + "V1" + ], + "summary": "Cancel a training job.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "cancel_training_job_v1_post_training_job_cancel_post", + "deprecated": true, + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/post-training/job/status": { + "get": { + "tags": [ + "V1" + ], + "summary": "Get the status of a training job.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_training_job_status_v1_post_training_job_status_get", + "deprecated": true, + "parameters": [ + { + "name": "job_uuid", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Job Uuid" + } + } + ], + "responses": { + "200": { + "description": "A PostTrainingJobStatusResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostTrainingJobStatusResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/post-training/jobs": { + "get": { + "tags": [ + "V1" + ], + "summary": "Get all training jobs.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "get_training_jobs_v1_post_training_jobs_get", + 
"responses": { + "200": { + "description": "A ListPostTrainingJobsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListPostTrainingJobsResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "deprecated": true + } + }, + "/v1/post-training/preference-optimize": { + "post": { + "tags": [ + "V1" + ], + "summary": "Run preference optimization of a model.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "preference_optimize_v1_post_training_preference_optimize_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DPOAlignmentConfig" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A PostTrainingJob.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostTrainingJob" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "deprecated": true + } + }, + "/v1/post-training/supervised-fine-tune": { + "post": { + "tags": [ + "V1" + ], + "summary": "Run supervised fine-tuning of a model.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "supervised_fine_tune_v1_post_training_supervised_fine_tune_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TrainingConfig" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A PostTrainingJob.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostTrainingJob" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "deprecated": true + } + } + }, + "components": { + "schemas": { + "AgentCandidate": { + "properties": { + "type": { + "type": "string", + "const": "agent", + "title": "Type", + "default": "agent" + }, + "config": { + "$ref": "#/components/schemas/AgentConfig" + } + }, + "type": "object", + "required": [ + "config" + ], + "title": "AgentCandidate", + "description": "An agent candidate for evaluation." 
+ }, + "AgentConfig": { + "properties": { + "sampling_params": { + "$ref": "#/components/schemas/SamplingParams" + }, + "input_shields": { + "title": "Input Shields", + "items": { + "type": "string" + }, + "type": "array" + }, + "output_shields": { + "title": "Output Shields", + "items": { + "type": "string" + }, + "type": "array" + }, + "toolgroups": { + "title": "Toolgroups", + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "$ref": "#/components/schemas/AgentToolGroupWithArgs" + } + ] + }, + "type": "array" + }, + "client_tools": { + "title": "Client Tools", + "items": { + "$ref": "#/components/schemas/ToolDef" + }, + "type": "array" + }, + "tool_choice": { + "deprecated": true, + "$ref": "#/components/schemas/ToolChoice" + }, + "tool_prompt_format": { + "deprecated": true, + "$ref": "#/components/schemas/ToolPromptFormat" + }, + "tool_config": { + "$ref": "#/components/schemas/ToolConfig" + }, + "max_infer_iters": { + "title": "Max Infer Iters", + "default": 10, + "type": "integer" + }, + "model": { + "type": "string", + "title": "Model" + }, + "instructions": { + "type": "string", + "title": "Instructions" + }, + "name": { + "title": "Name", + "type": "string" + }, + "enable_session_persistence": { + "title": "Enable Session Persistence", + "default": false, + "type": "boolean" + }, + "response_format": { + "title": "Response Format", + "oneOf": [ + { + "$ref": "#/components/schemas/JsonSchemaResponseFormat" + }, + { + "$ref": "#/components/schemas/GrammarResponseFormat" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "grammar": "#/components/schemas/GrammarResponseFormat", + "json_schema": "#/components/schemas/JsonSchemaResponseFormat" + } + } + } + }, + "type": "object", + "required": [ + "model", + "instructions" + ], + "title": "AgentConfig", + "description": "Configuration for an agent." + }, + "AgentCreateResponse": { + "properties": { + "agent_id": { + "type": "string", + "title": "Agent Id" + } + }, + "type": "object", + "required": [ + "agent_id" + ], + "title": "AgentCreateResponse", + "description": "Response returned when creating a new agent." + }, + "AgentSessionCreateResponse": { + "properties": { + "session_id": { + "type": "string", + "title": "Session Id" + } + }, + "type": "object", + "required": [ + "session_id" + ], + "title": "AgentSessionCreateResponse", + "description": "Response returned when creating a new agent session." + }, + "AgentToolGroupWithArgs": { + "properties": { + "name": { + "type": "string", + "title": "Name" + }, + "args": { + "additionalProperties": true, + "type": "object", + "title": "Args" + } + }, + "type": "object", + "required": [ + "name", + "args" + ], + "title": "AgentToolGroupWithArgs" + }, + "AgentTurnInputType": { + "properties": { + "type": { + "type": "string", + "const": "agent_turn_input", + "title": "Type", + "default": "agent_turn_input" + } + }, + "type": "object", + "title": "AgentTurnInputType", + "description": "Parameter type for agent turn input." + }, + "AggregationFunctionType": { + "type": "string", + "enum": [ + "average", + "weighted_average", + "median", + "categorical_count", + "accuracy" + ], + "title": "AggregationFunctionType", + "description": "Types of aggregation functions for scoring results." 
+ }, + "AllowedToolsFilter": { + "properties": { + "tool_names": { + "title": "Tool Names", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object", + "title": "AllowedToolsFilter", + "description": "Filter configuration for restricting which MCP tools can be used." + }, + "ApprovalFilter": { + "properties": { + "always": { + "title": "Always", + "items": { + "type": "string" + }, + "type": "array" + }, + "never": { + "title": "Never", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object", + "title": "ApprovalFilter", + "description": "Filter configuration for MCP tool approval requirements." + }, + "ArrayType": { + "properties": { + "type": { + "type": "string", + "const": "array", + "title": "Type", + "default": "array" + } + }, + "type": "object", + "title": "ArrayType", + "description": "Parameter type for array values." + }, + "Attachment-Input": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + }, + { + "$ref": "#/components/schemas/URL" + } + ], + "title": "Content" + }, + "mime_type": { + "type": "string", + "title": "Mime Type" + } + }, + "type": "object", + "required": [ + "content", + "mime_type" + ], + "title": "Attachment", + "description": "An attachment to an agent turn." + }, + "Attachment-Output": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + }, + { + "$ref": "#/components/schemas/URL" + } + ], + "title": "Content" + }, + "mime_type": { + "type": "string", + "title": "Mime Type" + } + }, + "type": "object", + "required": [ + "content", + "mime_type" + ], + "title": "Attachment", + "description": "An attachment to an agent turn." 
+ }, + "BasicScoringFnParams": { + "properties": { + "type": { + "type": "string", + "const": "basic", + "title": "Type", + "default": "basic" + }, + "aggregation_functions": { + "items": { + "$ref": "#/components/schemas/AggregationFunctionType" + }, + "type": "array", + "title": "Aggregation Functions", + "description": "Aggregation functions to apply to the scores of each row" + } + }, + "type": "object", + "title": "BasicScoringFnParams", + "description": "Parameters for basic scoring function configuration." + }, + "Batch": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "completion_window": { + "type": "string", + "title": "Completion Window" + }, + "created_at": { + "type": "integer", + "title": "Created At" + }, + "endpoint": { + "type": "string", + "title": "Endpoint" + }, + "input_file_id": { + "type": "string", + "title": "Input File Id" + }, + "object": { + "type": "string", + "const": "batch", + "title": "Object" + }, + "status": { + "type": "string", + "enum": [ + "validating", + "failed", + "in_progress", + "finalizing", + "completed", + "expired", + "cancelling", + "cancelled" + ], + "title": "Status" + }, + "cancelled_at": { + "title": "Cancelled At", + "type": "integer" + }, + "cancelling_at": { + "title": "Cancelling At", + "type": "integer" + }, + "completed_at": { + "title": "Completed At", + "type": "integer" + }, + "error_file_id": { + "title": "Error File Id", + "type": "string" + }, + "errors": { + "$ref": "#/components/schemas/Errors" + }, + "expired_at": { + "title": "Expired At", + "type": "integer" + }, + "expires_at": { + "title": "Expires At", + "type": "integer" + }, + "failed_at": { + "title": "Failed At", + "type": "integer" + }, + "finalizing_at": { + "title": "Finalizing At", + "type": "integer" + }, + "in_progress_at": { + "title": "In Progress At", + "type": "integer" + }, + "metadata": { + "title": "Metadata", + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "model": { + "title": "Model", + "type": "string" + }, + "output_file_id": { + "title": "Output File Id", + "type": "string" + }, + "request_counts": { + "$ref": "#/components/schemas/BatchRequestCounts" + }, + "usage": { + "$ref": "#/components/schemas/BatchUsage" + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "id", + "completion_window", + "created_at", + "endpoint", + "input_file_id", + "object", + "status" + ], + "title": "Batch" + }, + "BatchError": { + "properties": { + "code": { + "title": "Code", + "type": "string" + }, + "line": { + "title": "Line", + "type": "integer" + }, + "message": { + "title": "Message", + "type": "string" + }, + "param": { + "title": "Param", + "type": "string" + } + }, + "additionalProperties": true, + "type": "object", + "title": "BatchError" + }, + "BatchRequestCounts": { + "properties": { + "completed": { + "type": "integer", + "title": "Completed" + }, + "failed": { + "type": "integer", + "title": "Failed" + }, + "total": { + "type": "integer", + "title": "Total" + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "completed", + "failed", + "total" + ], + "title": "BatchRequestCounts" + }, + "BatchUsage": { + "properties": { + "input_tokens": { + "type": "integer", + "title": "Input Tokens" + }, + "input_tokens_details": { + "$ref": "#/components/schemas/InputTokensDetails" + }, + "output_tokens": { + "type": "integer", + "title": "Output Tokens" + }, + "output_tokens_details": { + "$ref": "#/components/schemas/OutputTokensDetails" + }, + 
"total_tokens": { + "type": "integer", + "title": "Total Tokens" + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "input_tokens", + "input_tokens_details", + "output_tokens", + "output_tokens_details", + "total_tokens" + ], + "title": "BatchUsage" + }, + "Benchmark": { + "properties": { + "identifier": { + "type": "string", + "title": "Identifier", + "description": "Unique identifier for this resource in llama stack" + }, + "provider_resource_id": { + "title": "Provider Resource Id", + "description": "Unique identifier for this resource in the provider", + "type": "string" + }, + "provider_id": { + "type": "string", + "title": "Provider Id", + "description": "ID of the provider that owns this resource" + }, + "type": { + "type": "string", + "const": "benchmark", + "title": "Type", + "default": "benchmark" + }, + "dataset_id": { + "type": "string", + "title": "Dataset Id" + }, + "scoring_functions": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Scoring Functions" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata", + "description": "Metadata for this evaluation task" + } + }, + "type": "object", + "required": [ + "identifier", + "provider_id", + "dataset_id", + "scoring_functions" + ], + "title": "Benchmark", + "description": "A benchmark resource for evaluating model performance." + }, + "BenchmarkConfig": { + "properties": { + "eval_candidate": { + "oneOf": [ + { + "$ref": "#/components/schemas/ModelCandidate" + }, + { + "$ref": "#/components/schemas/AgentCandidate" + } + ], + "title": "Eval Candidate", + "discriminator": { + "propertyName": "type", + "mapping": { + "agent": "#/components/schemas/AgentCandidate", + "model": "#/components/schemas/ModelCandidate" + } + } + }, + "scoring_params": { + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/components/schemas/LLMAsJudgeScoringFnParams" + }, + { + "$ref": "#/components/schemas/RegexParserScoringFnParams" + }, + { + "$ref": "#/components/schemas/BasicScoringFnParams" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "basic": "#/components/schemas/BasicScoringFnParams", + "llm_as_judge": "#/components/schemas/LLMAsJudgeScoringFnParams", + "regex_parser": "#/components/schemas/RegexParserScoringFnParams" + } + } + }, + "type": "object", + "title": "Scoring Params", + "description": "Map between scoring function id and parameters for each scoring function you want to run" + }, + "num_examples": { + "title": "Num Examples", + "description": "Number of examples to evaluate (useful for testing), if not provided, all examples in the dataset will be evaluated", + "type": "integer" + } + }, + "type": "object", + "required": [ + "eval_candidate" + ], + "title": "BenchmarkConfig", + "description": "A benchmark configuration for evaluation." + }, + "BooleanType": { + "properties": { + "type": { + "type": "string", + "const": "boolean", + "title": "Type", + "default": "boolean" + } + }, + "type": "object", + "title": "BooleanType", + "description": "Parameter type for boolean values." 
+ }, + "BuiltinTool": { + "type": "string", + "enum": [ + "brave_search", + "wolfram_alpha", + "photogen", + "code_interpreter" + ], + "title": "BuiltinTool" + }, + "ChatCompletionInputType": { + "properties": { + "type": { + "type": "string", + "const": "chat_completion_input", + "title": "Type", + "default": "chat_completion_input" + } + }, + "type": "object", + "title": "ChatCompletionInputType", + "description": "Parameter type for chat completion input." + }, + "Chunk-Input": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "chunk_id": { + "type": "string", + "title": "Chunk Id" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata" + }, + "embedding": { + "title": "Embedding", + "items": { + "type": "number" + }, + "type": "array" + }, + "chunk_metadata": { + "$ref": "#/components/schemas/ChunkMetadata" + } + }, + "type": "object", + "required": [ + "content", + "chunk_id" + ], + "title": "Chunk", + "description": "A chunk of content that can be inserted into a vector database." + }, + "Chunk-Output": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "chunk_id": { + "type": "string", + "title": "Chunk Id" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata" + }, + "embedding": { + "title": "Embedding", + "items": { + "type": "number" + }, + "type": "array" + }, + "chunk_metadata": { + "$ref": "#/components/schemas/ChunkMetadata" + } + }, + "type": "object", + "required": [ + "content", + "chunk_id" + ], + "title": "Chunk", + "description": "A chunk of content that can be inserted into a vector database." 
+ }, + "ChunkMetadata": { + "properties": { + "chunk_id": { + "title": "Chunk Id", + "type": "string" + }, + "document_id": { + "title": "Document Id", + "type": "string" + }, + "source": { + "title": "Source", + "type": "string" + }, + "created_timestamp": { + "title": "Created Timestamp", + "type": "integer" + }, + "updated_timestamp": { + "title": "Updated Timestamp", + "type": "integer" + }, + "chunk_window": { + "title": "Chunk Window", + "type": "string" + }, + "chunk_tokenizer": { + "title": "Chunk Tokenizer", + "type": "string" + }, + "chunk_embedding_model": { + "title": "Chunk Embedding Model", + "type": "string" + }, + "chunk_embedding_dimension": { + "title": "Chunk Embedding Dimension", + "type": "integer" + }, + "content_token_count": { + "title": "Content Token Count", + "type": "integer" + }, + "metadata_token_count": { + "title": "Metadata Token Count", + "type": "integer" + } + }, + "type": "object", + "title": "ChunkMetadata", + "description": "`ChunkMetadata` is backend metadata for a `Chunk` that is used to store additional information about the chunk that\n will not be used in the context during inference, but is required for backend functionality. The `ChunkMetadata`\n is set during chunk creation in `MemoryToolRuntimeImpl().insert()`and is not expected to change after.\n Use `Chunk.metadata` for metadata that will be used in the context during inference." + }, + "CompletionInputType": { + "properties": { + "type": { + "type": "string", + "const": "completion_input", + "title": "Type", + "default": "completion_input" + } + }, + "type": "object", + "title": "CompletionInputType", + "description": "Parameter type for completion input." + }, + "CompletionMessage-Input": { + "properties": { + "role": { + "type": "string", + "const": "assistant", + "title": "Role", + "default": "assistant" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "stop_reason": { + "$ref": "#/components/schemas/StopReason" + }, + "tool_calls": { + "title": "Tool Calls", + "items": { + "$ref": "#/components/schemas/ToolCall" + }, + "type": "array" + } + }, + "type": "object", + "required": [ + "content", + "stop_reason" + ], + "title": "CompletionMessage", + "description": "A message containing the model's (assistant) response in a chat conversation." 
+ }, + "CompletionMessage-Output": { + "properties": { + "role": { + "type": "string", + "const": "assistant", + "title": "Role", + "default": "assistant" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "stop_reason": { + "$ref": "#/components/schemas/StopReason" + }, + "tool_calls": { + "title": "Tool Calls", + "items": { + "$ref": "#/components/schemas/ToolCall" + }, + "type": "array" + } + }, + "type": "object", + "required": [ + "content", + "stop_reason" + ], + "title": "CompletionMessage", + "description": "A message containing the model's (assistant) response in a chat conversation." + }, + "Conversation": { + "properties": { + "id": { + "type": "string", + "title": "Id", + "description": "The unique ID of the conversation." + }, + "object": { + "type": "string", + "const": "conversation", + "title": "Object", + "description": "The object type, which is always conversation.", + "default": "conversation" + }, + "created_at": { + "type": "integer", + "title": "Created At", + "description": "The time at which the conversation was created, measured in seconds since the Unix epoch." + }, + "metadata": { + "title": "Metadata", + "description": "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.", + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "items": { + "title": "Items", + "description": "Initial items to include in the conversation context. You may add up to 20 items at a time.", + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array" + } + }, + "type": "object", + "required": [ + "id", + "created_at" + ], + "title": "Conversation", + "description": "OpenAI-compatible conversation object." + }, + "ConversationItemInclude": { + "type": "string", + "enum": [ + "web_search_call.action.sources", + "code_interpreter_call.outputs", + "computer_call_output.output.image_url", + "file_search_call.results", + "message.input_image.image_url", + "message.output_text.logprobs", + "reasoning.encrypted_content" + ], + "title": "ConversationItemInclude", + "description": "Specify additional output data to include in the model response." 
+ }, + "ConversationItemList": { + "properties": { + "object": { + "type": "string", + "title": "Object", + "description": "Object type", + "default": "list" + }, + "data": { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage-Output" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalResponse" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "file_search_call": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall", + "function_call": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall", + "function_call_output": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput", + "mcp_approval_request": "#/components/schemas/OpenAIResponseMCPApprovalRequest", + "mcp_approval_response": "#/components/schemas/OpenAIResponseMCPApprovalResponse", + "mcp_call": "#/components/schemas/OpenAIResponseOutputMessageMCPCall", + "mcp_list_tools": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools", + "message": "#/components/schemas/OpenAIResponseMessage-Output", + "web_search_call": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + } + } + }, + "type": "array", + "title": "Data", + "description": "List of conversation items" + }, + "first_id": { + "title": "First Id", + "description": "The ID of the first item in the list", + "type": "string" + }, + "last_id": { + "title": "Last Id", + "description": "The ID of the last item in the list", + "type": "string" + }, + "has_more": { + "type": "boolean", + "title": "Has More", + "description": "Whether there are more items available", + "default": false + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ConversationItemList", + "description": "List of conversation items with pagination." + }, + "DPOAlignmentConfig": { + "properties": { + "beta": { + "type": "number", + "title": "Beta" + }, + "loss_type": { + "$ref": "#/components/schemas/DPOLossType", + "default": "sigmoid" + } + }, + "type": "object", + "required": [ + "beta" + ], + "title": "DPOAlignmentConfig", + "description": "Configuration for Direct Preference Optimization (DPO) alignment." 
+ }, + "DPOLossType": { + "type": "string", + "enum": [ + "sigmoid", + "hinge", + "ipo", + "kto_pair" + ], + "title": "DPOLossType" + }, + "DataConfig": { + "properties": { + "dataset_id": { + "type": "string", + "title": "Dataset Id" + }, + "batch_size": { + "type": "integer", + "title": "Batch Size" + }, + "shuffle": { + "type": "boolean", + "title": "Shuffle" + }, + "data_format": { + "$ref": "#/components/schemas/DatasetFormat" + }, + "validation_dataset_id": { + "title": "Validation Dataset Id", + "type": "string" + }, + "packed": { + "title": "Packed", + "default": false, + "type": "boolean" + }, + "train_on_input": { + "title": "Train On Input", + "default": false, + "type": "boolean" + } + }, + "type": "object", + "required": [ + "dataset_id", + "batch_size", + "shuffle", + "data_format" + ], + "title": "DataConfig", + "description": "Configuration for training data and data loading." + }, + "Dataset": { + "properties": { + "identifier": { + "type": "string", + "title": "Identifier", + "description": "Unique identifier for this resource in llama stack" + }, + "provider_resource_id": { + "title": "Provider Resource Id", + "description": "Unique identifier for this resource in the provider", + "type": "string" + }, + "provider_id": { + "type": "string", + "title": "Provider Id", + "description": "ID of the provider that owns this resource" + }, + "type": { + "type": "string", + "const": "dataset", + "title": "Type", + "default": "dataset" + }, + "purpose": { + "$ref": "#/components/schemas/DatasetPurpose" + }, + "source": { + "oneOf": [ + { + "$ref": "#/components/schemas/URIDataSource" + }, + { + "$ref": "#/components/schemas/RowsDataSource" + } + ], + "title": "Source", + "discriminator": { + "propertyName": "type", + "mapping": { + "rows": "#/components/schemas/RowsDataSource", + "uri": "#/components/schemas/URIDataSource" + } + } + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata", + "description": "Any additional metadata for this dataset" + } + }, + "type": "object", + "required": [ + "identifier", + "provider_id", + "purpose", + "source" + ], + "title": "Dataset", + "description": "Dataset resource for storing and accessing training or evaluation data." + }, + "DatasetFormat": { + "type": "string", + "enum": [ + "instruct", + "dialog" + ], + "title": "DatasetFormat", + "description": "Format of the training dataset." + }, + "DatasetPurpose": { + "type": "string", + "enum": [ + "post-training/messages", + "eval/question-answer", + "eval/messages-answer" + ], + "title": "DatasetPurpose", + "description": "Purpose of the dataset. Each purpose has a required input data schema." + }, + "DefaultRAGQueryGeneratorConfig": { + "properties": { + "type": { + "type": "string", + "const": "default", + "title": "Type", + "default": "default" + }, + "separator": { + "type": "string", + "title": "Separator", + "default": " " + } + }, + "type": "object", + "title": "DefaultRAGQueryGeneratorConfig", + "description": "Configuration for the default RAG query generator." 
+ }, + "Document": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + }, + { + "$ref": "#/components/schemas/URL" + } + ], + "title": "Content" + }, + "mime_type": { + "type": "string", + "title": "Mime Type" + } + }, + "type": "object", + "required": [ + "content", + "mime_type" + ], + "title": "Document", + "description": "A document to be used by an agent." + }, + "EfficiencyConfig": { + "properties": { + "enable_activation_checkpointing": { + "title": "Enable Activation Checkpointing", + "default": false, + "type": "boolean" + }, + "enable_activation_offloading": { + "title": "Enable Activation Offloading", + "default": false, + "type": "boolean" + }, + "memory_efficient_fsdp_wrap": { + "title": "Memory Efficient Fsdp Wrap", + "default": false, + "type": "boolean" + }, + "fsdp_cpu_offload": { + "title": "Fsdp Cpu Offload", + "default": false, + "type": "boolean" + } + }, + "type": "object", + "title": "EfficiencyConfig", + "description": "Configuration for memory and compute efficiency optimizations." + }, + "Errors": { + "properties": { + "data": { + "title": "Data", + "items": { + "$ref": "#/components/schemas/BatchError" + }, + "type": "array" + }, + "object": { + "title": "Object", + "type": "string" + } + }, + "additionalProperties": true, + "type": "object", + "title": "Errors" + }, + "EvaluateResponse": { + "properties": { + "generations": { + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array", + "title": "Generations" + }, + "scores": { + "additionalProperties": { + "$ref": "#/components/schemas/ScoringResult" + }, + "type": "object", + "title": "Scores" + } + }, + "type": "object", + "required": [ + "generations", + "scores" + ], + "title": "EvaluateResponse", + "description": "The response from an evaluation." + }, + "GrammarResponseFormat": { + "properties": { + "type": { + "type": "string", + "const": "grammar", + "title": "Type", + "default": "grammar" + }, + "bnf": { + "additionalProperties": true, + "type": "object", + "title": "Bnf" + } + }, + "type": "object", + "required": [ + "bnf" + ], + "title": "GrammarResponseFormat", + "description": "Configuration for grammar-guided response generation." + }, + "GreedySamplingStrategy": { + "properties": { + "type": { + "type": "string", + "const": "greedy", + "title": "Type", + "default": "greedy" + } + }, + "type": "object", + "title": "GreedySamplingStrategy", + "description": "Greedy sampling strategy that selects the highest probability token at each step." + }, + "HealthInfo": { + "properties": { + "status": { + "$ref": "#/components/schemas/HealthStatus" + } + }, + "type": "object", + "required": [ + "status" + ], + "title": "HealthInfo", + "description": "Health status information for the service." 
+ }, + "HealthStatus": { + "type": "string", + "enum": [ + "OK", + "Error", + "Not Implemented" + ], + "title": "HealthStatus" + }, + "ImageContentItem-Input": { + "properties": { + "type": { + "type": "string", + "const": "image", + "title": "Type", + "default": "image" + }, + "image": { + "$ref": "#/components/schemas/_URLOrData" + } + }, + "type": "object", + "required": [ + "image" + ], + "title": "ImageContentItem", + "description": "A image content item" + }, + "ImageContentItem-Output": { + "properties": { + "type": { + "type": "string", + "const": "image", + "title": "Type", + "default": "image" + }, + "image": { + "$ref": "#/components/schemas/_URLOrData" + } + }, + "type": "object", + "required": [ + "image" + ], + "title": "ImageContentItem", + "description": "A image content item" + }, + "InferenceStep-Input": { + "properties": { + "turn_id": { + "type": "string", + "title": "Turn Id" + }, + "step_id": { + "type": "string", + "title": "Step Id" + }, + "started_at": { + "title": "Started At", + "type": "string", + "format": "date-time" + }, + "completed_at": { + "title": "Completed At", + "type": "string", + "format": "date-time" + }, + "step_type": { + "type": "string", + "const": "inference", + "title": "Step Type", + "default": "inference" + }, + "model_response": { + "$ref": "#/components/schemas/CompletionMessage-Input" + } + }, + "type": "object", + "required": [ + "turn_id", + "step_id", + "model_response" + ], + "title": "InferenceStep", + "description": "An inference step in an agent turn." + }, + "InferenceStep-Output": { + "properties": { + "turn_id": { + "type": "string", + "title": "Turn Id" + }, + "step_id": { + "type": "string", + "title": "Step Id" + }, + "started_at": { + "title": "Started At", + "type": "string", + "format": "date-time" + }, + "completed_at": { + "title": "Completed At", + "type": "string", + "format": "date-time" + }, + "step_type": { + "type": "string", + "const": "inference", + "title": "Step Type", + "default": "inference" + }, + "model_response": { + "$ref": "#/components/schemas/CompletionMessage-Output" + } + }, + "type": "object", + "required": [ + "turn_id", + "step_id", + "model_response" + ], + "title": "InferenceStep", + "description": "An inference step in an agent turn." + }, + "InputTokensDetails": { + "properties": { + "cached_tokens": { + "type": "integer", + "title": "Cached Tokens" + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "cached_tokens" + ], + "title": "InputTokensDetails" + }, + "Job": { + "properties": { + "job_id": { + "type": "string", + "title": "Job Id" + }, + "status": { + "$ref": "#/components/schemas/JobStatus" + } + }, + "type": "object", + "required": [ + "job_id", + "status" + ], + "title": "Job", + "description": "A job execution instance with status tracking." + }, + "JobStatus": { + "type": "string", + "enum": [ + "completed", + "in_progress", + "failed", + "scheduled", + "cancelled" + ], + "title": "JobStatus", + "description": "Status of a job execution." + }, + "JsonSchemaResponseFormat": { + "properties": { + "type": { + "type": "string", + "const": "json_schema", + "title": "Type", + "default": "json_schema" + }, + "json_schema": { + "additionalProperties": true, + "type": "object", + "title": "Json Schema" + } + }, + "type": "object", + "required": [ + "json_schema" + ], + "title": "JsonSchemaResponseFormat", + "description": "Configuration for JSON schema-guided response generation." 
+ }, + "JsonType": { + "properties": { + "type": { + "type": "string", + "const": "json", + "title": "Type", + "default": "json" + } + }, + "type": "object", + "title": "JsonType", + "description": "Parameter type for JSON values." + }, + "LLMAsJudgeScoringFnParams": { + "properties": { + "type": { + "type": "string", + "const": "llm_as_judge", + "title": "Type", + "default": "llm_as_judge" + }, + "judge_model": { + "type": "string", + "title": "Judge Model" + }, + "prompt_template": { + "title": "Prompt Template", + "type": "string" + }, + "judge_score_regexes": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Judge Score Regexes", + "description": "Regexes to extract the answer from generated response" + }, + "aggregation_functions": { + "items": { + "$ref": "#/components/schemas/AggregationFunctionType" + }, + "type": "array", + "title": "Aggregation Functions", + "description": "Aggregation functions to apply to the scores of each row" + } + }, + "type": "object", + "required": [ + "judge_model" + ], + "title": "LLMAsJudgeScoringFnParams", + "description": "Parameters for LLM-as-judge scoring function configuration." + }, + "LLMRAGQueryGeneratorConfig": { + "properties": { + "type": { + "type": "string", + "const": "llm", + "title": "Type", + "default": "llm" + }, + "model": { + "type": "string", + "title": "Model" + }, + "template": { + "type": "string", + "title": "Template" + } + }, + "type": "object", + "required": [ + "model", + "template" + ], + "title": "LLMRAGQueryGeneratorConfig", + "description": "Configuration for the LLM-based RAG query generator." + }, + "ListBenchmarksResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/Benchmark" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListBenchmarksResponse" + }, + "ListDatasetsResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/Dataset" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListDatasetsResponse", + "description": "Response from listing datasets." + }, + "ListModelsResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/Model" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListModelsResponse" + }, + "ListPostTrainingJobsResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/PostTrainingJob" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListPostTrainingJobsResponse" + }, + "ListPromptsResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/Prompt" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListPromptsResponse", + "description": "Response model to list prompts." + }, + "ListProvidersResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/ProviderInfo" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListProvidersResponse", + "description": "Response containing a list of all available providers." 
+ }, + "ListRoutesResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/RouteInfo" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListRoutesResponse", + "description": "Response containing a list of all available API routes." + }, + "ListScoringFunctionsResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/ScoringFn-Output" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListScoringFunctionsResponse" + }, + "ListShieldsResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/Shield" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListShieldsResponse" + }, + "ListToolGroupsResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/ToolGroup" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListToolGroupsResponse", + "description": "Response containing a list of tool groups." + }, + "MCPListToolsTool": { + "properties": { + "input_schema": { + "additionalProperties": true, + "type": "object", + "title": "Input Schema" + }, + "name": { + "type": "string", + "title": "Name" + }, + "description": { + "title": "Description", + "type": "string" + } + }, + "type": "object", + "required": [ + "input_schema", + "name" + ], + "title": "MCPListToolsTool", + "description": "Tool definition returned by MCP list tools operation." + }, + "MemoryRetrievalStep-Input": { + "properties": { + "turn_id": { + "type": "string", + "title": "Turn Id" + }, + "step_id": { + "type": "string", + "title": "Step Id" + }, + "started_at": { + "title": "Started At", + "type": "string", + "format": "date-time" + }, + "completed_at": { + "title": "Completed At", + "type": "string", + "format": "date-time" + }, + "step_type": { + "type": "string", + "const": "memory_retrieval", + "title": "Step Type", + "default": "memory_retrieval" + }, + "vector_store_ids": { + "type": "string", + "title": "Vector Store Ids" + }, + "inserted_context": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Inserted Context" + } + }, + "type": "object", + "required": [ + "turn_id", + "step_id", + "vector_store_ids", + "inserted_context" + ], + "title": "MemoryRetrievalStep", + "description": "A memory retrieval step in an agent turn." 
+ }, + "MemoryRetrievalStep-Output": { + "properties": { + "turn_id": { + "type": "string", + "title": "Turn Id" + }, + "step_id": { + "type": "string", + "title": "Step Id" + }, + "started_at": { + "title": "Started At", + "type": "string", + "format": "date-time" + }, + "completed_at": { + "title": "Completed At", + "type": "string", + "format": "date-time" + }, + "step_type": { + "type": "string", + "const": "memory_retrieval", + "title": "Step Type", + "default": "memory_retrieval" + }, + "vector_store_ids": { + "type": "string", + "title": "Vector Store Ids" + }, + "inserted_context": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Inserted Context" + } + }, + "type": "object", + "required": [ + "turn_id", + "step_id", + "vector_store_ids", + "inserted_context" + ], + "title": "MemoryRetrievalStep", + "description": "A memory retrieval step in an agent turn." + }, + "Model": { + "properties": { + "identifier": { + "type": "string", + "title": "Identifier", + "description": "Unique identifier for this resource in llama stack" + }, + "provider_resource_id": { + "title": "Provider Resource Id", + "description": "Unique identifier for this resource in the provider", + "type": "string" + }, + "provider_id": { + "type": "string", + "title": "Provider Id", + "description": "ID of the provider that owns this resource" + }, + "type": { + "type": "string", + "const": "model", + "title": "Type", + "default": "model" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata", + "description": "Any additional metadata for this model" + }, + "model_type": { + "$ref": "#/components/schemas/ModelType", + "default": "llm" + } + }, + "type": "object", + "required": [ + "identifier", + "provider_id" + ], + "title": "Model", + "description": "A model resource representing an AI model registered in Llama Stack." + }, + "ModelCandidate": { + "properties": { + "type": { + "type": "string", + "const": "model", + "title": "Type", + "default": "model" + }, + "model": { + "type": "string", + "title": "Model" + }, + "sampling_params": { + "$ref": "#/components/schemas/SamplingParams" + }, + "system_message": { + "$ref": "#/components/schemas/SystemMessage" + } + }, + "type": "object", + "required": [ + "model", + "sampling_params" + ], + "title": "ModelCandidate", + "description": "A model candidate for evaluation." + }, + "ModelType": { + "type": "string", + "enum": [ + "llm", + "embedding", + "rerank" + ], + "title": "ModelType", + "description": "Enumeration of supported model types in Llama Stack." 
+ }, + "ModerationObject": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "model": { + "type": "string", + "title": "Model" + }, + "results": { + "items": { + "$ref": "#/components/schemas/ModerationObjectResults" + }, + "type": "array", + "title": "Results" + } + }, + "type": "object", + "required": [ + "id", + "model", + "results" + ], + "title": "ModerationObject", + "description": "A moderation object." + }, + "ModerationObjectResults": { + "properties": { + "flagged": { + "type": "boolean", + "title": "Flagged" + }, + "categories": { + "title": "Categories", + "additionalProperties": { + "type": "boolean" + }, + "type": "object" + }, + "category_applied_input_types": { + "title": "Category Applied Input Types", + "additionalProperties": { + "items": { + "type": "string" + }, + "type": "array" + }, + "type": "object" + }, + "category_scores": { + "title": "Category Scores", + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + "user_message": { + "title": "User Message", + "type": "string" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata" + } + }, + "type": "object", + "required": [ + "flagged" + ], + "title": "ModerationObjectResults", + "description": "A moderation object." + }, + "NumberType": { + "properties": { + "type": { + "type": "string", + "const": "number", + "title": "Type", + "default": "number" + } + }, + "type": "object", + "title": "NumberType", + "description": "Parameter type for numeric values." + }, + "ObjectType": { + "properties": { + "type": { + "type": "string", + "const": "object", + "title": "Type", + "default": "object" + } + }, + "type": "object", + "title": "ObjectType", + "description": "Parameter type for object values." + }, + "OpenAIAssistantMessageParam-Input": { + "properties": { + "role": { + "type": "string", + "const": "assistant", + "title": "Role", + "default": "assistant" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + "type": "array" + } + ], + "title": "Content" + }, + "name": { + "title": "Name", + "type": "string" + }, + "tool_calls": { + "title": "Tool Calls", + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionToolCall" + }, + "type": "array" + } + }, + "type": "object", + "title": "OpenAIAssistantMessageParam", + "description": "A message containing the model's (assistant) response in an OpenAI-compatible chat completion request." + }, + "OpenAIAssistantMessageParam-Output": { + "properties": { + "role": { + "type": "string", + "const": "assistant", + "title": "Role", + "default": "assistant" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + "type": "array" + } + ], + "title": "Content" + }, + "name": { + "title": "Name", + "type": "string" + }, + "tool_calls": { + "title": "Tool Calls", + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionToolCall" + }, + "type": "array" + } + }, + "type": "object", + "title": "OpenAIAssistantMessageParam", + "description": "A message containing the model's (assistant) response in an OpenAI-compatible chat completion request." 
+ }, + "OpenAIChatCompletion": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "choices": { + "items": { + "$ref": "#/components/schemas/OpenAIChoice-Output" + }, + "type": "array", + "title": "Choices" + }, + "object": { + "type": "string", + "const": "chat.completion", + "title": "Object", + "default": "chat.completion" + }, + "created": { + "type": "integer", + "title": "Created" + }, + "model": { + "type": "string", + "title": "Model" + }, + "usage": { + "$ref": "#/components/schemas/OpenAIChatCompletionUsage" + } + }, + "type": "object", + "required": [ + "id", + "choices", + "created", + "model" + ], + "title": "OpenAIChatCompletion", + "description": "Response from an OpenAI-compatible chat completion request." + }, + "OpenAIChatCompletionContentPartImageParam": { + "properties": { + "type": { + "type": "string", + "const": "image_url", + "title": "Type", + "default": "image_url" + }, + "image_url": { + "$ref": "#/components/schemas/OpenAIImageURL" + } + }, + "type": "object", + "required": [ + "image_url" + ], + "title": "OpenAIChatCompletionContentPartImageParam", + "description": "Image content part for OpenAI-compatible chat completion messages." + }, + "OpenAIChatCompletionContentPartTextParam": { + "properties": { + "type": { + "type": "string", + "const": "text", + "title": "Type", + "default": "text" + }, + "text": { + "type": "string", + "title": "Text" + } + }, + "type": "object", + "required": [ + "text" + ], + "title": "OpenAIChatCompletionContentPartTextParam", + "description": "Text content part for OpenAI-compatible chat completion messages." + }, + "OpenAIChatCompletionRequestWithExtraBody": { + "properties": { + "model": { + "type": "string", + "title": "Model" + }, + "messages": { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIUserMessageParam-Input" + }, + { + "$ref": "#/components/schemas/OpenAISystemMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIAssistantMessageParam-Input" + }, + { + "$ref": "#/components/schemas/OpenAIToolMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIDeveloperMessageParam" + } + ], + "discriminator": { + "propertyName": "role", + "mapping": { + "assistant": "#/components/schemas/OpenAIAssistantMessageParam-Input", + "developer": "#/components/schemas/OpenAIDeveloperMessageParam", + "system": "#/components/schemas/OpenAISystemMessageParam", + "tool": "#/components/schemas/OpenAIToolMessageParam", + "user": "#/components/schemas/OpenAIUserMessageParam-Input" + } + } + }, + "type": "array", + "minItems": 1, + "title": "Messages" + }, + "frequency_penalty": { + "title": "Frequency Penalty", + "type": "number" + }, + "function_call": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ], + "title": "Function Call" + }, + "functions": { + "title": "Functions", + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array" + }, + "logit_bias": { + "title": "Logit Bias", + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + "logprobs": { + "title": "Logprobs", + "type": "boolean" + }, + "max_completion_tokens": { + "title": "Max Completion Tokens", + "type": "integer" + }, + "max_tokens": { + "title": "Max Tokens", + "type": "integer" + }, + "n": { + "title": "N", + "type": "integer" + }, + "parallel_tool_calls": { + "title": "Parallel Tool Calls", + "type": "boolean" + }, + "presence_penalty": { + "title": "Presence Penalty", + "type": "number" + }, + "response_format": { + 
"title": "Response Format", + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseFormatText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseFormatJSONSchema" + }, + { + "$ref": "#/components/schemas/OpenAIResponseFormatJSONObject" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "json_object": "#/components/schemas/OpenAIResponseFormatJSONObject", + "json_schema": "#/components/schemas/OpenAIResponseFormatJSONSchema", + "text": "#/components/schemas/OpenAIResponseFormatText" + } + } + }, + "seed": { + "title": "Seed", + "type": "integer" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ], + "title": "Stop" + }, + "stream": { + "title": "Stream", + "type": "boolean" + }, + "stream_options": { + "title": "Stream Options", + "additionalProperties": true, + "type": "object" + }, + "temperature": { + "title": "Temperature", + "type": "number" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ], + "title": "Tool Choice" + }, + "tools": { + "title": "Tools", + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array" + }, + "top_logprobs": { + "title": "Top Logprobs", + "type": "integer" + }, + "top_p": { + "title": "Top P", + "type": "number" + }, + "user": { + "title": "User", + "type": "string" + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "model", + "messages" + ], + "title": "OpenAIChatCompletionRequestWithExtraBody", + "description": "Request parameters for OpenAI-compatible chat completion endpoint." + }, + "OpenAIChatCompletionToolCall": { + "properties": { + "index": { + "title": "Index", + "type": "integer" + }, + "id": { + "title": "Id", + "type": "string" + }, + "type": { + "type": "string", + "const": "function", + "title": "Type", + "default": "function" + }, + "function": { + "$ref": "#/components/schemas/OpenAIChatCompletionToolCallFunction" + } + }, + "type": "object", + "title": "OpenAIChatCompletionToolCall", + "description": "Tool call specification for OpenAI-compatible chat completion responses." + }, + "OpenAIChatCompletionToolCallFunction": { + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "arguments": { + "title": "Arguments", + "type": "string" + } + }, + "type": "object", + "title": "OpenAIChatCompletionToolCallFunction", + "description": "Function call details for OpenAI-compatible tool calls." + }, + "OpenAIChatCompletionUsage": { + "properties": { + "prompt_tokens": { + "type": "integer", + "title": "Prompt Tokens" + }, + "completion_tokens": { + "type": "integer", + "title": "Completion Tokens" + }, + "total_tokens": { + "type": "integer", + "title": "Total Tokens" + }, + "prompt_tokens_details": { + "$ref": "#/components/schemas/OpenAIChatCompletionUsagePromptTokensDetails" + }, + "completion_tokens_details": { + "$ref": "#/components/schemas/OpenAIChatCompletionUsageCompletionTokensDetails" + } + }, + "type": "object", + "required": [ + "prompt_tokens", + "completion_tokens", + "total_tokens" + ], + "title": "OpenAIChatCompletionUsage", + "description": "Usage information for OpenAI chat completion." 
+ }, + "OpenAIChatCompletionUsageCompletionTokensDetails": { + "properties": { + "reasoning_tokens": { + "title": "Reasoning Tokens", + "type": "integer" + } + }, + "type": "object", + "title": "OpenAIChatCompletionUsageCompletionTokensDetails", + "description": "Token details for output tokens in OpenAI chat completion usage." + }, + "OpenAIChatCompletionUsagePromptTokensDetails": { + "properties": { + "cached_tokens": { + "title": "Cached Tokens", + "type": "integer" + } + }, + "type": "object", + "title": "OpenAIChatCompletionUsagePromptTokensDetails", + "description": "Token details for prompt tokens in OpenAI chat completion usage." + }, + "OpenAIChoice-Input": { + "properties": { + "message": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIUserMessageParam-Input" + }, + { + "$ref": "#/components/schemas/OpenAISystemMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIAssistantMessageParam-Input" + }, + { + "$ref": "#/components/schemas/OpenAIToolMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIDeveloperMessageParam" + } + ], + "title": "Message", + "discriminator": { + "propertyName": "role", + "mapping": { + "assistant": "#/components/schemas/OpenAIAssistantMessageParam-Input", + "developer": "#/components/schemas/OpenAIDeveloperMessageParam", + "system": "#/components/schemas/OpenAISystemMessageParam", + "tool": "#/components/schemas/OpenAIToolMessageParam", + "user": "#/components/schemas/OpenAIUserMessageParam-Input" + } + } + }, + "finish_reason": { + "type": "string", + "title": "Finish Reason" + }, + "index": { + "type": "integer", + "title": "Index" + }, + "logprobs": { + "$ref": "#/components/schemas/OpenAIChoiceLogprobs-Input" + } + }, + "type": "object", + "required": [ + "message", + "finish_reason", + "index" + ], + "title": "OpenAIChoice", + "description": "A choice from an OpenAI-compatible chat completion response." + }, + "OpenAIChoice-Output": { + "properties": { + "message": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIUserMessageParam-Output" + }, + { + "$ref": "#/components/schemas/OpenAISystemMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIAssistantMessageParam-Output" + }, + { + "$ref": "#/components/schemas/OpenAIToolMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIDeveloperMessageParam" + } + ], + "title": "Message", + "discriminator": { + "propertyName": "role", + "mapping": { + "assistant": "#/components/schemas/OpenAIAssistantMessageParam-Output", + "developer": "#/components/schemas/OpenAIDeveloperMessageParam", + "system": "#/components/schemas/OpenAISystemMessageParam", + "tool": "#/components/schemas/OpenAIToolMessageParam", + "user": "#/components/schemas/OpenAIUserMessageParam-Output" + } + } + }, + "finish_reason": { + "type": "string", + "title": "Finish Reason" + }, + "index": { + "type": "integer", + "title": "Index" + }, + "logprobs": { + "$ref": "#/components/schemas/OpenAIChoiceLogprobs-Output" + } + }, + "type": "object", + "required": [ + "message", + "finish_reason", + "index" + ], + "title": "OpenAIChoice", + "description": "A choice from an OpenAI-compatible chat completion response." 
+ }, + "OpenAIChoiceLogprobs-Input": { + "properties": { + "content": { + "title": "Content", + "items": { + "$ref": "#/components/schemas/OpenAITokenLogProb" + }, + "type": "array" + }, + "refusal": { + "title": "Refusal", + "items": { + "$ref": "#/components/schemas/OpenAITokenLogProb" + }, + "type": "array" + } + }, + "type": "object", + "title": "OpenAIChoiceLogprobs", + "description": "The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response." + }, + "OpenAIChoiceLogprobs-Output": { + "properties": { + "content": { + "title": "Content", + "items": { + "$ref": "#/components/schemas/OpenAITokenLogProb" + }, + "type": "array" + }, + "refusal": { + "title": "Refusal", + "items": { + "$ref": "#/components/schemas/OpenAITokenLogProb" + }, + "type": "array" + } + }, + "type": "object", + "title": "OpenAIChoiceLogprobs", + "description": "The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response." + }, + "OpenAICompletion": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "choices": { + "items": { + "$ref": "#/components/schemas/OpenAICompletionChoice-Output" + }, + "type": "array", + "title": "Choices" + }, + "created": { + "type": "integer", + "title": "Created" + }, + "model": { + "type": "string", + "title": "Model" + }, + "object": { + "type": "string", + "const": "text_completion", + "title": "Object", + "default": "text_completion" + } + }, + "type": "object", + "required": [ + "id", + "choices", + "created", + "model" + ], + "title": "OpenAICompletion", + "description": "Response from an OpenAI-compatible completion request." + }, + "OpenAICompletionChoice-Input": { + "properties": { + "finish_reason": { + "type": "string", + "title": "Finish Reason" + }, + "text": { + "type": "string", + "title": "Text" + }, + "index": { + "type": "integer", + "title": "Index" + }, + "logprobs": { + "$ref": "#/components/schemas/OpenAIChoiceLogprobs-Input" + } + }, + "type": "object", + "required": [ + "finish_reason", + "text", + "index" + ], + "title": "OpenAICompletionChoice", + "description": "A choice from an OpenAI-compatible completion response." + }, + "OpenAICompletionChoice-Output": { + "properties": { + "finish_reason": { + "type": "string", + "title": "Finish Reason" + }, + "text": { + "type": "string", + "title": "Text" + }, + "index": { + "type": "integer", + "title": "Index" + }, + "logprobs": { + "$ref": "#/components/schemas/OpenAIChoiceLogprobs-Output" + } + }, + "type": "object", + "required": [ + "finish_reason", + "text", + "index" + ], + "title": "OpenAICompletionChoice", + "description": "A choice from an OpenAI-compatible completion response." 
+ }, + "OpenAICompletionRequestWithExtraBody": { + "properties": { + "model": { + "type": "string", + "title": "Model" + }, + "prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "items": { + "type": "integer" + }, + "type": "array" + }, + { + "items": { + "items": { + "type": "integer" + }, + "type": "array" + }, + "type": "array" + } + ], + "title": "Prompt" + }, + "best_of": { + "title": "Best Of", + "type": "integer" + }, + "echo": { + "title": "Echo", + "type": "boolean" + }, + "frequency_penalty": { + "title": "Frequency Penalty", + "type": "number" + }, + "logit_bias": { + "title": "Logit Bias", + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + "logprobs": { + "title": "Logprobs", + "type": "boolean" + }, + "max_tokens": { + "title": "Max Tokens", + "type": "integer" + }, + "n": { + "title": "N", + "type": "integer" + }, + "presence_penalty": { + "title": "Presence Penalty", + "type": "number" + }, + "seed": { + "title": "Seed", + "type": "integer" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ], + "title": "Stop" + }, + "stream": { + "title": "Stream", + "type": "boolean" + }, + "stream_options": { + "title": "Stream Options", + "additionalProperties": true, + "type": "object" + }, + "temperature": { + "title": "Temperature", + "type": "number" + }, + "top_p": { + "title": "Top P", + "type": "number" + }, + "user": { + "title": "User", + "type": "string" + }, + "suffix": { + "title": "Suffix", + "type": "string" + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "model", + "prompt" + ], + "title": "OpenAICompletionRequestWithExtraBody", + "description": "Request parameters for OpenAI-compatible completion endpoint." + }, + "OpenAICreateVectorStoreFileBatchRequestWithExtraBody": { + "properties": { + "file_ids": { + "items": { + "type": "string" + }, + "type": "array", + "title": "File Ids" + }, + "attributes": { + "title": "Attributes", + "additionalProperties": true, + "type": "object" + }, + "chunking_strategy": { + "title": "Chunking Strategy", + "oneOf": [ + { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyAuto" + }, + { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyStatic" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "auto": "#/components/schemas/VectorStoreChunkingStrategyAuto", + "static": "#/components/schemas/VectorStoreChunkingStrategyStatic" + } + } + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "file_ids" + ], + "title": "OpenAICreateVectorStoreFileBatchRequestWithExtraBody", + "description": "Request to create a vector store file batch with extra_body support." + }, + "OpenAICreateVectorStoreRequestWithExtraBody": { + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "file_ids": { + "title": "File Ids", + "items": { + "type": "string" + }, + "type": "array" + }, + "expires_after": { + "title": "Expires After", + "additionalProperties": true, + "type": "object" + }, + "chunking_strategy": { + "title": "Chunking Strategy", + "additionalProperties": true, + "type": "object" + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object" + } + }, + "additionalProperties": true, + "type": "object", + "title": "OpenAICreateVectorStoreRequestWithExtraBody", + "description": "Request to create a vector store with extra_body support." 
+ }, + "OpenAIDeveloperMessageParam": { + "properties": { + "role": { + "type": "string", + "const": "developer", + "title": "Role", + "default": "developer" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + "type": "array" + } + ], + "title": "Content" + }, + "name": { + "title": "Name", + "type": "string" + } + }, + "type": "object", + "required": [ + "content" + ], + "title": "OpenAIDeveloperMessageParam", + "description": "A message from the developer in an OpenAI-compatible chat completion request." + }, + "OpenAIEmbeddingData": { + "properties": { + "object": { + "type": "string", + "const": "embedding", + "title": "Object", + "default": "embedding" + }, + "embedding": { + "anyOf": [ + { + "items": { + "type": "number" + }, + "type": "array" + }, + { + "type": "string" + } + ], + "title": "Embedding" + }, + "index": { + "type": "integer", + "title": "Index" + } + }, + "type": "object", + "required": [ + "embedding", + "index" + ], + "title": "OpenAIEmbeddingData", + "description": "A single embedding data object from an OpenAI-compatible embeddings response." + }, + "OpenAIEmbeddingUsage": { + "properties": { + "prompt_tokens": { + "type": "integer", + "title": "Prompt Tokens" + }, + "total_tokens": { + "type": "integer", + "title": "Total Tokens" + } + }, + "type": "object", + "required": [ + "prompt_tokens", + "total_tokens" + ], + "title": "OpenAIEmbeddingUsage", + "description": "Usage information for an OpenAI-compatible embeddings response." + }, + "OpenAIEmbeddingsRequestWithExtraBody": { + "properties": { + "model": { + "type": "string", + "title": "Model" + }, + "input": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ], + "title": "Input" + }, + "encoding_format": { + "title": "Encoding Format", + "default": "float", + "type": "string" + }, + "dimensions": { + "title": "Dimensions", + "type": "integer" + }, + "user": { + "title": "User", + "type": "string" + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "model", + "input" + ], + "title": "OpenAIEmbeddingsRequestWithExtraBody", + "description": "Request parameters for OpenAI-compatible embeddings endpoint." + }, + "OpenAIEmbeddingsResponse": { + "properties": { + "object": { + "type": "string", + "const": "list", + "title": "Object", + "default": "list" + }, + "data": { + "items": { + "$ref": "#/components/schemas/OpenAIEmbeddingData" + }, + "type": "array", + "title": "Data" + }, + "model": { + "type": "string", + "title": "Model" + }, + "usage": { + "$ref": "#/components/schemas/OpenAIEmbeddingUsage" + } + }, + "type": "object", + "required": [ + "data", + "model", + "usage" + ], + "title": "OpenAIEmbeddingsResponse", + "description": "Response from an OpenAI-compatible embeddings request." 
+ }, + "OpenAIFile": { + "properties": { + "type": { + "type": "string", + "const": "file", + "title": "Type", + "default": "file" + }, + "file": { + "$ref": "#/components/schemas/OpenAIFileFile" + } + }, + "type": "object", + "required": [ + "file" + ], + "title": "OpenAIFile" + }, + "OpenAIFileFile": { + "properties": { + "file_data": { + "title": "File Data", + "type": "string" + }, + "file_id": { + "title": "File Id", + "type": "string" + }, + "filename": { + "title": "Filename", + "type": "string" + } + }, + "type": "object", + "title": "OpenAIFileFile" + }, + "OpenAIFileObject": { + "properties": { + "object": { + "type": "string", + "const": "file", + "title": "Object", + "default": "file" + }, + "id": { + "type": "string", + "title": "Id" + }, + "bytes": { + "type": "integer", + "title": "Bytes" + }, + "created_at": { + "type": "integer", + "title": "Created At" + }, + "expires_at": { + "type": "integer", + "title": "Expires At" + }, + "filename": { + "type": "string", + "title": "Filename" + }, + "purpose": { + "$ref": "#/components/schemas/OpenAIFilePurpose" + } + }, + "type": "object", + "required": [ + "id", + "bytes", + "created_at", + "expires_at", + "filename", + "purpose" + ], + "title": "OpenAIFileObject", + "description": "OpenAI File object as defined in the OpenAI Files API." + }, + "OpenAIFilePurpose": { + "type": "string", + "enum": [ + "assistants", + "batch" + ], + "title": "OpenAIFilePurpose", + "description": "Valid purpose values for OpenAI Files API." + }, + "OpenAIImageURL": { + "properties": { + "url": { + "type": "string", + "title": "Url" + }, + "detail": { + "title": "Detail", + "type": "string" + } + }, + "type": "object", + "required": [ + "url" + ], + "title": "OpenAIImageURL", + "description": "Image URL specification for OpenAI-compatible chat completion messages." + }, + "OpenAIJSONSchema": { + "properties": { + "name": { + "type": "string", + "title": "Name" + }, + "description": { + "title": "Description", + "type": "string" + }, + "strict": { + "title": "Strict", + "type": "boolean" + }, + "schema": { + "title": "Schema", + "additionalProperties": true, + "type": "object" + } + }, + "type": "object", + "title": "OpenAIJSONSchema", + "description": "JSON schema specification for OpenAI-compatible structured response format." + }, + "OpenAIListModelsResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/OpenAIModel" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "OpenAIListModelsResponse" + }, + "OpenAIModel": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "object": { + "type": "string", + "const": "model", + "title": "Object", + "default": "model" + }, + "created": { + "type": "integer", + "title": "Created" + }, + "owned_by": { + "type": "string", + "title": "Owned By" + } + }, + "type": "object", + "required": [ + "id", + "created", + "owned_by" + ], + "title": "OpenAIModel", + "description": "A model from OpenAI." 
+ }, + "OpenAIResponseAnnotationCitation": { + "properties": { + "type": { + "type": "string", + "const": "url_citation", + "title": "Type", + "default": "url_citation" + }, + "end_index": { + "type": "integer", + "title": "End Index" + }, + "start_index": { + "type": "integer", + "title": "Start Index" + }, + "title": { + "type": "string", + "title": "Title" + }, + "url": { + "type": "string", + "title": "Url" + } + }, + "type": "object", + "required": [ + "end_index", + "start_index", + "title", + "url" + ], + "title": "OpenAIResponseAnnotationCitation", + "description": "URL citation annotation for referencing external web resources." + }, + "OpenAIResponseAnnotationContainerFileCitation": { + "properties": { + "type": { + "type": "string", + "const": "container_file_citation", + "title": "Type", + "default": "container_file_citation" + }, + "container_id": { + "type": "string", + "title": "Container Id" + }, + "end_index": { + "type": "integer", + "title": "End Index" + }, + "file_id": { + "type": "string", + "title": "File Id" + }, + "filename": { + "type": "string", + "title": "Filename" + }, + "start_index": { + "type": "integer", + "title": "Start Index" + } + }, + "type": "object", + "required": [ + "container_id", + "end_index", + "file_id", + "filename", + "start_index" + ], + "title": "OpenAIResponseAnnotationContainerFileCitation" + }, + "OpenAIResponseAnnotationFileCitation": { + "properties": { + "type": { + "type": "string", + "const": "file_citation", + "title": "Type", + "default": "file_citation" + }, + "file_id": { + "type": "string", + "title": "File Id" + }, + "filename": { + "type": "string", + "title": "Filename" + }, + "index": { + "type": "integer", + "title": "Index" + } + }, + "type": "object", + "required": [ + "file_id", + "filename", + "index" + ], + "title": "OpenAIResponseAnnotationFileCitation", + "description": "File citation annotation for referencing specific files in response content." + }, + "OpenAIResponseAnnotationFilePath": { + "properties": { + "type": { + "type": "string", + "const": "file_path", + "title": "Type", + "default": "file_path" + }, + "file_id": { + "type": "string", + "title": "File Id" + }, + "index": { + "type": "integer", + "title": "Index" + } + }, + "type": "object", + "required": [ + "file_id", + "index" + ], + "title": "OpenAIResponseAnnotationFilePath" + }, + "OpenAIResponseContentPartRefusal": { + "properties": { + "type": { + "type": "string", + "const": "refusal", + "title": "Type", + "default": "refusal" + }, + "refusal": { + "type": "string", + "title": "Refusal" + } + }, + "type": "object", + "required": [ + "refusal" + ], + "title": "OpenAIResponseContentPartRefusal", + "description": "Refusal content within a streamed response part." + }, + "OpenAIResponseError": { + "properties": { + "code": { + "type": "string", + "title": "Code" + }, + "message": { + "type": "string", + "title": "Message" + } + }, + "type": "object", + "required": [ + "code", + "message" + ], + "title": "OpenAIResponseError", + "description": "Error details for failed OpenAI response requests." + }, + "OpenAIResponseFormatJSONObject": { + "properties": { + "type": { + "type": "string", + "const": "json_object", + "title": "Type", + "default": "json_object" + } + }, + "type": "object", + "title": "OpenAIResponseFormatJSONObject", + "description": "JSON object response format for OpenAI-compatible chat completion requests." 
+ }, + "OpenAIResponseFormatJSONSchema": { + "properties": { + "type": { + "type": "string", + "const": "json_schema", + "title": "Type", + "default": "json_schema" + }, + "json_schema": { + "$ref": "#/components/schemas/OpenAIJSONSchema" + } + }, + "type": "object", + "required": [ + "json_schema" + ], + "title": "OpenAIResponseFormatJSONSchema", + "description": "JSON schema response format for OpenAI-compatible chat completion requests." + }, + "OpenAIResponseFormatText": { + "properties": { + "type": { + "type": "string", + "const": "text", + "title": "Type", + "default": "text" + } + }, + "type": "object", + "title": "OpenAIResponseFormatText", + "description": "Text response format for OpenAI-compatible chat completion requests." + }, + "OpenAIResponseInputFunctionToolCallOutput": { + "properties": { + "call_id": { + "type": "string", + "title": "Call Id" + }, + "output": { + "type": "string", + "title": "Output" + }, + "type": { + "type": "string", + "const": "function_call_output", + "title": "Type", + "default": "function_call_output" + }, + "id": { + "title": "Id", + "type": "string" + }, + "status": { + "title": "Status", + "type": "string" + } + }, + "type": "object", + "required": [ + "call_id", + "output" + ], + "title": "OpenAIResponseInputFunctionToolCallOutput", + "description": "This represents the output of a function call that gets passed back to the model." + }, + "OpenAIResponseInputMessageContentFile": { + "properties": { + "type": { + "type": "string", + "const": "input_file", + "title": "Type", + "default": "input_file" + }, + "file_data": { + "title": "File Data", + "type": "string" + }, + "file_id": { + "title": "File Id", + "type": "string" + }, + "file_url": { + "title": "File Url", + "type": "string" + }, + "filename": { + "title": "Filename", + "type": "string" + } + }, + "type": "object", + "title": "OpenAIResponseInputMessageContentFile", + "description": "File content for input messages in OpenAI response format." + }, + "OpenAIResponseInputMessageContentImage": { + "properties": { + "detail": { + "anyOf": [ + { + "type": "string", + "const": "low" + }, + { + "type": "string", + "const": "high" + }, + { + "type": "string", + "const": "auto" + } + ], + "title": "Detail", + "default": "auto" + }, + "type": { + "type": "string", + "const": "input_image", + "title": "Type", + "default": "input_image" + }, + "file_id": { + "title": "File Id", + "type": "string" + }, + "image_url": { + "title": "Image Url", + "type": "string" + } + }, + "type": "object", + "title": "OpenAIResponseInputMessageContentImage", + "description": "Image content for input messages in OpenAI response format." + }, + "OpenAIResponseInputMessageContentText": { + "properties": { + "text": { + "type": "string", + "title": "Text" + }, + "type": { + "type": "string", + "const": "input_text", + "title": "Type", + "default": "input_text" + } + }, + "type": "object", + "required": [ + "text" + ], + "title": "OpenAIResponseInputMessageContentText", + "description": "Text content for input messages in OpenAI response format." 
+ }, + "OpenAIResponseInputToolFileSearch": { + "properties": { + "type": { + "type": "string", + "const": "file_search", + "title": "Type", + "default": "file_search" + }, + "vector_store_ids": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Vector Store Ids" + }, + "filters": { + "title": "Filters", + "additionalProperties": true, + "type": "object" + }, + "max_num_results": { + "title": "Max Num Results", + "default": 10, + "type": "integer", + "maximum": 50.0, + "minimum": 1.0 + }, + "ranking_options": { + "$ref": "#/components/schemas/SearchRankingOptions" + } + }, + "type": "object", + "required": [ + "vector_store_ids" + ], + "title": "OpenAIResponseInputToolFileSearch", + "description": "File search tool configuration for OpenAI response inputs." + }, + "OpenAIResponseInputToolFunction": { + "properties": { + "type": { + "type": "string", + "const": "function", + "title": "Type", + "default": "function" + }, + "name": { + "type": "string", + "title": "Name" + }, + "description": { + "title": "Description", + "type": "string" + }, + "parameters": { + "title": "Parameters", + "additionalProperties": true, + "type": "object" + }, + "strict": { + "title": "Strict", + "type": "boolean" + } + }, + "type": "object", + "required": [ + "name", + "parameters" + ], + "title": "OpenAIResponseInputToolFunction", + "description": "Function tool configuration for OpenAI response inputs." + }, + "OpenAIResponseInputToolMCP": { + "properties": { + "type": { + "type": "string", + "const": "mcp", + "title": "Type", + "default": "mcp" + }, + "server_label": { + "type": "string", + "title": "Server Label" + }, + "server_url": { + "type": "string", + "title": "Server Url" + }, + "headers": { + "title": "Headers", + "additionalProperties": true, + "type": "object" + }, + "require_approval": { + "anyOf": [ + { + "type": "string", + "const": "always" + }, + { + "type": "string", + "const": "never" + }, + { + "$ref": "#/components/schemas/ApprovalFilter" + } + ], + "title": "Require Approval", + "default": "never" + }, + "allowed_tools": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "$ref": "#/components/schemas/AllowedToolsFilter" + } + ], + "title": "Allowed Tools" + } + }, + "type": "object", + "required": [ + "server_label", + "server_url" + ], + "title": "OpenAIResponseInputToolMCP", + "description": "Model Context Protocol (MCP) tool configuration for OpenAI response inputs." + }, + "OpenAIResponseInputToolWebSearch": { + "properties": { + "type": { + "anyOf": [ + { + "type": "string", + "const": "web_search" + }, + { + "type": "string", + "const": "web_search_preview" + }, + { + "type": "string", + "const": "web_search_preview_2025_03_11" + } + ], + "title": "Type", + "default": "web_search" + }, + "search_context_size": { + "title": "Search Context Size", + "default": "medium", + "type": "string", + "pattern": "^low|medium|high$" + } + }, + "type": "object", + "title": "OpenAIResponseInputToolWebSearch", + "description": "Web search tool configuration for OpenAI response inputs." 
+ }, + "OpenAIResponseMCPApprovalRequest": { + "properties": { + "arguments": { + "type": "string", + "title": "Arguments" + }, + "id": { + "type": "string", + "title": "Id" + }, + "name": { + "type": "string", + "title": "Name" + }, + "server_label": { + "type": "string", + "title": "Server Label" + }, + "type": { + "type": "string", + "const": "mcp_approval_request", + "title": "Type", + "default": "mcp_approval_request" + } + }, + "type": "object", + "required": [ + "arguments", + "id", + "name", + "server_label" + ], + "title": "OpenAIResponseMCPApprovalRequest", + "description": "A request for human approval of a tool invocation." + }, + "OpenAIResponseMCPApprovalResponse": { + "properties": { + "approval_request_id": { + "type": "string", + "title": "Approval Request Id" + }, + "approve": { + "type": "boolean", + "title": "Approve" + }, + "type": { + "type": "string", + "const": "mcp_approval_response", + "title": "Type", + "default": "mcp_approval_response" + }, + "id": { + "title": "Id", + "type": "string" + }, + "reason": { + "title": "Reason", + "type": "string" + } + }, + "type": "object", + "required": [ + "approval_request_id", + "approve" + ], + "title": "OpenAIResponseMCPApprovalResponse", + "description": "A response to an MCP approval request." + }, + "OpenAIResponseMessage-Input": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentImage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentFile" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "input_file": "#/components/schemas/OpenAIResponseInputMessageContentFile", + "input_image": "#/components/schemas/OpenAIResponseInputMessageContentImage", + "input_text": "#/components/schemas/OpenAIResponseInputMessageContentText" + } + } + }, + "type": "array" + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageContentOutputText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseContentPartRefusal" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "output_text": "#/components/schemas/OpenAIResponseOutputMessageContentOutputText", + "refusal": "#/components/schemas/OpenAIResponseContentPartRefusal" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "role": { + "anyOf": [ + { + "type": "string", + "const": "system" + }, + { + "type": "string", + "const": "developer" + }, + { + "type": "string", + "const": "user" + }, + { + "type": "string", + "const": "assistant" + } + ], + "title": "Role" + }, + "type": { + "type": "string", + "const": "message", + "title": "Type", + "default": "message" + }, + "id": { + "title": "Id", + "type": "string" + }, + "status": { + "title": "Status", + "type": "string" + } + }, + "type": "object", + "required": [ + "content", + "role" + ], + "title": "OpenAIResponseMessage", + "description": "Corresponds to the various Message types in the Responses API.\nThey are all under one type because the Responses API gives them all\nthe same \"type\" value, and there is no way to tell them apart in certain\nscenarios." 
+ }, + "OpenAIResponseMessage-Output": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentImage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentFile" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "input_file": "#/components/schemas/OpenAIResponseInputMessageContentFile", + "input_image": "#/components/schemas/OpenAIResponseInputMessageContentImage", + "input_text": "#/components/schemas/OpenAIResponseInputMessageContentText" + } + } + }, + "type": "array" + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageContentOutputText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseContentPartRefusal" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "output_text": "#/components/schemas/OpenAIResponseOutputMessageContentOutputText", + "refusal": "#/components/schemas/OpenAIResponseContentPartRefusal" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "role": { + "anyOf": [ + { + "type": "string", + "const": "system" + }, + { + "type": "string", + "const": "developer" + }, + { + "type": "string", + "const": "user" + }, + { + "type": "string", + "const": "assistant" + } + ], + "title": "Role" + }, + "type": { + "type": "string", + "const": "message", + "title": "Type", + "default": "message" + }, + "id": { + "title": "Id", + "type": "string" + }, + "status": { + "title": "Status", + "type": "string" + } + }, + "type": "object", + "required": [ + "content", + "role" + ], + "title": "OpenAIResponseMessage", + "description": "Corresponds to the various Message types in the Responses API.\nThey are all under one type because the Responses API gives them all\nthe same \"type\" value, and there is no way to tell them apart in certain\nscenarios." 
+ }, + "OpenAIResponseObject": { + "properties": { + "created_at": { + "type": "integer", + "title": "Created At" + }, + "error": { + "$ref": "#/components/schemas/OpenAIResponseError" + }, + "id": { + "type": "string", + "title": "Id" + }, + "model": { + "type": "string", + "title": "Model" + }, + "object": { + "type": "string", + "const": "response", + "title": "Object", + "default": "response" + }, + "output": { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage-Output" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "file_search_call": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall", + "function_call": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall", + "mcp_approval_request": "#/components/schemas/OpenAIResponseMCPApprovalRequest", + "mcp_call": "#/components/schemas/OpenAIResponseOutputMessageMCPCall", + "mcp_list_tools": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools", + "message": "#/components/schemas/OpenAIResponseMessage-Output", + "web_search_call": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + } + } + }, + "type": "array", + "title": "Output" + }, + "parallel_tool_calls": { + "type": "boolean", + "title": "Parallel Tool Calls", + "default": false + }, + "previous_response_id": { + "title": "Previous Response Id", + "type": "string" + }, + "prompt": { + "$ref": "#/components/schemas/OpenAIResponsePrompt" + }, + "status": { + "type": "string", + "title": "Status" + }, + "temperature": { + "title": "Temperature", + "type": "number" + }, + "text": { + "$ref": "#/components/schemas/OpenAIResponseText", + "default": { + "format": { + "type": "text" + } + } + }, + "top_p": { + "title": "Top P", + "type": "number" + }, + "tools": { + "title": "Tools", + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseInputToolWebSearch" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolFileSearch" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolFunction" + }, + { + "$ref": "#/components/schemas/OpenAIResponseToolMCP" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "file_search": "#/components/schemas/OpenAIResponseInputToolFileSearch", + "function": "#/components/schemas/OpenAIResponseInputToolFunction", + "mcp": "#/components/schemas/OpenAIResponseToolMCP", + "web_search": "#/components/schemas/OpenAIResponseInputToolWebSearch", + "web_search_preview": "#/components/schemas/OpenAIResponseInputToolWebSearch", + "web_search_preview_2025_03_11": "#/components/schemas/OpenAIResponseInputToolWebSearch" + } + } + }, + "type": "array" + }, + "truncation": { + "title": "Truncation", + "type": "string" + }, + "usage": { + "$ref": "#/components/schemas/OpenAIResponseUsage" + }, + "instructions": { + "title": "Instructions", + "type": "string" + } + }, + "type": "object", + "required": [ + "created_at", + "id", + "model", + "output", + "status" + ], + "title": "OpenAIResponseObject", + "description": "Complete 
OpenAI response object containing generation results and metadata." + }, + "OpenAIResponseOutputMessageContentOutputText": { + "properties": { + "text": { + "type": "string", + "title": "Text" + }, + "type": { + "type": "string", + "const": "output_text", + "title": "Type", + "default": "output_text" + }, + "annotations": { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseAnnotationFileCitation" + }, + { + "$ref": "#/components/schemas/OpenAIResponseAnnotationCitation" + }, + { + "$ref": "#/components/schemas/OpenAIResponseAnnotationContainerFileCitation" + }, + { + "$ref": "#/components/schemas/OpenAIResponseAnnotationFilePath" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "container_file_citation": "#/components/schemas/OpenAIResponseAnnotationContainerFileCitation", + "file_citation": "#/components/schemas/OpenAIResponseAnnotationFileCitation", + "file_path": "#/components/schemas/OpenAIResponseAnnotationFilePath", + "url_citation": "#/components/schemas/OpenAIResponseAnnotationCitation" + } + } + }, + "type": "array", + "title": "Annotations" + } + }, + "type": "object", + "required": [ + "text" + ], + "title": "OpenAIResponseOutputMessageContentOutputText" + }, + "OpenAIResponseOutputMessageFileSearchToolCall": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "queries": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Queries" + }, + "status": { + "type": "string", + "title": "Status" + }, + "type": { + "type": "string", + "const": "file_search_call", + "title": "Type", + "default": "file_search_call" + }, + "results": { + "title": "Results", + "items": { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCallResults" + }, + "type": "array" + } + }, + "type": "object", + "required": [ + "id", + "queries", + "status" + ], + "title": "OpenAIResponseOutputMessageFileSearchToolCall", + "description": "File search tool call output message for OpenAI responses." + }, + "OpenAIResponseOutputMessageFileSearchToolCallResults": { + "properties": { + "attributes": { + "additionalProperties": true, + "type": "object", + "title": "Attributes" + }, + "file_id": { + "type": "string", + "title": "File Id" + }, + "filename": { + "type": "string", + "title": "Filename" + }, + "score": { + "type": "number", + "title": "Score" + }, + "text": { + "type": "string", + "title": "Text" + } + }, + "type": "object", + "required": [ + "attributes", + "file_id", + "filename", + "score", + "text" + ], + "title": "OpenAIResponseOutputMessageFileSearchToolCallResults", + "description": "Search results returned by the file search operation." + }, + "OpenAIResponseOutputMessageFunctionToolCall": { + "properties": { + "call_id": { + "type": "string", + "title": "Call Id" + }, + "name": { + "type": "string", + "title": "Name" + }, + "arguments": { + "type": "string", + "title": "Arguments" + }, + "type": { + "type": "string", + "const": "function_call", + "title": "Type", + "default": "function_call" + }, + "id": { + "title": "Id", + "type": "string" + }, + "status": { + "title": "Status", + "type": "string" + } + }, + "type": "object", + "required": [ + "call_id", + "name", + "arguments" + ], + "title": "OpenAIResponseOutputMessageFunctionToolCall", + "description": "Function tool call output message for OpenAI responses." 
+ }, + "OpenAIResponseOutputMessageMCPCall": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "type": { + "type": "string", + "const": "mcp_call", + "title": "Type", + "default": "mcp_call" + }, + "arguments": { + "type": "string", + "title": "Arguments" + }, + "name": { + "type": "string", + "title": "Name" + }, + "server_label": { + "type": "string", + "title": "Server Label" + }, + "error": { + "title": "Error", + "type": "string" + }, + "output": { + "title": "Output", + "type": "string" + } + }, + "type": "object", + "required": [ + "id", + "arguments", + "name", + "server_label" + ], + "title": "OpenAIResponseOutputMessageMCPCall", + "description": "Model Context Protocol (MCP) call output message for OpenAI responses." + }, + "OpenAIResponseOutputMessageMCPListTools": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "type": { + "type": "string", + "const": "mcp_list_tools", + "title": "Type", + "default": "mcp_list_tools" + }, + "server_label": { + "type": "string", + "title": "Server Label" + }, + "tools": { + "items": { + "$ref": "#/components/schemas/MCPListToolsTool" + }, + "type": "array", + "title": "Tools" + } + }, + "type": "object", + "required": [ + "id", + "server_label", + "tools" + ], + "title": "OpenAIResponseOutputMessageMCPListTools", + "description": "MCP list tools output message containing available tools from an MCP server." + }, + "OpenAIResponseOutputMessageWebSearchToolCall": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "status": { + "type": "string", + "title": "Status" + }, + "type": { + "type": "string", + "const": "web_search_call", + "title": "Type", + "default": "web_search_call" + } + }, + "type": "object", + "required": [ + "id", + "status" + ], + "title": "OpenAIResponseOutputMessageWebSearchToolCall", + "description": "Web search tool call output message for OpenAI responses." + }, + "OpenAIResponsePrompt": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "variables": { + "title": "Variables", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentImage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentFile" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "input_file": "#/components/schemas/OpenAIResponseInputMessageContentFile", + "input_image": "#/components/schemas/OpenAIResponseInputMessageContentImage", + "input_text": "#/components/schemas/OpenAIResponseInputMessageContentText" + } + } + }, + "type": "object" + }, + "version": { + "title": "Version", + "type": "string" + } + }, + "type": "object", + "required": [ + "id" + ], + "title": "OpenAIResponsePrompt", + "description": "OpenAI compatible Prompt object that is used in OpenAI responses." + }, + "OpenAIResponseText": { + "properties": { + "format": { + "$ref": "#/components/schemas/OpenAIResponseTextFormat" + } + }, + "type": "object", + "title": "OpenAIResponseText", + "description": "Text response configuration for OpenAI responses." 
+ }, + "OpenAIResponseTextFormat": { + "properties": { + "type": { + "anyOf": [ + { + "type": "string", + "const": "text" + }, + { + "type": "string", + "const": "json_schema" + }, + { + "type": "string", + "const": "json_object" + } + ], + "title": "Type" + }, + "name": { + "title": "Name", + "type": "string" + }, + "schema": { + "title": "Schema", + "additionalProperties": true, + "type": "object" + }, + "description": { + "title": "Description", + "type": "string" + }, + "strict": { + "title": "Strict", + "type": "boolean" + } + }, + "type": "object", + "title": "OpenAIResponseTextFormat", + "description": "Configuration for Responses API text format." + }, + "OpenAIResponseToolMCP": { + "properties": { + "type": { + "type": "string", + "const": "mcp", + "title": "Type", + "default": "mcp" + }, + "server_label": { + "type": "string", + "title": "Server Label" + }, + "allowed_tools": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "$ref": "#/components/schemas/AllowedToolsFilter" + } + ], + "title": "Allowed Tools" + } + }, + "type": "object", + "required": [ + "server_label" + ], + "title": "OpenAIResponseToolMCP", + "description": "Model Context Protocol (MCP) tool configuration for OpenAI response object." + }, + "OpenAIResponseUsage": { + "properties": { + "input_tokens": { + "type": "integer", + "title": "Input Tokens" + }, + "output_tokens": { + "type": "integer", + "title": "Output Tokens" + }, + "total_tokens": { + "type": "integer", + "title": "Total Tokens" + }, + "input_tokens_details": { + "$ref": "#/components/schemas/OpenAIResponseUsageInputTokensDetails" + }, + "output_tokens_details": { + "$ref": "#/components/schemas/OpenAIResponseUsageOutputTokensDetails" + } + }, + "type": "object", + "required": [ + "input_tokens", + "output_tokens", + "total_tokens" + ], + "title": "OpenAIResponseUsage", + "description": "Usage information for OpenAI response." + }, + "OpenAIResponseUsageInputTokensDetails": { + "properties": { + "cached_tokens": { + "title": "Cached Tokens", + "type": "integer" + } + }, + "type": "object", + "title": "OpenAIResponseUsageInputTokensDetails", + "description": "Token details for input tokens in OpenAI response usage." + }, + "OpenAIResponseUsageOutputTokensDetails": { + "properties": { + "reasoning_tokens": { + "title": "Reasoning Tokens", + "type": "integer" + } + }, + "type": "object", + "title": "OpenAIResponseUsageOutputTokensDetails", + "description": "Token details for output tokens in OpenAI response usage." + }, + "OpenAISystemMessageParam": { + "properties": { + "role": { + "type": "string", + "const": "system", + "title": "Role", + "default": "system" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + "type": "array" + } + ], + "title": "Content" + }, + "name": { + "title": "Name", + "type": "string" + } + }, + "type": "object", + "required": [ + "content" + ], + "title": "OpenAISystemMessageParam", + "description": "A system message providing instructions or context to the model." 
+ }, + "OpenAITokenLogProb": { + "properties": { + "token": { + "type": "string", + "title": "Token" + }, + "bytes": { + "title": "Bytes", + "items": { + "type": "integer" + }, + "type": "array" + }, + "logprob": { + "type": "number", + "title": "Logprob" + }, + "top_logprobs": { + "items": { + "$ref": "#/components/schemas/OpenAITopLogProb" + }, + "type": "array", + "title": "Top Logprobs" + } + }, + "type": "object", + "required": [ + "token", + "logprob", + "top_logprobs" + ], + "title": "OpenAITokenLogProb", + "description": "The log probability for a token from an OpenAI-compatible chat completion response." + }, + "OpenAIToolMessageParam": { + "properties": { + "role": { + "type": "string", + "const": "tool", + "title": "Role", + "default": "tool" + }, + "tool_call_id": { + "type": "string", + "title": "Tool Call Id" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + "type": "array" + } + ], + "title": "Content" + } + }, + "type": "object", + "required": [ + "tool_call_id", + "content" + ], + "title": "OpenAIToolMessageParam", + "description": "A message representing the result of a tool invocation in an OpenAI-compatible chat completion request." + }, + "OpenAITopLogProb": { + "properties": { + "token": { + "type": "string", + "title": "Token" + }, + "bytes": { + "title": "Bytes", + "items": { + "type": "integer" + }, + "type": "array" + }, + "logprob": { + "type": "number", + "title": "Logprob" + } + }, + "type": "object", + "required": [ + "token", + "logprob" + ], + "title": "OpenAITopLogProb", + "description": "The top log probability for a token from an OpenAI-compatible chat completion response." + }, + "OpenAIUserMessageParam-Input": { + "properties": { + "role": { + "type": "string", + "const": "user", + "title": "Role", + "default": "user" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartImageParam" + }, + { + "$ref": "#/components/schemas/OpenAIFile" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "file": "#/components/schemas/OpenAIFile", + "image_url": "#/components/schemas/OpenAIChatCompletionContentPartImageParam", + "text": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "name": { + "title": "Name", + "type": "string" + } + }, + "type": "object", + "required": [ + "content" + ], + "title": "OpenAIUserMessageParam", + "description": "A message from the user in an OpenAI-compatible chat completion request." 
+ }, + "OpenAIUserMessageParam-Output": { + "properties": { + "role": { + "type": "string", + "const": "user", + "title": "Role", + "default": "user" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartImageParam" + }, + { + "$ref": "#/components/schemas/OpenAIFile" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "file": "#/components/schemas/OpenAIFile", + "image_url": "#/components/schemas/OpenAIChatCompletionContentPartImageParam", + "text": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "name": { + "title": "Name", + "type": "string" + } + }, + "type": "object", + "required": [ + "content" + ], + "title": "OpenAIUserMessageParam", + "description": "A message from the user in an OpenAI-compatible chat completion request." + }, + "OptimizerConfig": { + "properties": { + "optimizer_type": { + "$ref": "#/components/schemas/OptimizerType" + }, + "lr": { + "type": "number", + "title": "Lr" + }, + "weight_decay": { + "type": "number", + "title": "Weight Decay" + }, + "num_warmup_steps": { + "type": "integer", + "title": "Num Warmup Steps" + } + }, + "type": "object", + "required": [ + "optimizer_type", + "lr", + "weight_decay", + "num_warmup_steps" + ], + "title": "OptimizerConfig", + "description": "Configuration parameters for the optimization algorithm." + }, + "OptimizerType": { + "type": "string", + "enum": [ + "adam", + "adamw", + "sgd" + ], + "title": "OptimizerType", + "description": "Available optimizer algorithms for training." + }, + "Order": { + "type": "string", + "enum": [ + "asc", + "desc" + ], + "title": "Order", + "description": "Sort order for paginated responses." + }, + "OutputTokensDetails": { + "properties": { + "reasoning_tokens": { + "type": "integer", + "title": "Reasoning Tokens" + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "reasoning_tokens" + ], + "title": "OutputTokensDetails" + }, + "PostTrainingJob": { + "properties": { + "job_uuid": { + "type": "string", + "title": "Job Uuid" + } + }, + "type": "object", + "required": [ + "job_uuid" + ], + "title": "PostTrainingJob" + }, + "Prompt": { + "properties": { + "prompt": { + "title": "Prompt", + "description": "The system prompt with variable placeholders", + "type": "string" + }, + "version": { + "type": "integer", + "minimum": 1.0, + "title": "Version", + "description": "Version (integer starting at 1, incremented on save)" + }, + "prompt_id": { + "type": "string", + "title": "Prompt Id", + "description": "Unique identifier in format 'pmpt_<48-digit-hash>'" + }, + "variables": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Variables", + "description": "List of variable names that can be used in the prompt template" + }, + "is_default": { + "type": "boolean", + "title": "Is Default", + "description": "Boolean indicating whether this version is the default version", + "default": false + } + }, + "type": "object", + "required": [ + "version", + "prompt_id" + ], + "title": "Prompt", + "description": "A prompt resource representing a stored OpenAI Compatible prompt template in Llama Stack." 
+ }, + "ProviderInfo": { + "properties": { + "api": { + "type": "string", + "title": "Api" + }, + "provider_id": { + "type": "string", + "title": "Provider Id" + }, + "provider_type": { + "type": "string", + "title": "Provider Type" + }, + "config": { + "additionalProperties": true, + "type": "object", + "title": "Config" + }, + "health": { + "additionalProperties": true, + "type": "object", + "title": "Health" + } + }, + "type": "object", + "required": [ + "api", + "provider_id", + "provider_type", + "config", + "health" + ], + "title": "ProviderInfo", + "description": "Information about a registered provider including its configuration and health status." + }, + "QueryChunksResponse": { + "properties": { + "chunks": { + "items": { + "$ref": "#/components/schemas/Chunk-Output" + }, + "type": "array", + "title": "Chunks" + }, + "scores": { + "items": { + "type": "number" + }, + "type": "array", + "title": "Scores" + } + }, + "type": "object", + "required": [ + "chunks", + "scores" + ], + "title": "QueryChunksResponse", + "description": "Response from querying chunks in a vector database." + }, + "RAGQueryConfig": { + "properties": { + "query_generator_config": { + "oneOf": [ + { + "$ref": "#/components/schemas/DefaultRAGQueryGeneratorConfig" + }, + { + "$ref": "#/components/schemas/LLMRAGQueryGeneratorConfig" + } + ], + "title": "Query Generator Config", + "default": { + "type": "default", + "separator": " " + }, + "discriminator": { + "propertyName": "type", + "mapping": { + "default": "#/components/schemas/DefaultRAGQueryGeneratorConfig", + "llm": "#/components/schemas/LLMRAGQueryGeneratorConfig" + } + } + }, + "max_tokens_in_context": { + "type": "integer", + "title": "Max Tokens In Context", + "default": 4096 + }, + "max_chunks": { + "type": "integer", + "title": "Max Chunks", + "default": 5 + }, + "chunk_template": { + "type": "string", + "title": "Chunk Template", + "default": "Result {index}\nContent: {chunk.content}\nMetadata: {metadata}\n" + }, + "mode": { + "default": "vector", + "$ref": "#/components/schemas/RAGSearchMode" + }, + "ranker": { + "title": "Ranker", + "oneOf": [ + { + "$ref": "#/components/schemas/RRFRanker" + }, + { + "$ref": "#/components/schemas/WeightedRanker" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "rrf": "#/components/schemas/RRFRanker", + "weighted": "#/components/schemas/WeightedRanker" + } + } + } + }, + "type": "object", + "title": "RAGQueryConfig", + "description": "Configuration for the RAG query generation." 
+ }, + "RAGQueryResult": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata" + } + }, + "type": "object", + "title": "RAGQueryResult", + "description": "Result of a RAG query containing retrieved content and metadata." + }, + "RAGSearchMode": { + "type": "string", + "enum": [ + "vector", + "keyword", + "hybrid" + ], + "title": "RAGSearchMode", + "description": "Search modes for RAG query retrieval:\n- VECTOR: Uses vector similarity search for semantic matching\n- KEYWORD: Uses keyword-based search for exact matching\n- HYBRID: Combines both vector and keyword search for better results" + }, + "RRFRanker": { + "properties": { + "type": { + "type": "string", + "const": "rrf", + "title": "Type", + "default": "rrf" + }, + "impact_factor": { + "type": "number", + "title": "Impact Factor", + "default": 60.0, + "minimum": 0.0 + } + }, + "type": "object", + "title": "RRFRanker", + "description": "Reciprocal Rank Fusion (RRF) ranker configuration." + }, + "RegexParserScoringFnParams": { + "properties": { + "type": { + "type": "string", + "const": "regex_parser", + "title": "Type", + "default": "regex_parser" + }, + "parsing_regexes": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Parsing Regexes", + "description": "Regex to extract the answer from generated response" + }, + "aggregation_functions": { + "items": { + "$ref": "#/components/schemas/AggregationFunctionType" + }, + "type": "array", + "title": "Aggregation Functions", + "description": "Aggregation functions to apply to the scores of each row" + } + }, + "type": "object", + "title": "RegexParserScoringFnParams", + "description": "Parameters for regex parser scoring function configuration." + }, + "RerankData": { + "properties": { + "index": { + "type": "integer", + "title": "Index" + }, + "relevance_score": { + "type": "number", + "title": "Relevance Score" + } + }, + "type": "object", + "required": [ + "index", + "relevance_score" + ], + "title": "RerankData", + "description": "A single rerank result from a reranking response." + }, + "RerankResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/RerankData" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "RerankResponse", + "description": "Response from a reranking request." 
+ }, + "RouteInfo": { + "properties": { + "route": { + "type": "string", + "title": "Route" + }, + "method": { + "type": "string", + "title": "Method" + }, + "provider_types": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Provider Types" + } + }, + "type": "object", + "required": [ + "route", + "method", + "provider_types" + ], + "title": "RouteInfo", + "description": "Information about an API route including its path, method, and implementing providers." + }, + "RowsDataSource": { + "properties": { + "type": { + "type": "string", + "const": "rows", + "title": "Type", + "default": "rows" + }, + "rows": { + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array", + "title": "Rows" + } + }, + "type": "object", + "required": [ + "rows" + ], + "title": "RowsDataSource", + "description": "A dataset stored in rows." + }, + "RunShieldResponse": { + "properties": { + "violation": { + "$ref": "#/components/schemas/SafetyViolation" + } + }, + "type": "object", + "title": "RunShieldResponse", + "description": "Response from running a safety shield." + }, + "SafetyViolation": { + "properties": { + "violation_level": { + "$ref": "#/components/schemas/ViolationLevel" + }, + "user_message": { + "title": "User Message", + "type": "string" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata" + } + }, + "type": "object", + "required": [ + "violation_level" + ], + "title": "SafetyViolation", + "description": "Details of a safety violation detected by content moderation." + }, + "SamplingParams": { + "properties": { + "strategy": { + "oneOf": [ + { + "$ref": "#/components/schemas/GreedySamplingStrategy" + }, + { + "$ref": "#/components/schemas/TopPSamplingStrategy" + }, + { + "$ref": "#/components/schemas/TopKSamplingStrategy" + } + ], + "title": "Strategy", + "discriminator": { + "propertyName": "type", + "mapping": { + "greedy": "#/components/schemas/GreedySamplingStrategy", + "top_k": "#/components/schemas/TopKSamplingStrategy", + "top_p": "#/components/schemas/TopPSamplingStrategy" + } + } + }, + "max_tokens": { + "title": "Max Tokens", + "type": "integer" + }, + "repetition_penalty": { + "title": "Repetition Penalty", + "default": 1.0, + "type": "number" + }, + "stop": { + "title": "Stop", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object", + "title": "SamplingParams", + "description": "Sampling parameters." + }, + "ScoreBatchResponse": { + "properties": { + "dataset_id": { + "title": "Dataset Id", + "type": "string" + }, + "results": { + "additionalProperties": { + "$ref": "#/components/schemas/ScoringResult" + }, + "type": "object", + "title": "Results" + } + }, + "type": "object", + "required": [ + "results" + ], + "title": "ScoreBatchResponse", + "description": "Response from batch scoring operations on datasets." + }, + "ScoreResponse": { + "properties": { + "results": { + "additionalProperties": { + "$ref": "#/components/schemas/ScoringResult" + }, + "type": "object", + "title": "Results" + } + }, + "type": "object", + "required": [ + "results" + ], + "title": "ScoreResponse", + "description": "The response from scoring." 
+ }, + "ScoringFn-Input": { + "properties": { + "identifier": { + "type": "string", + "title": "Identifier", + "description": "Unique identifier for this resource in llama stack" + }, + "provider_resource_id": { + "title": "Provider Resource Id", + "description": "Unique identifier for this resource in the provider", + "type": "string" + }, + "provider_id": { + "type": "string", + "title": "Provider Id", + "description": "ID of the provider that owns this resource" + }, + "type": { + "type": "string", + "const": "scoring_function", + "title": "Type", + "default": "scoring_function" + }, + "description": { + "title": "Description", + "type": "string" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata", + "description": "Any additional metadata for this definition" + }, + "return_type": { + "oneOf": [ + { + "$ref": "#/components/schemas/StringType" + }, + { + "$ref": "#/components/schemas/NumberType" + }, + { + "$ref": "#/components/schemas/BooleanType" + }, + { + "$ref": "#/components/schemas/ArrayType" + }, + { + "$ref": "#/components/schemas/ObjectType" + }, + { + "$ref": "#/components/schemas/JsonType" + }, + { + "$ref": "#/components/schemas/UnionType" + }, + { + "$ref": "#/components/schemas/ChatCompletionInputType" + }, + { + "$ref": "#/components/schemas/CompletionInputType" + }, + { + "$ref": "#/components/schemas/AgentTurnInputType" + } + ], + "title": "Return Type", + "description": "The return type of the deterministic function", + "discriminator": { + "propertyName": "type", + "mapping": { + "agent_turn_input": "#/components/schemas/AgentTurnInputType", + "array": "#/components/schemas/ArrayType", + "boolean": "#/components/schemas/BooleanType", + "chat_completion_input": "#/components/schemas/ChatCompletionInputType", + "completion_input": "#/components/schemas/CompletionInputType", + "json": "#/components/schemas/JsonType", + "number": "#/components/schemas/NumberType", + "object": "#/components/schemas/ObjectType", + "string": "#/components/schemas/StringType", + "union": "#/components/schemas/UnionType" + } + } + }, + "params": { + "title": "Params", + "description": "The parameters for the scoring function for benchmark eval, these can be overridden for app eval", + "oneOf": [ + { + "$ref": "#/components/schemas/LLMAsJudgeScoringFnParams" + }, + { + "$ref": "#/components/schemas/RegexParserScoringFnParams" + }, + { + "$ref": "#/components/schemas/BasicScoringFnParams" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "basic": "#/components/schemas/BasicScoringFnParams", + "llm_as_judge": "#/components/schemas/LLMAsJudgeScoringFnParams", + "regex_parser": "#/components/schemas/RegexParserScoringFnParams" + } + } + } + }, + "type": "object", + "required": [ + "identifier", + "provider_id", + "return_type" + ], + "title": "ScoringFn", + "description": "A scoring function resource for evaluating model outputs." 
+ }, + "ScoringFn-Output": { + "properties": { + "identifier": { + "type": "string", + "title": "Identifier", + "description": "Unique identifier for this resource in llama stack" + }, + "provider_resource_id": { + "title": "Provider Resource Id", + "description": "Unique identifier for this resource in the provider", + "type": "string" + }, + "provider_id": { + "type": "string", + "title": "Provider Id", + "description": "ID of the provider that owns this resource" + }, + "type": { + "type": "string", + "const": "scoring_function", + "title": "Type", + "default": "scoring_function" + }, + "description": { + "title": "Description", + "type": "string" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata", + "description": "Any additional metadata for this definition" + }, + "return_type": { + "oneOf": [ + { + "$ref": "#/components/schemas/StringType" + }, + { + "$ref": "#/components/schemas/NumberType" + }, + { + "$ref": "#/components/schemas/BooleanType" + }, + { + "$ref": "#/components/schemas/ArrayType" + }, + { + "$ref": "#/components/schemas/ObjectType" + }, + { + "$ref": "#/components/schemas/JsonType" + }, + { + "$ref": "#/components/schemas/UnionType" + }, + { + "$ref": "#/components/schemas/ChatCompletionInputType" + }, + { + "$ref": "#/components/schemas/CompletionInputType" + }, + { + "$ref": "#/components/schemas/AgentTurnInputType" + } + ], + "title": "Return Type", + "description": "The return type of the deterministic function", + "discriminator": { + "propertyName": "type", + "mapping": { + "agent_turn_input": "#/components/schemas/AgentTurnInputType", + "array": "#/components/schemas/ArrayType", + "boolean": "#/components/schemas/BooleanType", + "chat_completion_input": "#/components/schemas/ChatCompletionInputType", + "completion_input": "#/components/schemas/CompletionInputType", + "json": "#/components/schemas/JsonType", + "number": "#/components/schemas/NumberType", + "object": "#/components/schemas/ObjectType", + "string": "#/components/schemas/StringType", + "union": "#/components/schemas/UnionType" + } + } + }, + "params": { + "title": "Params", + "description": "The parameters for the scoring function for benchmark eval, these can be overridden for app eval", + "oneOf": [ + { + "$ref": "#/components/schemas/LLMAsJudgeScoringFnParams" + }, + { + "$ref": "#/components/schemas/RegexParserScoringFnParams" + }, + { + "$ref": "#/components/schemas/BasicScoringFnParams" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "basic": "#/components/schemas/BasicScoringFnParams", + "llm_as_judge": "#/components/schemas/LLMAsJudgeScoringFnParams", + "regex_parser": "#/components/schemas/RegexParserScoringFnParams" + } + } + } + }, + "type": "object", + "required": [ + "identifier", + "provider_id", + "return_type" + ], + "title": "ScoringFn", + "description": "A scoring function resource for evaluating model outputs." + }, + "ScoringResult": { + "properties": { + "score_rows": { + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array", + "title": "Score Rows" + }, + "aggregated_results": { + "additionalProperties": true, + "type": "object", + "title": "Aggregated Results" + } + }, + "type": "object", + "required": [ + "score_rows", + "aggregated_results" + ], + "title": "ScoringResult", + "description": "A scoring result for a single row." 
+ }, + "SearchRankingOptions": { + "properties": { + "ranker": { + "title": "Ranker", + "type": "string" + }, + "score_threshold": { + "title": "Score Threshold", + "default": 0.0, + "type": "number" + } + }, + "type": "object", + "title": "SearchRankingOptions", + "description": "Options for ranking and filtering search results." + }, + "Shield": { + "properties": { + "identifier": { + "type": "string", + "title": "Identifier", + "description": "Unique identifier for this resource in llama stack" + }, + "provider_resource_id": { + "title": "Provider Resource Id", + "description": "Unique identifier for this resource in the provider", + "type": "string" + }, + "provider_id": { + "type": "string", + "title": "Provider Id", + "description": "ID of the provider that owns this resource" + }, + "type": { + "type": "string", + "const": "shield", + "title": "Type", + "default": "shield" + }, + "params": { + "title": "Params", + "additionalProperties": true, + "type": "object" + } + }, + "type": "object", + "required": [ + "identifier", + "provider_id" + ], + "title": "Shield", + "description": "A safety shield resource that can be used to check content." + }, + "ShieldCallStep-Input": { + "properties": { + "turn_id": { + "type": "string", + "title": "Turn Id" + }, + "step_id": { + "type": "string", + "title": "Step Id" + }, + "started_at": { + "title": "Started At", + "type": "string", + "format": "date-time" + }, + "completed_at": { + "title": "Completed At", + "type": "string", + "format": "date-time" + }, + "step_type": { + "type": "string", + "const": "shield_call", + "title": "Step Type", + "default": "shield_call" + }, + "violation": { + "$ref": "#/components/schemas/SafetyViolation" + } + }, + "type": "object", + "required": [ + "turn_id", + "step_id", + "violation" + ], + "title": "ShieldCallStep", + "description": "A shield call step in an agent turn." + }, + "ShieldCallStep-Output": { + "properties": { + "turn_id": { + "type": "string", + "title": "Turn Id" + }, + "step_id": { + "type": "string", + "title": "Step Id" + }, + "started_at": { + "title": "Started At", + "type": "string", + "format": "date-time" + }, + "completed_at": { + "title": "Completed At", + "type": "string", + "format": "date-time" + }, + "step_type": { + "type": "string", + "const": "shield_call", + "title": "Step Type", + "default": "shield_call" + }, + "violation": { + "$ref": "#/components/schemas/SafetyViolation" + } + }, + "type": "object", + "required": [ + "turn_id", + "step_id", + "violation" + ], + "title": "ShieldCallStep", + "description": "A shield call step in an agent turn." + }, + "StopReason": { + "type": "string", + "enum": [ + "end_of_turn", + "end_of_message", + "out_of_tokens" + ], + "title": "StopReason" + }, + "StringType": { + "properties": { + "type": { + "type": "string", + "const": "string", + "title": "Type", + "default": "string" + } + }, + "type": "object", + "title": "StringType", + "description": "Parameter type for string values." 
+ }, + "SystemMessage": { + "properties": { + "role": { + "type": "string", + "const": "system", + "title": "Role", + "default": "system" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + } + }, + "type": "object", + "required": [ + "content" + ], + "title": "SystemMessage", + "description": "A system message providing instructions or context to the model." + }, + "SystemMessageBehavior": { + "type": "string", + "enum": [ + "append", + "replace" + ], + "title": "SystemMessageBehavior", + "description": "Config for how to override the default system prompt." + }, + "TextContentItem": { + "properties": { + "type": { + "type": "string", + "const": "text", + "title": "Type", + "default": "text" + }, + "text": { + "type": "string", + "title": "Text" + } + }, + "type": "object", + "required": [ + "text" + ], + "title": "TextContentItem", + "description": "A text content item" + }, + "ToolCall": { + "properties": { + "call_id": { + "type": "string", + "title": "Call Id" + }, + "tool_name": { + "anyOf": [ + { + "$ref": "#/components/schemas/BuiltinTool" + }, + { + "type": "string" + } + ], + "title": "Tool Name" + }, + "arguments": { + "type": "string", + "title": "Arguments" + } + }, + "type": "object", + "required": [ + "call_id", + "tool_name", + "arguments" + ], + "title": "ToolCall" + }, + "ToolChoice": { + "type": "string", + "enum": [ + "auto", + "required", + "none" + ], + "title": "ToolChoice", + "description": "Whether tool use is required or automatic. This is a hint to the model which may not be followed. It depends on the Instruction Following capabilities of the model." + }, + "ToolConfig": { + "properties": { + "tool_choice": { + "anyOf": [ + { + "$ref": "#/components/schemas/ToolChoice" + }, + { + "type": "string" + } + ], + "title": "Tool Choice", + "default": "auto" + }, + "tool_prompt_format": { + "$ref": "#/components/schemas/ToolPromptFormat" + }, + "system_message_behavior": { + "default": "append", + "$ref": "#/components/schemas/SystemMessageBehavior" + } + }, + "type": "object", + "title": "ToolConfig", + "description": "Configuration for tool use." + }, + "ToolDef": { + "properties": { + "toolgroup_id": { + "title": "Toolgroup Id", + "type": "string" + }, + "name": { + "type": "string", + "title": "Name" + }, + "description": { + "title": "Description", + "type": "string" + }, + "input_schema": { + "title": "Input Schema", + "additionalProperties": true, + "type": "object" + }, + "output_schema": { + "title": "Output Schema", + "additionalProperties": true, + "type": "object" + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object" + } + }, + "type": "object", + "required": [ + "name" + ], + "title": "ToolDef", + "description": "Tool definition used in runtime contexts." 
+ }, + "ToolExecutionStep-Input": { + "properties": { + "turn_id": { + "type": "string", + "title": "Turn Id" + }, + "step_id": { + "type": "string", + "title": "Step Id" + }, + "started_at": { + "title": "Started At", + "type": "string", + "format": "date-time" + }, + "completed_at": { + "title": "Completed At", + "type": "string", + "format": "date-time" + }, + "step_type": { + "type": "string", + "const": "tool_execution", + "title": "Step Type", + "default": "tool_execution" + }, + "tool_calls": { + "items": { + "$ref": "#/components/schemas/ToolCall" + }, + "type": "array", + "title": "Tool Calls" + }, + "tool_responses": { + "items": { + "$ref": "#/components/schemas/ToolResponse-Input" + }, + "type": "array", + "title": "Tool Responses" + } + }, + "type": "object", + "required": [ + "turn_id", + "step_id", + "tool_calls", + "tool_responses" + ], + "title": "ToolExecutionStep", + "description": "A tool execution step in an agent turn." + }, + "ToolExecutionStep-Output": { + "properties": { + "turn_id": { + "type": "string", + "title": "Turn Id" + }, + "step_id": { + "type": "string", + "title": "Step Id" + }, + "started_at": { + "title": "Started At", + "type": "string", + "format": "date-time" + }, + "completed_at": { + "title": "Completed At", + "type": "string", + "format": "date-time" + }, + "step_type": { + "type": "string", + "const": "tool_execution", + "title": "Step Type", + "default": "tool_execution" + }, + "tool_calls": { + "items": { + "$ref": "#/components/schemas/ToolCall" + }, + "type": "array", + "title": "Tool Calls" + }, + "tool_responses": { + "items": { + "$ref": "#/components/schemas/ToolResponse-Output" + }, + "type": "array", + "title": "Tool Responses" + } + }, + "type": "object", + "required": [ + "turn_id", + "step_id", + "tool_calls", + "tool_responses" + ], + "title": "ToolExecutionStep", + "description": "A tool execution step in an agent turn." + }, + "ToolGroup": { + "properties": { + "identifier": { + "type": "string", + "title": "Identifier", + "description": "Unique identifier for this resource in llama stack" + }, + "provider_resource_id": { + "title": "Provider Resource Id", + "description": "Unique identifier for this resource in the provider", + "type": "string" + }, + "provider_id": { + "type": "string", + "title": "Provider Id", + "description": "ID of the provider that owns this resource" + }, + "type": { + "type": "string", + "const": "tool_group", + "title": "Type", + "default": "tool_group" + }, + "mcp_endpoint": { + "$ref": "#/components/schemas/URL" + }, + "args": { + "title": "Args", + "additionalProperties": true, + "type": "object" + } + }, + "type": "object", + "required": [ + "identifier", + "provider_id" + ], + "title": "ToolGroup", + "description": "A group of related tools managed together." 
+ }, + "ToolInvocationResult": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "error_message": { + "title": "Error Message", + "type": "string" + }, + "error_code": { + "title": "Error Code", + "type": "integer" + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object" + } + }, + "type": "object", + "title": "ToolInvocationResult", + "description": "Result of a tool invocation." + }, + "ToolPromptFormat": { + "type": "string", + "enum": [ + "json", + "function_tag", + "python_list" + ], + "title": "ToolPromptFormat", + "description": "Prompt format for calling custom / zero shot tools." + }, + "ToolResponse-Input": { + "properties": { + "call_id": { + "type": "string", + "title": "Call Id" + }, + "tool_name": { + "anyOf": [ + { + "$ref": "#/components/schemas/BuiltinTool" + }, + { + "type": "string" + } + ], + "title": "Tool Name" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object" + } + }, + "type": "object", + "required": [ + "call_id", + "tool_name", + "content" + ], + "title": "ToolResponse", + "description": "Response from a tool invocation." 
+ }, + "ToolResponse-Output": { + "properties": { + "call_id": { + "type": "string", + "title": "Call Id" + }, + "tool_name": { + "anyOf": [ + { + "$ref": "#/components/schemas/BuiltinTool" + }, + { + "type": "string" + } + ], + "title": "Tool Name" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object" + } + }, + "type": "object", + "required": [ + "call_id", + "tool_name", + "content" + ], + "title": "ToolResponse", + "description": "Response from a tool invocation." + }, + "ToolResponseMessage-Input": { + "properties": { + "role": { + "type": "string", + "const": "tool", + "title": "Role", + "default": "tool" + }, + "call_id": { + "type": "string", + "title": "Call Id" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + } + }, + "type": "object", + "required": [ + "call_id", + "content" + ], + "title": "ToolResponseMessage", + "description": "A message representing the result of a tool invocation." 
+ }, + "ToolResponseMessage-Output": { + "properties": { + "role": { + "type": "string", + "const": "tool", + "title": "Role", + "default": "tool" + }, + "call_id": { + "type": "string", + "title": "Call Id" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + } + }, + "type": "object", + "required": [ + "call_id", + "content" + ], + "title": "ToolResponseMessage", + "description": "A message representing the result of a tool invocation." + }, + "TopKSamplingStrategy": { + "properties": { + "type": { + "type": "string", + "const": "top_k", + "title": "Type", + "default": "top_k" + }, + "top_k": { + "type": "integer", + "minimum": 1.0, + "title": "Top K" + } + }, + "type": "object", + "required": [ + "top_k" + ], + "title": "TopKSamplingStrategy", + "description": "Top-k sampling strategy that restricts sampling to the k most likely tokens." + }, + "TopPSamplingStrategy": { + "properties": { + "type": { + "type": "string", + "const": "top_p", + "title": "Type", + "default": "top_p" + }, + "temperature": { + "title": "Temperature", + "type": "number", + "minimum": 0.0 + }, + "top_p": { + "title": "Top P", + "default": 0.95, + "type": "number" + } + }, + "type": "object", + "required": [ + "temperature" + ], + "title": "TopPSamplingStrategy", + "description": "Top-p (nucleus) sampling strategy that samples from the smallest set of tokens with cumulative probability >= p." + }, + "TrainingConfig": { + "properties": { + "n_epochs": { + "type": "integer", + "title": "N Epochs" + }, + "max_steps_per_epoch": { + "type": "integer", + "title": "Max Steps Per Epoch", + "default": 1 + }, + "gradient_accumulation_steps": { + "type": "integer", + "title": "Gradient Accumulation Steps", + "default": 1 + }, + "max_validation_steps": { + "title": "Max Validation Steps", + "default": 1, + "type": "integer" + }, + "data_config": { + "$ref": "#/components/schemas/DataConfig" + }, + "optimizer_config": { + "$ref": "#/components/schemas/OptimizerConfig" + }, + "efficiency_config": { + "$ref": "#/components/schemas/EfficiencyConfig" + }, + "dtype": { + "title": "Dtype", + "default": "bf16", + "type": "string" + } + }, + "type": "object", + "required": [ + "n_epochs" + ], + "title": "TrainingConfig", + "description": "Comprehensive configuration for the training process." 
+ }, + "Turn": { + "properties": { + "turn_id": { + "type": "string", + "title": "Turn Id" + }, + "session_id": { + "type": "string", + "title": "Session Id" + }, + "input_messages": { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/UserMessage-Output" + }, + { + "$ref": "#/components/schemas/ToolResponseMessage-Output" + } + ] + }, + "type": "array", + "title": "Input Messages" + }, + "steps": { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/InferenceStep-Output" + }, + { + "$ref": "#/components/schemas/ToolExecutionStep-Output" + }, + { + "$ref": "#/components/schemas/ShieldCallStep-Output" + }, + { + "$ref": "#/components/schemas/MemoryRetrievalStep-Output" + } + ], + "discriminator": { + "propertyName": "step_type", + "mapping": { + "inference": "#/components/schemas/InferenceStep-Output", + "memory_retrieval": "#/components/schemas/MemoryRetrievalStep-Output", + "shield_call": "#/components/schemas/ShieldCallStep-Output", + "tool_execution": "#/components/schemas/ToolExecutionStep-Output" + } + } + }, + "type": "array", + "title": "Steps" + }, + "output_message": { + "$ref": "#/components/schemas/CompletionMessage-Output" + }, + "output_attachments": { + "title": "Output Attachments", + "items": { + "$ref": "#/components/schemas/Attachment-Output" + }, + "type": "array" + }, + "started_at": { + "type": "string", + "format": "date-time", + "title": "Started At" + }, + "completed_at": { + "title": "Completed At", + "type": "string", + "format": "date-time" + } + }, + "type": "object", + "required": [ + "turn_id", + "session_id", + "input_messages", + "steps", + "output_message", + "started_at" + ], + "title": "Turn", + "description": "A single turn in an interaction with an Agentic System." + }, + "URIDataSource": { + "properties": { + "type": { + "type": "string", + "const": "uri", + "title": "Type", + "default": "uri" + }, + "uri": { + "type": "string", + "title": "Uri" + } + }, + "type": "object", + "required": [ + "uri" + ], + "title": "URIDataSource", + "description": "A dataset that can be obtained from a URI." + }, + "URL": { + "properties": { + "uri": { + "type": "string", + "title": "Uri" + } + }, + "type": "object", + "required": [ + "uri" + ], + "title": "URL", + "description": "A URL reference to external content." + }, + "UnionType": { + "properties": { + "type": { + "type": "string", + "const": "union", + "title": "Type", + "default": "union" + } + }, + "type": "object", + "title": "UnionType", + "description": "Parameter type for union values." 
+ }, + "UserMessage-Input": { + "properties": { + "role": { + "type": "string", + "const": "user", + "title": "Role", + "default": "user" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "context": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Context" + } + }, + "type": "object", + "required": [ + "content" + ], + "title": "UserMessage", + "description": "A message from the user in a chat conversation." 
+ }, + "UserMessage-Output": { + "properties": { + "role": { + "type": "string", + "const": "user", + "title": "Role", + "default": "user" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "context": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Context" + } + }, + "type": "object", + "required": [ + "content" + ], + "title": "UserMessage", + "description": "A message from the user in a chat conversation." + }, + "VectorStoreChunkingStrategyAuto": { + "properties": { + "type": { + "type": "string", + "const": "auto", + "title": "Type", + "default": "auto" + } + }, + "type": "object", + "title": "VectorStoreChunkingStrategyAuto", + "description": "Automatic chunking strategy for vector store files." + }, + "VectorStoreChunkingStrategyStatic": { + "properties": { + "type": { + "type": "string", + "const": "static", + "title": "Type", + "default": "static" + }, + "static": { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyStaticConfig" + } + }, + "type": "object", + "required": [ + "static" + ], + "title": "VectorStoreChunkingStrategyStatic", + "description": "Static chunking strategy with configurable parameters." + }, + "VectorStoreChunkingStrategyStaticConfig": { + "properties": { + "chunk_overlap_tokens": { + "type": "integer", + "title": "Chunk Overlap Tokens", + "default": 400 + }, + "max_chunk_size_tokens": { + "type": "integer", + "maximum": 4096.0, + "minimum": 100.0, + "title": "Max Chunk Size Tokens", + "default": 800 + } + }, + "type": "object", + "title": "VectorStoreChunkingStrategyStaticConfig", + "description": "Configuration for static chunking strategy." + }, + "VectorStoreContent": { + "properties": { + "type": { + "type": "string", + "const": "text", + "title": "Type" + }, + "text": { + "type": "string", + "title": "Text" + } + }, + "type": "object", + "required": [ + "type", + "text" + ], + "title": "VectorStoreContent", + "description": "Content item from a vector store file or search result." 
+ }, + "VectorStoreFileBatchObject": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "object": { + "type": "string", + "title": "Object", + "default": "vector_store.file_batch" + }, + "created_at": { + "type": "integer", + "title": "Created At" + }, + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + }, + "status": { + "anyOf": [ + { + "type": "string", + "const": "completed" + }, + { + "type": "string", + "const": "in_progress" + }, + { + "type": "string", + "const": "cancelled" + }, + { + "type": "string", + "const": "failed" + } + ], + "title": "Status" + }, + "file_counts": { + "$ref": "#/components/schemas/VectorStoreFileCounts" + } + }, + "type": "object", + "required": [ + "id", + "created_at", + "vector_store_id", + "status", + "file_counts" + ], + "title": "VectorStoreFileBatchObject", + "description": "OpenAI Vector Store File Batch object." + }, + "VectorStoreFileCounts": { + "properties": { + "completed": { + "type": "integer", + "title": "Completed" + }, + "cancelled": { + "type": "integer", + "title": "Cancelled" + }, + "failed": { + "type": "integer", + "title": "Failed" + }, + "in_progress": { + "type": "integer", + "title": "In Progress" + }, + "total": { + "type": "integer", + "title": "Total" + } + }, + "type": "object", + "required": [ + "completed", + "cancelled", + "failed", + "in_progress", + "total" + ], + "title": "VectorStoreFileCounts", + "description": "File processing status counts for a vector store." + }, + "VectorStoreFileLastError": { + "properties": { + "code": { + "anyOf": [ + { + "type": "string", + "const": "server_error" + }, + { + "type": "string", + "const": "rate_limit_exceeded" + } + ], + "title": "Code" + }, + "message": { + "type": "string", + "title": "Message" + } + }, + "type": "object", + "required": [ + "code", + "message" + ], + "title": "VectorStoreFileLastError", + "description": "Error information for failed vector store file processing." + }, + "VectorStoreFileObject": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "object": { + "type": "string", + "title": "Object", + "default": "vector_store.file" + }, + "attributes": { + "additionalProperties": true, + "type": "object", + "title": "Attributes" + }, + "chunking_strategy": { + "oneOf": [ + { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyAuto" + }, + { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyStatic" + } + ], + "title": "Chunking Strategy", + "discriminator": { + "propertyName": "type", + "mapping": { + "auto": "#/components/schemas/VectorStoreChunkingStrategyAuto", + "static": "#/components/schemas/VectorStoreChunkingStrategyStatic" + } + } + }, + "created_at": { + "type": "integer", + "title": "Created At" + }, + "last_error": { + "$ref": "#/components/schemas/VectorStoreFileLastError" + }, + "status": { + "anyOf": [ + { + "type": "string", + "const": "completed" + }, + { + "type": "string", + "const": "in_progress" + }, + { + "type": "string", + "const": "cancelled" + }, + { + "type": "string", + "const": "failed" + } + ], + "title": "Status" + }, + "usage_bytes": { + "type": "integer", + "title": "Usage Bytes", + "default": 0 + }, + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + } + }, + "type": "object", + "required": [ + "id", + "chunking_strategy", + "created_at", + "status", + "vector_store_id" + ], + "title": "VectorStoreFileObject", + "description": "OpenAI Vector Store File object." 
+ }, + "VectorStoreObject": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "object": { + "type": "string", + "title": "Object", + "default": "vector_store" + }, + "created_at": { + "type": "integer", + "title": "Created At" + }, + "name": { + "title": "Name", + "type": "string" + }, + "usage_bytes": { + "type": "integer", + "title": "Usage Bytes", + "default": 0 + }, + "file_counts": { + "$ref": "#/components/schemas/VectorStoreFileCounts" + }, + "status": { + "type": "string", + "title": "Status", + "default": "completed" + }, + "expires_after": { + "title": "Expires After", + "additionalProperties": true, + "type": "object" + }, + "expires_at": { + "title": "Expires At", + "type": "integer" + }, + "last_active_at": { + "title": "Last Active At", + "type": "integer" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata" + } + }, + "type": "object", + "required": [ + "id", + "created_at", + "file_counts" + ], + "title": "VectorStoreObject", + "description": "OpenAI Vector Store object." + }, + "VectorStoreSearchResponse": { + "properties": { + "file_id": { + "type": "string", + "title": "File Id" + }, + "filename": { + "type": "string", + "title": "Filename" + }, + "score": { + "type": "number", + "title": "Score" + }, + "attributes": { + "title": "Attributes", + "additionalProperties": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "boolean" + } + ] + }, + "type": "object" + }, + "content": { + "items": { + "$ref": "#/components/schemas/VectorStoreContent" + }, + "type": "array", + "title": "Content" + } + }, + "type": "object", + "required": [ + "file_id", + "filename", + "score", + "content" + ], + "title": "VectorStoreSearchResponse", + "description": "Response from searching a vector store." + }, + "VectorStoreSearchResponsePage": { + "properties": { + "object": { + "type": "string", + "title": "Object", + "default": "vector_store.search_results.page" + }, + "search_query": { + "type": "string", + "title": "Search Query" + }, + "data": { + "items": { + "$ref": "#/components/schemas/VectorStoreSearchResponse" + }, + "type": "array", + "title": "Data" + }, + "has_more": { + "type": "boolean", + "title": "Has More", + "default": false + }, + "next_page": { + "title": "Next Page", + "type": "string" + } + }, + "type": "object", + "required": [ + "search_query", + "data" + ], + "title": "VectorStoreSearchResponsePage", + "description": "Paginated response from searching a vector store." + }, + "VersionInfo": { + "properties": { + "version": { + "type": "string", + "title": "Version" + } + }, + "type": "object", + "required": [ + "version" + ], + "title": "VersionInfo", + "description": "Version information for the service." + }, + "ViolationLevel": { + "type": "string", + "enum": [ + "info", + "warn", + "error" + ], + "title": "ViolationLevel", + "description": "Severity level of a safety violation." + }, + "WeightedRanker": { + "properties": { + "type": { + "type": "string", + "const": "weighted", + "title": "Type", + "default": "weighted" + }, + "alpha": { + "type": "number", + "maximum": 1.0, + "minimum": 0.0, + "title": "Alpha", + "description": "Weight factor between 0 and 1. 0 means only keyword scores, 1 means only vector scores.", + "default": 0.5 + } + }, + "type": "object", + "title": "WeightedRanker", + "description": "Weighted ranker configuration that combines vector and keyword scores." 
+ }, + "_URLOrData": { + "properties": { + "url": { + "$ref": "#/components/schemas/URL" + }, + "data": { + "contentEncoding": "base64", + "title": "Data", + "type": "string" + } + }, + "type": "object", + "title": "_URLOrData", + "description": "A URL or a base64 encoded string" + }, + "__main_____agents_agent_id_session_Request": { + "properties": { + "agent_id": { + "type": "string", + "title": "Agent Id" + }, + "session_name": { + "type": "string", + "title": "Session Name" + } + }, + "type": "object", + "required": [ + "agent_id", + "session_name" + ], + "title": "_agents_agent_id_session_Request" + }, + "__main_____agents_agent_id_session_session_id_turn_Request": { + "properties": { + "agent_id": { + "type": "string", + "title": "Agent Id" + }, + "session_id": { + "type": "string", + "title": "Session Id" + }, + "messages": { + "$ref": "#/components/schemas/UserMessage-Input" + }, + "stream": { + "type": "boolean", + "title": "Stream", + "default": false + }, + "documents": { + "$ref": "#/components/schemas/Document" + }, + "toolgroups": { + "anyOf": [ + { + "type": "string" + }, + { + "$ref": "#/components/schemas/AgentToolGroupWithArgs" + } + ], + "title": "Toolgroups" + }, + "tool_config": { + "$ref": "#/components/schemas/ToolConfig" + } + }, + "type": "object", + "required": [ + "agent_id", + "session_id", + "messages", + "documents", + "toolgroups", + "tool_config" + ], + "title": "_agents_agent_id_session_session_id_turn_Request" + }, + "__main_____agents_agent_id_session_session_id_turn_turn_id_resume_Request": { + "properties": { + "agent_id": { + "type": "string", + "title": "Agent Id" + }, + "session_id": { + "type": "string", + "title": "Session Id" + }, + "turn_id": { + "type": "string", + "title": "Turn Id" + }, + "tool_responses": { + "$ref": "#/components/schemas/ToolResponse-Input" + }, + "stream": { + "type": "boolean", + "title": "Stream", + "default": false + } + }, + "type": "object", + "required": [ + "agent_id", + "session_id", + "turn_id", + "tool_responses" + ], + "title": "_agents_agent_id_session_session_id_turn_turn_id_resume_Request" + }, + "__main_____datasets_Request": { + "properties": { + "purpose": { + "$ref": "#/components/schemas/DatasetPurpose" + }, + "metadata": { + "type": "string", + "title": "Metadata" + }, + "dataset_id": { + "type": "string", + "title": "Dataset Id" + } + }, + "type": "object", + "required": [ + "purpose", + "metadata", + "dataset_id" + ], + "title": "_datasets_Request" + }, + "_batches_Request": { + "properties": { + "input_file_id": { + "type": "string", + "title": "Input File Id" + }, + "endpoint": { + "type": "string", + "title": "Endpoint" + }, + "completion_window": { + "type": "string", + "title": "Completion Window" + }, + "metadata": { + "type": "string", + "title": "Metadata" + }, + "idempotency_key": { + "type": "string", + "title": "Idempotency Key" + } + }, + "type": "object", + "required": [ + "input_file_id", + "endpoint", + "completion_window", + "metadata", + "idempotency_key" + ], + "title": "_batches_Request" + }, + "_batches_batch_id_cancel_Request": { + "properties": { + "batch_id": { + "type": "string", + "title": "Batch Id" + } + }, + "type": "object", + "required": [ + "batch_id" + ], + "title": "_batches_batch_id_cancel_Request" + }, + "_conversations_Request": { + "properties": { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage-Input" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": 
"#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalResponse" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + } + ], + "title": "Items", + "discriminator": { + "propertyName": "type", + "mapping": { + "file_search_call": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall", + "function_call": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall", + "function_call_output": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput", + "mcp_approval_request": "#/components/schemas/OpenAIResponseMCPApprovalRequest", + "mcp_approval_response": "#/components/schemas/OpenAIResponseMCPApprovalResponse", + "mcp_call": "#/components/schemas/OpenAIResponseOutputMessageMCPCall", + "mcp_list_tools": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools", + "message": "#/components/schemas/OpenAIResponseMessage-Input", + "web_search_call": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + } + } + }, + "metadata": { + "type": "string", + "title": "Metadata" + } + }, + "type": "object", + "required": [ + "items", + "metadata" + ], + "title": "_conversations_Request" + }, + "_conversations_conversation_id_Request": { + "properties": { + "conversation_id": { + "type": "string", + "title": "Conversation Id" + }, + "metadata": { + "type": "string", + "title": "Metadata" + } + }, + "type": "object", + "required": [ + "conversation_id", + "metadata" + ], + "title": "_conversations_conversation_id_Request" + }, + "_conversations_conversation_id_items_Request": { + "properties": { + "conversation_id": { + "type": "string", + "title": "Conversation Id" + }, + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage-Input" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalResponse" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + } + ], + "title": "Items" + } + }, + "type": "object", + "required": [ + "conversation_id", + "items" + ], + "title": "_conversations_conversation_id_items_Request" + }, + "_inference_rerank_Request": { + "properties": { + "model": { + "type": "string", + "title": "Model" + }, + "query": { + "type": "string", + "title": "Query" + }, + "items": { + "type": "string", + "title": "Items" + }, + "max_num_results": { + "type": "integer", + "title": "Max Num Results" + } + }, + "type": "object", + "required": [ + "model", + "query", + "items", + "max_num_results" + ], + "title": "_inference_rerank_Request" + }, + "_models_Request": { + "properties": { + "model_id": { + "type": "string", + "title": "Model Id" + }, + "provider_model_id": { + 
"type": "string", + "title": "Provider Model Id" + }, + "provider_id": { + "type": "string", + "title": "Provider Id" + }, + "metadata": { + "type": "string", + "title": "Metadata" + }, + "model_type": { + "$ref": "#/components/schemas/ModelType" + } + }, + "type": "object", + "required": [ + "model_id", + "provider_model_id", + "provider_id", + "metadata", + "model_type" + ], + "title": "_models_Request" + }, + "_moderations_Request": { + "properties": { + "input": { + "type": "string", + "title": "Input" + }, + "model": { + "type": "string", + "title": "Model" + } + }, + "type": "object", + "required": [ + "input", + "model" + ], + "title": "_moderations_Request" + }, + "_openai_v1_batches_Request": { + "properties": { + "input_file_id": { + "type": "string", + "title": "Input File Id" + }, + "endpoint": { + "type": "string", + "title": "Endpoint" + }, + "completion_window": { + "type": "string", + "title": "Completion Window" + }, + "metadata": { + "type": "string", + "title": "Metadata" + }, + "idempotency_key": { + "type": "string", + "title": "Idempotency Key" + } + }, + "type": "object", + "required": [ + "input_file_id", + "endpoint", + "completion_window", + "metadata", + "idempotency_key" + ], + "title": "_openai_v1_batches_Request" + }, + "_openai_v1_batches_batch_id_cancel_Request": { + "properties": { + "batch_id": { + "type": "string", + "title": "Batch Id" + } + }, + "type": "object", + "required": [ + "batch_id" + ], + "title": "_openai_v1_batches_batch_id_cancel_Request" + }, + "_openai_v1_moderations_Request": { + "properties": { + "input": { + "type": "string", + "title": "Input" + }, + "model": { + "type": "string", + "title": "Model" + } + }, + "type": "object", + "required": [ + "input", + "model" + ], + "title": "_openai_v1_moderations_Request" + }, + "_openai_v1_responses_Request": { + "properties": { + "input": { + "type": "string", + "title": "Input" + }, + "model": { + "type": "string", + "title": "Model" + }, + "prompt": { + "$ref": "#/components/schemas/OpenAIResponsePrompt" + }, + "instructions": { + "type": "string", + "title": "Instructions" + }, + "previous_response_id": { + "type": "string", + "title": "Previous Response Id" + }, + "conversation": { + "type": "string", + "title": "Conversation" + }, + "store": { + "type": "boolean", + "title": "Store", + "default": true + }, + "stream": { + "type": "boolean", + "title": "Stream", + "default": false + }, + "temperature": { + "type": "number", + "title": "Temperature" + }, + "text": { + "$ref": "#/components/schemas/OpenAIResponseText" + }, + "tools": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseInputToolWebSearch" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolFileSearch" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolFunction" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolMCP" + } + ], + "title": "Tools", + "discriminator": { + "propertyName": "type", + "mapping": { + "file_search": "#/components/schemas/OpenAIResponseInputToolFileSearch", + "function": "#/components/schemas/OpenAIResponseInputToolFunction", + "mcp": "#/components/schemas/OpenAIResponseInputToolMCP", + "web_search": "#/components/schemas/OpenAIResponseInputToolWebSearch", + "web_search_preview": "#/components/schemas/OpenAIResponseInputToolWebSearch", + "web_search_preview_2025_03_11": "#/components/schemas/OpenAIResponseInputToolWebSearch" + } + } + }, + "include": { + "type": "string", + "title": "Include" + }, + "max_infer_iters": { + "type": "integer", + "title": 
"Max Infer Iters", + "default": 10 + } + }, + "type": "object", + "required": [ + "input", + "model", + "prompt", + "instructions", + "previous_response_id", + "conversation", + "temperature", + "text", + "tools", + "include" + ], + "title": "_openai_v1_responses_Request" + }, + "_openai_v1_vector_stores_vector_store_id_Request": { + "properties": { + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + }, + "name": { + "type": "string", + "title": "Name" + }, + "expires_after": { + "type": "string", + "title": "Expires After" + }, + "metadata": { + "type": "string", + "title": "Metadata" + } + }, + "type": "object", + "required": [ + "vector_store_id", + "name", + "expires_after", + "metadata" + ], + "title": "_openai_v1_vector_stores_vector_store_id_Request" + }, + "_openai_v1_vector_stores_vector_store_id_file_batches_batch_id_cancel_Request": { + "properties": { + "batch_id": { + "type": "string", + "title": "Batch Id" + }, + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + } + }, + "type": "object", + "required": [ + "batch_id", + "vector_store_id" + ], + "title": "_openai_v1_vector_stores_vector_store_id_file_batches_batch_id_cancel_Request" + }, + "_openai_v1_vector_stores_vector_store_id_files_Request": { + "properties": { + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + }, + "file_id": { + "type": "string", + "title": "File Id" + }, + "attributes": { + "type": "string", + "title": "Attributes" + }, + "chunking_strategy": { + "anyOf": [ + { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyAuto" + }, + { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyStatic" + } + ], + "title": "Chunking Strategy" + } + }, + "type": "object", + "required": [ + "vector_store_id", + "file_id", + "attributes", + "chunking_strategy" + ], + "title": "_openai_v1_vector_stores_vector_store_id_files_Request" + }, + "_openai_v1_vector_stores_vector_store_id_files_file_id_Request": { + "properties": { + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + }, + "file_id": { + "type": "string", + "title": "File Id" + }, + "attributes": { + "type": "string", + "title": "Attributes" + } + }, + "type": "object", + "required": [ + "vector_store_id", + "file_id", + "attributes" + ], + "title": "_openai_v1_vector_stores_vector_store_id_files_file_id_Request" + }, + "_openai_v1_vector_stores_vector_store_id_search_Request": { + "properties": { + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + }, + "query": { + "type": "string", + "title": "Query" + }, + "filters": { + "type": "string", + "title": "Filters" + }, + "max_num_results": { + "type": "integer", + "title": "Max Num Results", + "default": 10 + }, + "ranking_options": { + "$ref": "#/components/schemas/SearchRankingOptions" + }, + "rewrite_query": { + "type": "boolean", + "title": "Rewrite Query", + "default": false + }, + "search_mode": { + "type": "string", + "title": "Search Mode", + "default": "vector" + } + }, + "type": "object", + "required": [ + "vector_store_id", + "query", + "filters", + "ranking_options" + ], + "title": "_openai_v1_vector_stores_vector_store_id_search_Request" + }, + "_prompts_Request": { + "properties": { + "prompt": { + "type": "string", + "title": "Prompt" + }, + "variables": { + "type": "string", + "title": "Variables" + } + }, + "type": "object", + "required": [ + "prompt", + "variables" + ], + "title": "_prompts_Request" + }, + "_prompts_prompt_id_Request": { + "properties": { + "prompt_id": { + 
"type": "string", + "title": "Prompt Id" + }, + "prompt": { + "type": "string", + "title": "Prompt" + }, + "version": { + "type": "integer", + "title": "Version" + }, + "variables": { + "type": "string", + "title": "Variables" + }, + "set_as_default": { + "type": "boolean", + "title": "Set As Default", + "default": true + } + }, + "type": "object", + "required": [ + "prompt_id", + "prompt", + "version", + "variables" + ], + "title": "_prompts_prompt_id_Request" + }, + "_prompts_prompt_id_set_default_version_Request": { + "properties": { + "prompt_id": { + "type": "string", + "title": "Prompt Id" + }, + "version": { + "type": "integer", + "title": "Version" + } + }, + "type": "object", + "required": [ + "prompt_id", + "version" + ], + "title": "_prompts_prompt_id_set_default_version_Request" + }, + "_responses_Request": { + "properties": { + "input": { + "type": "string", + "title": "Input" + }, + "model": { + "type": "string", + "title": "Model" + }, + "prompt": { + "$ref": "#/components/schemas/OpenAIResponsePrompt" + }, + "instructions": { + "type": "string", + "title": "Instructions" + }, + "previous_response_id": { + "type": "string", + "title": "Previous Response Id" + }, + "conversation": { + "type": "string", + "title": "Conversation" + }, + "store": { + "type": "boolean", + "title": "Store", + "default": true + }, + "stream": { + "type": "boolean", + "title": "Stream", + "default": false + }, + "temperature": { + "type": "number", + "title": "Temperature" + }, + "text": { + "$ref": "#/components/schemas/OpenAIResponseText" + }, + "tools": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseInputToolWebSearch" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolFileSearch" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolFunction" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolMCP" + } + ], + "title": "Tools", + "discriminator": { + "propertyName": "type", + "mapping": { + "file_search": "#/components/schemas/OpenAIResponseInputToolFileSearch", + "function": "#/components/schemas/OpenAIResponseInputToolFunction", + "mcp": "#/components/schemas/OpenAIResponseInputToolMCP", + "web_search": "#/components/schemas/OpenAIResponseInputToolWebSearch", + "web_search_preview": "#/components/schemas/OpenAIResponseInputToolWebSearch", + "web_search_preview_2025_03_11": "#/components/schemas/OpenAIResponseInputToolWebSearch" + } + } + }, + "include": { + "type": "string", + "title": "Include" + }, + "max_infer_iters": { + "type": "integer", + "title": "Max Infer Iters", + "default": 10 + } + }, + "type": "object", + "required": [ + "input", + "model", + "prompt", + "instructions", + "previous_response_id", + "conversation", + "temperature", + "text", + "tools", + "include" + ], + "title": "_responses_Request" + }, + "_scoring_score_Request": { + "properties": { + "input_rows": { + "type": "string", + "title": "Input Rows" + }, + "scoring_functions": { + "type": "string", + "title": "Scoring Functions" + } + }, + "type": "object", + "required": [ + "input_rows", + "scoring_functions" + ], + "title": "_scoring_score_Request" + }, + "_scoring_score_batch_Request": { + "properties": { + "dataset_id": { + "type": "string", + "title": "Dataset Id" + }, + "scoring_functions": { + "type": "string", + "title": "Scoring Functions" + }, + "save_results_dataset": { + "type": "boolean", + "title": "Save Results Dataset", + "default": false + } + }, + "type": "object", + "required": [ + "dataset_id", + "scoring_functions" + ], + "title": 
"_scoring_score_batch_Request" + }, + "_shields_Request": { + "properties": { + "shield_id": { + "type": "string", + "title": "Shield Id" + }, + "provider_shield_id": { + "type": "string", + "title": "Provider Shield Id" + }, + "provider_id": { + "type": "string", + "title": "Provider Id" + }, + "params": { + "type": "string", + "title": "Params" + } + }, + "type": "object", + "required": [ + "shield_id", + "provider_shield_id", + "provider_id", + "params" + ], + "title": "_shields_Request" + }, + "_tool_runtime_invoke_Request": { + "properties": { + "tool_name": { + "type": "string", + "title": "Tool Name" + }, + "kwargs": { + "type": "string", + "title": "Kwargs" + } + }, + "type": "object", + "required": [ + "tool_name", + "kwargs" + ], + "title": "_tool_runtime_invoke_Request" + }, + "_tool_runtime_rag_tool_query_Request": { + "properties": { + "content": { + "type": "string", + "title": "Content" + }, + "vector_store_ids": { + "type": "string", + "title": "Vector Store Ids" + }, + "query_config": { + "$ref": "#/components/schemas/RAGQueryConfig" + } + }, + "type": "object", + "required": [ + "content", + "vector_store_ids", + "query_config" + ], + "title": "_tool_runtime_rag_tool_query_Request" + }, + "_vector_io_query_Request": { + "properties": { + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + }, + "query": { + "type": "string", + "title": "Query" + }, + "params": { + "type": "string", + "title": "Params" + } + }, + "type": "object", + "required": [ + "vector_store_id", + "query", + "params" + ], + "title": "_vector_io_query_Request" + }, + "_vector_stores_vector_store_id_Request": { + "properties": { + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + }, + "name": { + "type": "string", + "title": "Name" + }, + "expires_after": { + "type": "string", + "title": "Expires After" + }, + "metadata": { + "type": "string", + "title": "Metadata" + } + }, + "type": "object", + "required": [ + "vector_store_id", + "name", + "expires_after", + "metadata" + ], + "title": "_vector_stores_vector_store_id_Request" + }, + "_vector_stores_vector_store_id_file_batches_batch_id_cancel_Request": { + "properties": { + "batch_id": { + "type": "string", + "title": "Batch Id" + }, + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + } + }, + "type": "object", + "required": [ + "batch_id", + "vector_store_id" + ], + "title": "_vector_stores_vector_store_id_file_batches_batch_id_cancel_Request" + }, + "_vector_stores_vector_store_id_files_Request": { + "properties": { + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + }, + "file_id": { + "type": "string", + "title": "File Id" + }, + "attributes": { + "type": "string", + "title": "Attributes" + }, + "chunking_strategy": { + "anyOf": [ + { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyAuto" + }, + { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyStatic" + } + ], + "title": "Chunking Strategy" + } + }, + "type": "object", + "required": [ + "vector_store_id", + "file_id", + "attributes", + "chunking_strategy" + ], + "title": "_vector_stores_vector_store_id_files_Request" + }, + "_vector_stores_vector_store_id_files_file_id_Request": { + "properties": { + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + }, + "file_id": { + "type": "string", + "title": "File Id" + }, + "attributes": { + "type": "string", + "title": "Attributes" + } + }, + "type": "object", + "required": [ + "vector_store_id", + "file_id", + "attributes" + ], + 
"title": "_vector_stores_vector_store_id_files_file_id_Request" + }, + "_vector_stores_vector_store_id_search_Request": { + "properties": { + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + }, + "query": { + "type": "string", + "title": "Query" + }, + "filters": { + "type": "string", + "title": "Filters" + }, + "max_num_results": { + "type": "integer", + "title": "Max Num Results", + "default": 10 + }, + "ranking_options": { + "$ref": "#/components/schemas/SearchRankingOptions" + }, + "rewrite_query": { + "type": "boolean", + "title": "Rewrite Query", + "default": false + }, + "search_mode": { + "type": "string", + "title": "Search Mode", + "default": "vector" + } + }, + "type": "object", + "required": [ + "vector_store_id", + "query", + "filters", + "ranking_options" + ], + "title": "_vector_stores_vector_store_id_search_Request" + }, + "Error": { + "description": "Error response from the API. Roughly follows RFC 7807.", + "properties": { + "status": { + "title": "Status", + "type": "integer" + }, + "title": { + "title": "Title", + "type": "string" + }, + "detail": { + "title": "Detail", + "type": "string" + }, + "instance": { + "title": "Instance", + "type": "string", + "nullable": true + } + }, + "required": [ + "status", + "title", + "detail" + ], + "title": "Error", + "type": "object" + }, + "Agent": { + "description": "An agent instance with configuration and metadata.", + "properties": { + "agent_id": { + "title": "Agent Id", + "type": "string" + }, + "agent_config": { + "$ref": "#/components/schemas/AgentConfig" + }, + "created_at": { + "format": "date-time", + "title": "Created At", + "type": "string" + } + }, + "required": [ + "agent_id", + "agent_config", + "created_at" + ], + "title": "Agent", + "type": "object" + }, + "AgentStepResponse": { + "description": "Response containing details of a specific agent step.", + "properties": { + "step": { + "discriminator": { + "mapping": { + "inference": "#/$defs/InferenceStep", + "memory_retrieval": "#/$defs/MemoryRetrievalStep", + "shield_call": "#/$defs/ShieldCallStep", + "tool_execution": "#/$defs/ToolExecutionStep" + }, + "propertyName": "step_type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/InferenceStep" + }, + { + "$ref": "#/components/schemas/ToolExecutionStep" + }, + { + "$ref": "#/components/schemas/ShieldCallStep" + }, + { + "$ref": "#/components/schemas/MemoryRetrievalStep" + } + ], + "title": "Step" + } + }, + "required": [ + "step" + ], + "title": "AgentStepResponse", + "type": "object" + }, + "AgentTurnCreateRequest": { + "description": "Request to create a new turn for an agent.", + "properties": { + "sampling_params": { + "$ref": "#/components/schemas/SamplingParams" + }, + "input_shields": { + "title": "Input Shields", + "items": { + "type": "string" + }, + "type": "array" + }, + "output_shields": { + "title": "Output Shields", + "items": { + "type": "string" + }, + "type": "array" + }, + "toolgroups": { + "title": "Toolgroups", + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "$ref": "#/components/schemas/AgentToolGroupWithArgs" + } + ] + }, + "type": "array" + }, + "client_tools": { + "title": "Client Tools", + "items": { + "$ref": "#/components/schemas/ToolDef" + }, + "type": "array" + }, + "tool_choice": { + "deprecated": true, + "$ref": "#/components/schemas/ToolChoice", + "nullable": true + }, + "tool_prompt_format": { + "deprecated": true, + "$ref": "#/components/schemas/ToolPromptFormat", + "nullable": true + }, + "tool_config": { + "$ref": 
"#/components/schemas/ToolConfig", + "nullable": true + }, + "max_infer_iters": { + "default": 10, + "title": "Max Infer Iters", + "type": "integer" + }, + "instructions": { + "title": "Instructions", + "type": "string", + "nullable": true + }, + "agent_id": { + "title": "Agent Id", + "type": "string" + }, + "session_id": { + "title": "Session Id", + "type": "string" + }, + "messages": { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/UserMessage" + }, + { + "$ref": "#/components/schemas/ToolResponseMessage" + } + ] + }, + "title": "Messages", + "type": "array" + }, + "documents": { + "title": "Documents", + "items": { + "$ref": "#/components/schemas/Document" + }, + "type": "array", + "nullable": true + }, + "stream": { + "default": false, + "title": "Stream", + "type": "boolean" + } + }, + "required": [ + "agent_id", + "session_id", + "messages" + ], + "title": "AgentTurnCreateRequest", + "type": "object" + }, + "AgentTurnResponseEvent": { + "description": "An event in an agent turn response stream.", + "properties": { + "payload": { + "discriminator": { + "mapping": { + "step_complete": "#/$defs/AgentTurnResponseStepCompletePayload", + "step_progress": "#/$defs/AgentTurnResponseStepProgressPayload", + "step_start": "#/$defs/AgentTurnResponseStepStartPayload", + "turn_awaiting_input": "#/$defs/AgentTurnResponseTurnAwaitingInputPayload", + "turn_complete": "#/$defs/AgentTurnResponseTurnCompletePayload", + "turn_start": "#/$defs/AgentTurnResponseTurnStartPayload" + }, + "propertyName": "event_type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/AgentTurnResponseStepStartPayload" + }, + { + "$ref": "#/components/schemas/AgentTurnResponseStepProgressPayload" + }, + { + "$ref": "#/components/schemas/AgentTurnResponseStepCompletePayload" + }, + { + "$ref": "#/components/schemas/AgentTurnResponseTurnStartPayload" + }, + { + "$ref": "#/components/schemas/AgentTurnResponseTurnCompletePayload" + }, + { + "$ref": "#/components/schemas/AgentTurnResponseTurnAwaitingInputPayload" + } + ], + "title": "Payload" + } + }, + "required": [ + "payload" + ], + "title": "AgentTurnResponseEvent", + "type": "object" + }, + "AgentTurnResponseStepCompletePayload": { + "description": "Payload for step completion events in agent turn responses.", + "properties": { + "event_type": { + "const": "step_complete", + "default": "step_complete", + "title": "Event Type", + "type": "string" + }, + "step_type": { + "$ref": "#/components/schemas/StepType" + }, + "step_id": { + "title": "Step Id", + "type": "string" + }, + "step_details": { + "discriminator": { + "mapping": { + "inference": "#/$defs/InferenceStep", + "memory_retrieval": "#/$defs/MemoryRetrievalStep", + "shield_call": "#/$defs/ShieldCallStep", + "tool_execution": "#/$defs/ToolExecutionStep" + }, + "propertyName": "step_type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/InferenceStep" + }, + { + "$ref": "#/components/schemas/ToolExecutionStep" + }, + { + "$ref": "#/components/schemas/ShieldCallStep" + }, + { + "$ref": "#/components/schemas/MemoryRetrievalStep" + } + ], + "title": "Step Details" + } + }, + "required": [ + "step_type", + "step_id", + "step_details" + ], + "title": "AgentTurnResponseStepCompletePayload", + "type": "object" + }, + "AgentTurnResponseStepProgressPayload": { + "description": "Payload for step progress events in agent turn responses.", + "properties": { + "event_type": { + "const": "step_progress", + "default": "step_progress", + "title": "Event Type", + "type": "string" + }, + "step_type": { + "$ref": 
"#/components/schemas/StepType" + }, + "step_id": { + "title": "Step Id", + "type": "string" + }, + "delta": { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageDelta", + "text": "#/$defs/TextDelta", + "tool_call": "#/$defs/ToolCallDelta" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/TextDelta" + }, + { + "$ref": "#/components/schemas/ImageDelta" + }, + { + "$ref": "#/components/schemas/ToolCallDelta" + } + ], + "title": "Delta" + } + }, + "required": [ + "step_type", + "step_id", + "delta" + ], + "title": "AgentTurnResponseStepProgressPayload", + "type": "object" + }, + "AgentTurnResponseStepStartPayload": { + "description": "Payload for step start events in agent turn responses.", + "properties": { + "event_type": { + "const": "step_start", + "default": "step_start", + "title": "Event Type", + "type": "string" + }, + "step_type": { + "$ref": "#/components/schemas/StepType" + }, + "step_id": { + "title": "Step Id", + "type": "string" + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object" + } + }, + "required": [ + "step_type", + "step_id" + ], + "title": "AgentTurnResponseStepStartPayload", + "type": "object" + }, + "AgentTurnResponseStreamChunk": { + "description": "Streamed agent turn completion response.", + "properties": { + "event": { + "$ref": "#/components/schemas/AgentTurnResponseEvent" + } + }, + "required": [ + "event" + ], + "title": "AgentTurnResponseStreamChunk", + "type": "object" + }, + "AgentTurnResponseTurnAwaitingInputPayload": { + "description": "Payload for turn awaiting input events in agent turn responses.", + "properties": { + "event_type": { + "const": "turn_awaiting_input", + "default": "turn_awaiting_input", + "title": "Event Type", + "type": "string" + }, + "turn": { + "$ref": "#/components/schemas/Turn" + } + }, + "required": [ + "turn" + ], + "title": "AgentTurnResponseTurnAwaitingInputPayload", + "type": "object" + }, + "AgentTurnResponseTurnCompletePayload": { + "description": "Payload for turn completion events in agent turn responses.", + "properties": { + "event_type": { + "const": "turn_complete", + "default": "turn_complete", + "title": "Event Type", + "type": "string" + }, + "turn": { + "$ref": "#/components/schemas/Turn" + } + }, + "required": [ + "turn" + ], + "title": "AgentTurnResponseTurnCompletePayload", + "type": "object" + }, + "AgentTurnResponseTurnStartPayload": { + "description": "Payload for turn start events in agent turn responses.", + "properties": { + "event_type": { + "const": "turn_start", + "default": "turn_start", + "title": "Event Type", + "type": "string" + }, + "turn_id": { + "title": "Turn Id", + "type": "string" + } + }, + "required": [ + "turn_id" + ], + "title": "AgentTurnResponseTurnStartPayload", + "type": "object" + }, + "AgentTurnResumeRequest": { + "description": "Request to resume an agent turn with tool responses.", + "properties": { + "agent_id": { + "title": "Agent Id", + "type": "string" + }, + "session_id": { + "title": "Session Id", + "type": "string" + }, + "turn_id": { + "title": "Turn Id", + "type": "string" + }, + "tool_responses": { + "items": { + "$ref": "#/components/schemas/ToolResponse" + }, + "title": "Tool Responses", + "type": "array" + }, + "stream": { + "default": false, + "title": "Stream", + "type": "boolean" + } + }, + "required": [ + "agent_id", + "session_id", + "turn_id", + "tool_responses" + ], + "title": "AgentTurnResumeRequest", + "type": "object" + }, + "CompletionMessage": { + "description": "A message 
containing the model's (assistant) response in a chat conversation.", + "properties": { + "role": { + "const": "assistant", + "default": "assistant", + "title": "Role", + "type": "string" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + { + "items": { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "stop_reason": { + "$ref": "#/components/schemas/StopReason" + }, + "tool_calls": { + "title": "Tool Calls", + "items": { + "$ref": "#/components/schemas/ToolCall" + }, + "type": "array" + } + }, + "required": [ + "content", + "stop_reason" + ], + "title": "CompletionMessage", + "type": "object" + }, + "InferenceStep": { + "description": "An inference step in an agent turn.", + "properties": { + "turn_id": { + "title": "Turn Id", + "type": "string" + }, + "step_id": { + "title": "Step Id", + "type": "string" + }, + "started_at": { + "title": "Started At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "completed_at": { + "title": "Completed At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "step_type": { + "const": "inference", + "default": "inference", + "title": "Step Type", + "type": "string" + }, + "model_response": { + "$ref": "#/components/schemas/CompletionMessage" + } + }, + "required": [ + "turn_id", + "step_id", + "model_response" + ], + "title": "InferenceStep", + "type": "object" + }, + "ListOpenAIResponseInputItem": { + "description": "List container for OpenAI response input items.", + "properties": { + "data": { + "items": { + "anyOf": [ + { + "discriminator": { + "mapping": { + "file_search_call": "#/$defs/OpenAIResponseOutputMessageFileSearchToolCall", + "function_call": "#/$defs/OpenAIResponseOutputMessageFunctionToolCall", + "mcp_approval_request": "#/$defs/OpenAIResponseMCPApprovalRequest", + "mcp_call": "#/$defs/OpenAIResponseOutputMessageMCPCall", + "mcp_list_tools": "#/$defs/OpenAIResponseOutputMessageMCPListTools", + "message": "#/$defs/OpenAIResponseMessage", + "web_search_call": "#/$defs/OpenAIResponseOutputMessageWebSearchToolCall" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest" + } + ] + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalResponse" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMessage" + } + ] + }, + "title": "Data", + "type": "array" + }, + "object": { + "const": "list", + 
"default": "list", + "title": "Object", + "type": "string" + } + }, + "required": [ + "data" + ], + "title": "ListOpenAIResponseInputItem", + "type": "object" + }, + "ListOpenAIResponseObject": { + "description": "Paginated list of OpenAI response objects with navigation metadata.", + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/OpenAIResponseObjectWithInput" + }, + "title": "Data", + "type": "array" + }, + "has_more": { + "title": "Has More", + "type": "boolean" + }, + "first_id": { + "title": "First Id", + "type": "string" + }, + "last_id": { + "title": "Last Id", + "type": "string" + }, + "object": { + "const": "list", + "default": "list", + "title": "Object", + "type": "string" + } + }, + "required": [ + "data", + "has_more", + "first_id", + "last_id" + ], + "title": "ListOpenAIResponseObject", + "type": "object" + }, + "MemoryRetrievalStep": { + "description": "A memory retrieval step in an agent turn.", + "properties": { + "turn_id": { + "title": "Turn Id", + "type": "string" + }, + "step_id": { + "title": "Step Id", + "type": "string" + }, + "started_at": { + "title": "Started At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "completed_at": { + "title": "Completed At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "step_type": { + "const": "memory_retrieval", + "default": "memory_retrieval", + "title": "Step Type", + "type": "string" + }, + "vector_store_ids": { + "title": "Vector Store Ids", + "type": "string" + }, + "inserted_context": { + "anyOf": [ + { + "type": "string" + }, + { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + { + "items": { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + "type": "array" + } + ], + "title": "Inserted Context" + } + }, + "required": [ + "turn_id", + "step_id", + "vector_store_ids", + "inserted_context" + ], + "title": "MemoryRetrievalStep", + "type": "object" + }, + "OpenAIDeleteResponseObject": { + "description": "Response object confirming deletion of an OpenAI response.", + "properties": { + "id": { + "title": "Id", + "type": "string" + }, + "object": { + "const": "response", + "default": "response", + "title": "Object", + "type": "string" + }, + "deleted": { + "default": true, + "title": "Deleted", + "type": "boolean" + } + }, + "required": [ + "id" + ], + "title": "OpenAIDeleteResponseObject", + "type": "object" + }, + "PaginatedResponse": { + "description": "A generic paginated response that follows a simple format.", + "properties": { + "data": { + "items": { + "additionalProperties": true, + "type": "object" + }, + "title": "Data", + "type": "array" + }, + "has_more": { + "title": "Has More", + "type": "boolean" + }, + "url": { + "title": "Url", + "type": "string", + "nullable": true + } + }, + "required": [ + "data", + "has_more" + ], + "title": "PaginatedResponse", + "type": "object" + }, + "ResponseGuardrailSpec": { + "description": "Specification for a guardrail to apply during response generation.", + "properties": { + "type": { + "title": "Type", + "type": "string" + } + }, + "required": 
[ + "type" + ], + "title": "ResponseGuardrailSpec", + "type": "object" + }, + "Session": { + "description": "A single session of an interaction with an Agentic System.", + "properties": { + "session_id": { + "title": "Session Id", + "type": "string" + }, + "session_name": { + "title": "Session Name", + "type": "string" + }, + "turns": { + "items": { + "$ref": "#/components/schemas/Turn" + }, + "title": "Turns", + "type": "array" + }, + "started_at": { + "format": "date-time", + "title": "Started At", + "type": "string" + } + }, + "required": [ + "session_id", + "session_name", + "turns", + "started_at" + ], + "title": "Session", + "type": "object" + }, + "ShieldCallStep": { + "description": "A shield call step in an agent turn.", + "properties": { + "turn_id": { + "title": "Turn Id", + "type": "string" + }, + "step_id": { + "title": "Step Id", + "type": "string" + }, + "started_at": { + "title": "Started At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "completed_at": { + "title": "Completed At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "step_type": { + "const": "shield_call", + "default": "shield_call", + "title": "Step Type", + "type": "string" + }, + "violation": { + "$ref": "#/components/schemas/SafetyViolation" + } + }, + "required": [ + "turn_id", + "step_id", + "violation" + ], + "title": "ShieldCallStep", + "type": "object" + }, + "ToolExecutionStep": { + "description": "A tool execution step in an agent turn.", + "properties": { + "turn_id": { + "title": "Turn Id", + "type": "string" + }, + "step_id": { + "title": "Step Id", + "type": "string" + }, + "started_at": { + "title": "Started At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "completed_at": { + "title": "Completed At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "step_type": { + "const": "tool_execution", + "default": "tool_execution", + "title": "Step Type", + "type": "string" + }, + "tool_calls": { + "items": { + "$ref": "#/components/schemas/ToolCall" + }, + "title": "Tool Calls", + "type": "array" + }, + "tool_responses": { + "items": { + "$ref": "#/components/schemas/ToolResponse" + }, + "title": "Tool Responses", + "type": "array" + } + }, + "required": [ + "turn_id", + "step_id", + "tool_calls", + "tool_responses" + ], + "title": "ToolExecutionStep", + "type": "object" + }, + "ToolResponse": { + "description": "Response from a tool invocation.", + "properties": { + "call_id": { + "title": "Call Id", + "type": "string" + }, + "tool_name": { + "anyOf": [ + { + "$ref": "#/components/schemas/BuiltinTool" + }, + { + "type": "string" + } + ], + "title": "Tool Name" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + { + "items": { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object", + "nullable": true + } + }, + "required": [ + "call_id", + 
"tool_name", + "content" + ], + "title": "ToolResponse", + "type": "object" + }, + "ToolResponseMessage": { + "description": "A message representing the result of a tool invocation.", + "properties": { + "role": { + "const": "tool", + "default": "tool", + "title": "Role", + "type": "string" + }, + "call_id": { + "title": "Call Id", + "type": "string" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + { + "items": { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + } + }, + "required": [ + "call_id", + "content" + ], + "title": "ToolResponseMessage", + "type": "object" + }, + "UserMessage": { + "description": "A message from the user in a chat conversation.", + "properties": { + "role": { + "const": "user", + "default": "user", + "title": "Role", + "type": "string" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + { + "items": { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "context": { + "anyOf": [ + { + "type": "string" + }, + { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + { + "items": { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + "type": "array" + } + ], + "title": "Context", + "nullable": true + } + }, + "required": [ + "content" + ], + "title": "UserMessage", + "type": "object" + }, + "ListBatchesResponse": { + "description": "Response containing a list of batch objects.", + "properties": { + "object": { + "const": "list", + "default": "list", + "title": "Object", + "type": "string" + }, + "data": { + "description": "List of batch objects", + "items": { + "$ref": "#/components/schemas/Batch" + }, + "title": "Data", + "type": "array" + }, + "first_id": { + "description": "ID of the first batch in the list", + "title": "First Id", + "type": "string", + "nullable": true + }, + "last_id": { + "description": "ID of the last batch in the list", + "title": "Last Id", + "type": "string", + "nullable": true + }, + "has_more": { + "default": false, + "description": 
"Whether there are more batches available", + "title": "Has More", + "type": "boolean" + } + }, + "required": [ + "data" + ], + "title": "ListBatchesResponse", + "type": "object" + }, + "ConversationCreateRequest": { + "description": "Request body for creating a conversation.", + "properties": { + "items": { + "default": [], + "description": "Initial items to include in the conversation context. You may add up to 20 items at a time.", + "title": "Items", + "items": { + "discriminator": { + "mapping": { + "file_search_call": "#/$defs/OpenAIResponseOutputMessageFileSearchToolCall", + "function_call": "#/$defs/OpenAIResponseOutputMessageFunctionToolCall", + "function_call_output": "#/$defs/OpenAIResponseInputFunctionToolCallOutput", + "mcp_approval_request": "#/$defs/OpenAIResponseMCPApprovalRequest", + "mcp_approval_response": "#/$defs/OpenAIResponseMCPApprovalResponse", + "mcp_call": "#/$defs/OpenAIResponseOutputMessageMCPCall", + "mcp_list_tools": "#/$defs/OpenAIResponseOutputMessageMCPListTools", + "message": "#/$defs/OpenAIResponseMessage", + "web_search_call": "#/$defs/OpenAIResponseOutputMessageWebSearchToolCall" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalResponse" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + } + ] + }, + "maxItems": 20, + "type": "array" + }, + "metadata": { + "default": {}, + "description": "Set of 16 key-value pairs that can be attached to an object. Useful for storing additional information", + "title": "Metadata", + "additionalProperties": { + "type": "string" + }, + "maxProperties": 16, + "type": "object" + } + }, + "title": "ConversationCreateRequest", + "type": "object" + }, + "ConversationDeletedResource": { + "description": "Response for deleted conversation.", + "properties": { + "id": { + "description": "The deleted conversation identifier", + "title": "Id", + "type": "string" + }, + "object": { + "default": "conversation.deleted", + "description": "Object type", + "title": "Object", + "type": "string" + }, + "deleted": { + "default": true, + "description": "Whether the object was deleted", + "title": "Deleted", + "type": "boolean" + } + }, + "required": [ + "id" + ], + "title": "ConversationDeletedResource", + "type": "object" + }, + "ConversationItemCreateRequest": { + "description": "Request body for creating conversation items.", + "properties": { + "items": { + "description": "Items to include in the conversation context. 
You may add up to 20 items at a time.", + "items": { + "discriminator": { + "mapping": { + "file_search_call": "#/$defs/OpenAIResponseOutputMessageFileSearchToolCall", + "function_call": "#/$defs/OpenAIResponseOutputMessageFunctionToolCall", + "function_call_output": "#/$defs/OpenAIResponseInputFunctionToolCallOutput", + "mcp_approval_request": "#/$defs/OpenAIResponseMCPApprovalRequest", + "mcp_approval_response": "#/$defs/OpenAIResponseMCPApprovalResponse", + "mcp_call": "#/$defs/OpenAIResponseOutputMessageMCPCall", + "mcp_list_tools": "#/$defs/OpenAIResponseOutputMessageMCPListTools", + "message": "#/$defs/OpenAIResponseMessage", + "web_search_call": "#/$defs/OpenAIResponseOutputMessageWebSearchToolCall" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalResponse" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + } + ] + }, + "maxItems": 20, + "title": "Items", + "type": "array" + } + }, + "required": [ + "items" + ], + "title": "ConversationItemCreateRequest", + "type": "object" + }, + "ConversationItemDeletedResource": { + "description": "Response for deleted conversation item.", + "properties": { + "id": { + "description": "The deleted item identifier", + "title": "Id", + "type": "string" + }, + "object": { + "default": "conversation.item.deleted", + "description": "Object type", + "title": "Object", + "type": "string" + }, + "deleted": { + "default": true, + "description": "Whether the object was deleted", + "title": "Deleted", + "type": "boolean" + } + }, + "required": [ + "id" + ], + "title": "ConversationItemDeletedResource", + "type": "object" + }, + "ConversationUpdateRequest": { + "description": "Request body for updating a conversation.", + "properties": { + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. 
Values are strings with a maximum length of 512 characters.", + "title": "Metadata", + "type": "object" + } + }, + "required": [ + "metadata" + ], + "title": "ConversationUpdateRequest", + "type": "object" + }, + "ExpiresAfter": { + "description": "Control expiration of uploaded files.\n\nParams:\n - anchor, must be \"created_at\"\n - seconds, must be int between 3600 and 2592000 (1 hour to 30 days)", + "properties": { + "anchor": { + "const": "created_at", + "title": "Anchor", + "type": "string" + }, + "seconds": { + "maximum": 2592000, + "minimum": 3600, + "title": "Seconds", + "type": "integer" + } + }, + "required": [ + "anchor", + "seconds" + ], + "title": "ExpiresAfter", + "type": "object" + }, + "ListOpenAIFileResponse": { + "description": "Response for listing files in OpenAI Files API.", + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/OpenAIFileObject" + }, + "title": "Data", + "type": "array" + }, + "has_more": { + "title": "Has More", + "type": "boolean" + }, + "first_id": { + "title": "First Id", + "type": "string" + }, + "last_id": { + "title": "Last Id", + "type": "string" + }, + "object": { + "const": "list", + "default": "list", + "title": "Object", + "type": "string" + } + }, + "required": [ + "data", + "has_more", + "first_id", + "last_id" + ], + "title": "ListOpenAIFileResponse", + "type": "object" + }, + "OpenAIFileDeleteResponse": { + "description": "Response for deleting a file in OpenAI Files API.", + "properties": { + "id": { + "title": "Id", + "type": "string" + }, + "object": { + "const": "file", + "default": "file", + "title": "Object", + "type": "string" + }, + "deleted": { + "title": "Deleted", + "type": "boolean" + } + }, + "required": [ + "id", + "deleted" + ], + "title": "OpenAIFileDeleteResponse", + "type": "object" + }, + "Bf16QuantizationConfig": { + "description": "Configuration for BFloat16 precision (typically no quantization).", + "properties": { + "type": { + "const": "bf16", + "default": "bf16", + "title": "Type", + "type": "string" + } + }, + "title": "Bf16QuantizationConfig", + "type": "object" + }, + "ChatCompletionRequest": { + "properties": { + "model": { + "title": "Model", + "type": "string" + }, + "messages": { + "items": { + "discriminator": { + "mapping": { + "assistant": "#/$defs/CompletionMessage", + "system": "#/$defs/SystemMessage", + "tool": "#/$defs/ToolResponseMessage", + "user": "#/$defs/UserMessage" + }, + "propertyName": "role" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/UserMessage" + }, + { + "$ref": "#/components/schemas/SystemMessage" + }, + { + "$ref": "#/components/schemas/ToolResponseMessage" + }, + { + "$ref": "#/components/schemas/CompletionMessage" + } + ] + }, + "title": "Messages", + "type": "array" + }, + "sampling_params": { + "$ref": "#/components/schemas/SamplingParams" + }, + "tools": { + "title": "Tools", + "items": { + "$ref": "#/components/schemas/ToolDefinition" + }, + "type": "array" + }, + "tool_config": { + "$ref": "#/components/schemas/ToolConfig" + }, + "response_format": { + "title": "Response Format", + "discriminator": { + "mapping": { + "grammar": "#/$defs/GrammarResponseFormat", + "json_schema": "#/$defs/JsonSchemaResponseFormat" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/JsonSchemaResponseFormat" + }, + { + "$ref": "#/components/schemas/GrammarResponseFormat" + } + ], + "nullable": true + }, + "stream": { + "default": false, + "title": "Stream", + "type": "boolean" + }, + "logprobs": { + "$ref": 
"#/components/schemas/LogProbConfig", + "nullable": true + } + }, + "required": [ + "model", + "messages" + ], + "title": "ChatCompletionRequest", + "type": "object" + }, + "ChatCompletionResponse": { + "description": "Response from a chat completion request.", + "properties": { + "metrics": { + "title": "Metrics", + "items": { + "$ref": "#/components/schemas/MetricInResponse" + }, + "type": "array", + "nullable": true + }, + "completion_message": { + "$ref": "#/components/schemas/CompletionMessage" + }, + "logprobs": { + "title": "Logprobs", + "items": { + "$ref": "#/components/schemas/TokenLogProbs" + }, + "type": "array", + "nullable": true + } + }, + "required": [ + "completion_message" + ], + "title": "ChatCompletionResponse", + "type": "object" + }, + "ChatCompletionResponseEvent": { + "description": "An event during chat completion generation.", + "properties": { + "event_type": { + "$ref": "#/components/schemas/ChatCompletionResponseEventType" + }, + "delta": { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageDelta", + "text": "#/$defs/TextDelta", + "tool_call": "#/$defs/ToolCallDelta" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/TextDelta" + }, + { + "$ref": "#/components/schemas/ImageDelta" + }, + { + "$ref": "#/components/schemas/ToolCallDelta" + } + ], + "title": "Delta" + }, + "logprobs": { + "title": "Logprobs", + "items": { + "$ref": "#/components/schemas/TokenLogProbs" + }, + "type": "array", + "nullable": true + }, + "stop_reason": { + "$ref": "#/components/schemas/StopReason", + "nullable": true + } + }, + "required": [ + "event_type", + "delta" + ], + "title": "ChatCompletionResponseEvent", + "type": "object" + }, + "ChatCompletionResponseStreamChunk": { + "description": "A chunk of a streamed chat completion response.", + "properties": { + "metrics": { + "title": "Metrics", + "items": { + "$ref": "#/components/schemas/MetricInResponse" + }, + "type": "array", + "nullable": true + }, + "event": { + "$ref": "#/components/schemas/ChatCompletionResponseEvent" + } + }, + "required": [ + "event" + ], + "title": "ChatCompletionResponseStreamChunk", + "type": "object" + }, + "CompletionResponse": { + "description": "Response from a completion request.", + "properties": { + "metrics": { + "title": "Metrics", + "items": { + "$ref": "#/components/schemas/MetricInResponse" + }, + "type": "array", + "nullable": true + }, + "content": { + "title": "Content", + "type": "string" + }, + "stop_reason": { + "$ref": "#/components/schemas/StopReason" + }, + "logprobs": { + "title": "Logprobs", + "items": { + "$ref": "#/components/schemas/TokenLogProbs" + }, + "type": "array", + "nullable": true + } + }, + "required": [ + "content", + "stop_reason" + ], + "title": "CompletionResponse", + "type": "object" + }, + "CompletionResponseStreamChunk": { + "description": "A chunk of a streamed completion response.", + "properties": { + "metrics": { + "title": "Metrics", + "items": { + "$ref": "#/components/schemas/MetricInResponse" + }, + "type": "array", + "nullable": true + }, + "delta": { + "title": "Delta", + "type": "string" + }, + "stop_reason": { + "$ref": "#/components/schemas/StopReason", + "nullable": true + }, + "logprobs": { + "title": "Logprobs", + "items": { + "$ref": "#/components/schemas/TokenLogProbs" + }, + "type": "array", + "nullable": true + } + }, + "required": [ + "delta" + ], + "title": "CompletionResponseStreamChunk", + "type": "object" + }, + "EmbeddingsResponse": { + "description": "Response containing generated 
embeddings.", + "properties": { + "embeddings": { + "items": { + "items": { + "type": "number" + }, + "type": "array" + }, + "title": "Embeddings", + "type": "array" + } + }, + "required": [ + "embeddings" + ], + "title": "EmbeddingsResponse", + "type": "object" + }, + "Fp8QuantizationConfig": { + "description": "Configuration for 8-bit floating point quantization.", + "properties": { + "type": { + "const": "fp8_mixed", + "default": "fp8_mixed", + "title": "Type", + "type": "string" + } + }, + "title": "Fp8QuantizationConfig", + "type": "object" + }, + "Int4QuantizationConfig": { + "description": "Configuration for 4-bit integer quantization.", + "properties": { + "type": { + "const": "int4_mixed", + "default": "int4_mixed", + "title": "Type", + "type": "string" + }, + "scheme": { + "default": "int4_weight_int8_dynamic_activation", + "title": "Scheme", + "type": "string" + } + }, + "title": "Int4QuantizationConfig", + "type": "object" + }, + "ListOpenAIChatCompletionResponse": { + "description": "Response from listing OpenAI-compatible chat completions.", + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/OpenAICompletionWithInputMessages" + }, + "title": "Data", + "type": "array" + }, + "has_more": { + "title": "Has More", + "type": "boolean" + }, + "first_id": { + "title": "First Id", + "type": "string" + }, + "last_id": { + "title": "Last Id", + "type": "string" + }, + "object": { + "const": "list", + "default": "list", + "title": "Object", + "type": "string" + } + }, + "required": [ + "data", + "has_more", + "first_id", + "last_id" + ], + "title": "ListOpenAIChatCompletionResponse", + "type": "object" + }, + "OpenAIAssistantMessageParam": { + "description": "A message containing the model's (assistant) response in an OpenAI-compatible chat completion request.", + "properties": { + "role": { + "const": "assistant", + "default": "assistant", + "title": "Role", + "type": "string" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + "type": "array" + } + ], + "title": "Content", + "nullable": true + }, + "name": { + "title": "Name", + "type": "string", + "nullable": true + }, + "tool_calls": { + "title": "Tool Calls", + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionToolCall" + }, + "type": "array", + "nullable": true + } + }, + "title": "OpenAIAssistantMessageParam", + "type": "object" + }, + "OpenAIChatCompletionChunk": { + "description": "Chunk from a streaming response to an OpenAI-compatible chat completion request.", + "properties": { + "id": { + "title": "Id", + "type": "string" + }, + "choices": { + "items": { + "$ref": "#/components/schemas/OpenAIChunkChoice" + }, + "title": "Choices", + "type": "array" + }, + "object": { + "const": "chat.completion.chunk", + "default": "chat.completion.chunk", + "title": "Object", + "type": "string" + }, + "created": { + "title": "Created", + "type": "integer" + }, + "model": { + "title": "Model", + "type": "string" + }, + "usage": { + "$ref": "#/components/schemas/OpenAIChatCompletionUsage", + "nullable": true + } + }, + "required": [ + "id", + "choices", + "created", + "model" + ], + "title": "OpenAIChatCompletionChunk", + "type": "object" + }, + "OpenAIChoice": { + "description": "A choice from an OpenAI-compatible chat completion response.", + "properties": { + "message": { + "discriminator": { + "mapping": { + "assistant": "#/$defs/OpenAIAssistantMessageParam", + "developer": 
"#/$defs/OpenAIDeveloperMessageParam", + "system": "#/$defs/OpenAISystemMessageParam", + "tool": "#/$defs/OpenAIToolMessageParam", + "user": "#/$defs/OpenAIUserMessageParam" + }, + "propertyName": "role" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIUserMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAISystemMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIAssistantMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIToolMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIDeveloperMessageParam" + } + ], + "title": "Message" + }, + "finish_reason": { + "title": "Finish Reason", + "type": "string" + }, + "index": { + "title": "Index", + "type": "integer" + }, + "logprobs": { + "$ref": "#/components/schemas/OpenAIChoiceLogprobs", + "nullable": true + } + }, + "required": [ + "message", + "finish_reason", + "index" + ], + "title": "OpenAIChoice", + "type": "object" + }, + "OpenAIChoiceDelta": { + "description": "A delta from an OpenAI-compatible chat completion streaming response.", + "properties": { + "content": { + "title": "Content", + "type": "string", + "nullable": true + }, + "refusal": { + "title": "Refusal", + "type": "string", + "nullable": true + }, + "role": { + "title": "Role", + "type": "string", + "nullable": true + }, + "tool_calls": { + "title": "Tool Calls", + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionToolCall" + }, + "type": "array", + "nullable": true + }, + "reasoning_content": { + "title": "Reasoning Content", + "type": "string", + "nullable": true + } + }, + "title": "OpenAIChoiceDelta", + "type": "object" + }, + "OpenAIChoiceLogprobs": { + "description": "The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response.", + "properties": { + "content": { + "title": "Content", + "items": { + "$ref": "#/components/schemas/OpenAITokenLogProb" + }, + "type": "array", + "nullable": true + }, + "refusal": { + "title": "Refusal", + "items": { + "$ref": "#/components/schemas/OpenAITokenLogProb" + }, + "type": "array", + "nullable": true + } + }, + "title": "OpenAIChoiceLogprobs", + "type": "object" + }, + "OpenAIChunkChoice": { + "description": "A chunk choice from an OpenAI-compatible chat completion streaming response.", + "properties": { + "delta": { + "$ref": "#/components/schemas/OpenAIChoiceDelta" + }, + "finish_reason": { + "title": "Finish Reason", + "type": "string" + }, + "index": { + "title": "Index", + "type": "integer" + }, + "logprobs": { + "$ref": "#/components/schemas/OpenAIChoiceLogprobs", + "nullable": true + } + }, + "required": [ + "delta", + "finish_reason", + "index" + ], + "title": "OpenAIChunkChoice", + "type": "object" + }, + "OpenAICompletionChoice": { + "description": "A choice from an OpenAI-compatible completion response.", + "properties": { + "finish_reason": { + "title": "Finish Reason", + "type": "string" + }, + "text": { + "title": "Text", + "type": "string" + }, + "index": { + "title": "Index", + "type": "integer" + }, + "logprobs": { + "$ref": "#/components/schemas/OpenAIChoiceLogprobs", + "nullable": true + } + }, + "required": [ + "finish_reason", + "text", + "index" + ], + "title": "OpenAICompletionChoice", + "type": "object" + }, + "OpenAICompletionLogprobs": { + "description": "The log probabilities for the tokens in the message from an OpenAI-compatible completion response.", + "properties": { + "text_offset": { + "title": "Text Offset", + "items": { + "type": "integer" + }, + "type": "array", + "nullable": true + }, + 
"token_logprobs": { + "title": "Token Logprobs", + "items": { + "type": "number" + }, + "type": "array", + "nullable": true + }, + "tokens": { + "title": "Tokens", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "top_logprobs": { + "title": "Top Logprobs", + "items": { + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + "type": "array", + "nullable": true + } + }, + "title": "OpenAICompletionLogprobs", + "type": "object" + }, + "OpenAICompletionWithInputMessages": { + "properties": { + "id": { + "title": "Id", + "type": "string" + }, + "choices": { + "items": { + "$ref": "#/components/schemas/OpenAIChoice" + }, + "title": "Choices", + "type": "array" + }, + "object": { + "const": "chat.completion", + "default": "chat.completion", + "title": "Object", + "type": "string" + }, + "created": { + "title": "Created", + "type": "integer" + }, + "model": { + "title": "Model", + "type": "string" + }, + "usage": { + "$ref": "#/components/schemas/OpenAIChatCompletionUsage", + "nullable": true + }, + "input_messages": { + "items": { + "discriminator": { + "mapping": { + "assistant": "#/$defs/OpenAIAssistantMessageParam", + "developer": "#/$defs/OpenAIDeveloperMessageParam", + "system": "#/$defs/OpenAISystemMessageParam", + "tool": "#/$defs/OpenAIToolMessageParam", + "user": "#/$defs/OpenAIUserMessageParam" + }, + "propertyName": "role" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIUserMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAISystemMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIAssistantMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIToolMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIDeveloperMessageParam" + } + ] + }, + "title": "Input Messages", + "type": "array" + } + }, + "required": [ + "id", + "choices", + "created", + "model", + "input_messages" + ], + "title": "OpenAICompletionWithInputMessages", + "type": "object" + }, + "OpenAIUserMessageParam": { + "description": "A message from the user in an OpenAI-compatible chat completion request.", + "properties": { + "role": { + "const": "user", + "default": "user", + "title": "Role", + "type": "string" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "discriminator": { + "mapping": { + "file": "#/$defs/OpenAIFile", + "image_url": "#/$defs/OpenAIChatCompletionContentPartImageParam", + "text": "#/$defs/OpenAIChatCompletionContentPartTextParam" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartImageParam" + }, + { + "$ref": "#/components/schemas/OpenAIFile" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "name": { + "title": "Name", + "type": "string", + "nullable": true + } + }, + "required": [ + "content" + ], + "title": "OpenAIUserMessageParam", + "type": "object" + }, + "TokenLogProbs": { + "description": "Log probabilities for generated tokens.", + "properties": { + "logprobs_by_token": { + "additionalProperties": { + "type": "number" + }, + "title": "Logprobs By Token", + "type": "object" + } + }, + "required": [ + "logprobs_by_token" + ], + "title": "TokenLogProbs", + "type": "object" + }, + "Checkpoint": { + "description": "Checkpoint created during training runs.", + "properties": { + "identifier": { + "title": "Identifier", + "type": "string" + }, + "created_at": { + "format": "date-time", + "title": "Created At", + 
"type": "string" + }, + "epoch": { + "title": "Epoch", + "type": "integer" + }, + "post_training_job_id": { + "title": "Post Training Job Id", + "type": "string" + }, + "path": { + "title": "Path", + "type": "string" + }, + "training_metrics": { + "$ref": "#/components/schemas/PostTrainingMetric", + "nullable": true + } + }, + "required": [ + "identifier", + "created_at", + "epoch", + "post_training_job_id", + "path" + ], + "title": "Checkpoint", + "type": "object" + }, + "LoraFinetuningConfig": { + "description": "Configuration for Low-Rank Adaptation (LoRA) fine-tuning.", + "properties": { + "type": { + "const": "LoRA", + "default": "LoRA", + "title": "Type", + "type": "string" + }, + "lora_attn_modules": { + "items": { + "type": "string" + }, + "title": "Lora Attn Modules", + "type": "array" + }, + "apply_lora_to_mlp": { + "title": "Apply Lora To Mlp", + "type": "boolean" + }, + "apply_lora_to_output": { + "title": "Apply Lora To Output", + "type": "boolean" + }, + "rank": { + "title": "Rank", + "type": "integer" + }, + "alpha": { + "title": "Alpha", + "type": "integer" + }, + "use_dora": { + "default": false, + "title": "Use Dora", + "type": "boolean" + }, + "quantize_base": { + "default": false, + "title": "Quantize Base", + "type": "boolean" + } + }, + "required": [ + "lora_attn_modules", + "apply_lora_to_mlp", + "apply_lora_to_output", + "rank", + "alpha" + ], + "title": "LoraFinetuningConfig", + "type": "object" + }, + "PostTrainingJobArtifactsResponse": { + "description": "Artifacts of a finetuning job.", + "properties": { + "job_uuid": { + "title": "Job Uuid", + "type": "string" + }, + "checkpoints": { + "items": { + "$ref": "#/components/schemas/Checkpoint" + }, + "title": "Checkpoints", + "type": "array" + } + }, + "required": [ + "job_uuid" + ], + "title": "PostTrainingJobArtifactsResponse", + "type": "object" + }, + "PostTrainingJobLogStream": { + "description": "Stream of logs from a finetuning job.", + "properties": { + "job_uuid": { + "title": "Job Uuid", + "type": "string" + }, + "log_lines": { + "items": { + "type": "string" + }, + "title": "Log Lines", + "type": "array" + } + }, + "required": [ + "job_uuid", + "log_lines" + ], + "title": "PostTrainingJobLogStream", + "type": "object" + }, + "PostTrainingJobStatusResponse": { + "description": "Status of a finetuning job.", + "properties": { + "job_uuid": { + "title": "Job Uuid", + "type": "string" + }, + "status": { + "$ref": "#/components/schemas/JobStatus" + }, + "scheduled_at": { + "title": "Scheduled At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "started_at": { + "title": "Started At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "completed_at": { + "title": "Completed At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "resources_allocated": { + "title": "Resources Allocated", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "checkpoints": { + "items": { + "$ref": "#/components/schemas/Checkpoint" + }, + "title": "Checkpoints", + "type": "array" + } + }, + "required": [ + "job_uuid", + "status" + ], + "title": "PostTrainingJobStatusResponse", + "type": "object" + }, + "PostTrainingRLHFRequest": { + "description": "Request to finetune a model using reinforcement learning from human feedback.", + "properties": { + "job_uuid": { + "title": "Job Uuid", + "type": "string" + }, + "finetuned_model": { + "$ref": "#/components/schemas/URL" + }, + "dataset_id": { + "title": "Dataset Id", + "type": "string" + }, + 
"validation_dataset_id": { + "title": "Validation Dataset Id", + "type": "string" + }, + "algorithm": { + "$ref": "#/components/schemas/RLHFAlgorithm" + }, + "algorithm_config": { + "$ref": "#/components/schemas/DPOAlignmentConfig" + }, + "optimizer_config": { + "$ref": "#/components/schemas/OptimizerConfig" + }, + "training_config": { + "$ref": "#/components/schemas/TrainingConfig" + }, + "hyperparam_search_config": { + "additionalProperties": true, + "title": "Hyperparam Search Config", + "type": "object" + }, + "logger_config": { + "additionalProperties": true, + "title": "Logger Config", + "type": "object" + } + }, + "required": [ + "job_uuid", + "finetuned_model", + "dataset_id", + "validation_dataset_id", + "algorithm", + "algorithm_config", + "optimizer_config", + "training_config", + "hyperparam_search_config", + "logger_config" + ], + "title": "PostTrainingRLHFRequest", + "type": "object" + }, + "QATFinetuningConfig": { + "description": "Configuration for Quantization-Aware Training (QAT) fine-tuning.", + "properties": { + "type": { + "const": "QAT", + "default": "QAT", + "title": "Type", + "type": "string" + }, + "quantizer_name": { + "title": "Quantizer Name", + "type": "string" + }, + "group_size": { + "title": "Group Size", + "type": "integer" + } + }, + "required": [ + "quantizer_name", + "group_size" + ], + "title": "QATFinetuningConfig", + "type": "object" + }, + "ScoringFn": { + "description": "A scoring function resource for evaluating model outputs.", + "properties": { + "identifier": { + "description": "Unique identifier for this resource in llama stack", + "title": "Identifier", + "type": "string" + }, + "provider_resource_id": { + "description": "Unique identifier for this resource in the provider", + "title": "Provider Resource Id", + "type": "string", + "nullable": true + }, + "provider_id": { + "description": "ID of the provider that owns this resource", + "title": "Provider Id", + "type": "string" + }, + "type": { + "const": "scoring_function", + "default": "scoring_function", + "title": "Type", + "type": "string" + }, + "description": { + "title": "Description", + "type": "string", + "nullable": true + }, + "metadata": { + "additionalProperties": true, + "description": "Any additional metadata for this definition", + "title": "Metadata", + "type": "object" + }, + "return_type": { + "description": "The return type of the deterministic function", + "discriminator": { + "mapping": { + "agent_turn_input": "#/$defs/AgentTurnInputType", + "array": "#/$defs/ArrayType", + "boolean": "#/$defs/BooleanType", + "chat_completion_input": "#/$defs/ChatCompletionInputType", + "completion_input": "#/$defs/CompletionInputType", + "json": "#/$defs/JsonType", + "number": "#/$defs/NumberType", + "object": "#/$defs/ObjectType", + "string": "#/$defs/StringType", + "union": "#/$defs/UnionType" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/StringType" + }, + { + "$ref": "#/components/schemas/NumberType" + }, + { + "$ref": "#/components/schemas/BooleanType" + }, + { + "$ref": "#/components/schemas/ArrayType" + }, + { + "$ref": "#/components/schemas/ObjectType" + }, + { + "$ref": "#/components/schemas/JsonType" + }, + { + "$ref": "#/components/schemas/UnionType" + }, + { + "$ref": "#/components/schemas/ChatCompletionInputType" + }, + { + "$ref": "#/components/schemas/CompletionInputType" + }, + { + "$ref": "#/components/schemas/AgentTurnInputType" + } + ], + "title": "Return Type" + }, + "params": { + "description": "The parameters for the scoring 
function for benchmark eval; these can be overridden for app eval", + "title": "Params", + "discriminator": { + "mapping": { + "basic": "#/$defs/BasicScoringFnParams", + "llm_as_judge": "#/$defs/LLMAsJudgeScoringFnParams", + "regex_parser": "#/$defs/RegexParserScoringFnParams" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/LLMAsJudgeScoringFnParams" + }, + { + "$ref": "#/components/schemas/RegexParserScoringFnParams" + }, + { + "$ref": "#/components/schemas/BasicScoringFnParams" + } + ], + "nullable": true + } + }, + "required": [ + "identifier", + "provider_id", + "return_type" + ], + "title": "ScoringFn", + "type": "object" + }, + "SyntheticDataGenerationRequest": { + "description": "Request to generate synthetic data: a small batch of prompts and a filtering function.", + "properties": { + "dialogs": { + "items": { + "discriminator": { + "mapping": { + "assistant": "#/$defs/CompletionMessage", + "system": "#/$defs/SystemMessage", + "tool": "#/$defs/ToolResponseMessage", + "user": "#/$defs/UserMessage" + }, + "propertyName": "role" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/UserMessage" + }, + { + "$ref": "#/components/schemas/SystemMessage" + }, + { + "$ref": "#/components/schemas/ToolResponseMessage" + }, + { + "$ref": "#/components/schemas/CompletionMessage" + } + ] + }, + "title": "Dialogs", + "type": "array" + }, + "filtering_function": { + "$ref": "#/components/schemas/FilteringFunction", + "default": "none" + }, + "model": { + "title": "Model", + "type": "string", + "nullable": true + } + }, + "required": [ + "dialogs" + ], + "title": "SyntheticDataGenerationRequest", + "type": "object" + }, + "SyntheticDataGenerationResponse": { + "description": "Response from the synthetic data generation. Batch of (prompt, response, score) tuples that pass the threshold.", + "properties": { + "synthetic_data": { + "items": { + "additionalProperties": true, + "type": "object" + }, + "title": "Synthetic Data", + "type": "array" + }, + "statistics": { + "title": "Statistics", + "additionalProperties": true, + "type": "object", + "nullable": true + } + }, + "required": [ + "synthetic_data" + ], + "title": "SyntheticDataGenerationResponse", + "type": "object" + }, + "ListToolDefsResponse": { + "description": "Response containing a list of tool definitions.", + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/ToolDef" + }, + "title": "Data", + "type": "array" + } + }, + "required": [ + "data" + ], + "title": "ListToolDefsResponse", + "type": "object" + }, + "RAGDocument": { + "description": "A document to be used for document ingestion in the RAG Tool.", + "properties": { + "document_id": { + "title": "Document Id", + "type": "string" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + { + "items": { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + "type": "array" + }, + { + "$ref": "#/components/schemas/URL" + } + ], + "title": "Content" + }, + "mime_type": { + "title": "Mime Type", + "type": "string", + 
"nullable": true + }, + "metadata": { + "additionalProperties": true, + "title": "Metadata", + "type": "object" + } + }, + "required": [ + "document_id", + "content" + ], + "title": "RAGDocument", + "type": "object" + }, + "ToolGroupInput": { + "description": "Input data for registering a tool group.", + "properties": { + "toolgroup_id": { + "title": "Toolgroup Id", + "type": "string" + }, + "provider_id": { + "title": "Provider Id", + "type": "string" + }, + "args": { + "title": "Args", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "mcp_endpoint": { + "$ref": "#/components/schemas/URL", + "nullable": true + } + }, + "required": [ + "toolgroup_id", + "provider_id" + ], + "title": "ToolGroupInput", + "type": "object" + }, + "Chunk": { + "description": "A chunk of content that can be inserted into a vector database.", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + { + "items": { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "chunk_id": { + "title": "Chunk Id", + "type": "string" + }, + "metadata": { + "additionalProperties": true, + "title": "Metadata", + "type": "object" + }, + "embedding": { + "title": "Embedding", + "items": { + "type": "number" + }, + "type": "array", + "nullable": true + }, + "chunk_metadata": { + "$ref": "#/components/schemas/ChunkMetadata", + "nullable": true + } + }, + "required": [ + "content", + "chunk_id" + ], + "title": "Chunk", + "type": "object" + }, + "VectorStoreCreateRequest": { + "description": "Request to create a vector store.", + "properties": { + "name": { + "title": "Name", + "type": "string", + "nullable": true + }, + "file_ids": { + "items": { + "type": "string" + }, + "title": "File Ids", + "type": "array" + }, + "expires_after": { + "title": "Expires After", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "chunking_strategy": { + "title": "Chunking Strategy", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "metadata": { + "additionalProperties": true, + "title": "Metadata", + "type": "object" + } + }, + "title": "VectorStoreCreateRequest", + "type": "object" + }, + "VectorStoreDeleteResponse": { + "description": "Response from deleting a vector store.", + "properties": { + "id": { + "title": "Id", + "type": "string" + }, + "object": { + "default": "vector_store.deleted", + "title": "Object", + "type": "string" + }, + "deleted": { + "default": true, + "title": "Deleted", + "type": "boolean" + } + }, + "required": [ + "id" + ], + "title": "VectorStoreDeleteResponse", + "type": "object" + }, + "VectorStoreFileContentsResponse": { + "description": "Response from retrieving the contents of a vector store file.", + "properties": { + "file_id": { + "title": "File Id", + "type": "string" + }, + "filename": { + "title": "Filename", + "type": "string" + }, + "attributes": { + "additionalProperties": true, + "title": "Attributes", + "type": "object" + }, + "content": { + 
"items": { + "$ref": "#/components/schemas/VectorStoreContent" + }, + "title": "Content", + "type": "array" + } + }, + "required": [ + "file_id", + "filename", + "attributes", + "content" + ], + "title": "VectorStoreFileContentsResponse", + "type": "object" + }, + "VectorStoreFileDeleteResponse": { + "description": "Response from deleting a vector store file.", + "properties": { + "id": { + "title": "Id", + "type": "string" + }, + "object": { + "default": "vector_store.file.deleted", + "title": "Object", + "type": "string" + }, + "deleted": { + "default": true, + "title": "Deleted", + "type": "boolean" + } + }, + "required": [ + "id" + ], + "title": "VectorStoreFileDeleteResponse", + "type": "object" + }, + "VectorStoreFilesListInBatchResponse": { + "description": "Response from listing files in a vector store file batch.", + "properties": { + "object": { + "default": "list", + "title": "Object", + "type": "string" + }, + "data": { + "items": { + "$ref": "#/components/schemas/VectorStoreFileObject" + }, + "title": "Data", + "type": "array" + }, + "first_id": { + "title": "First Id", + "type": "string", + "nullable": true + }, + "last_id": { + "title": "Last Id", + "type": "string", + "nullable": true + }, + "has_more": { + "default": false, + "title": "Has More", + "type": "boolean" + } + }, + "required": [ + "data" + ], + "title": "VectorStoreFilesListInBatchResponse", + "type": "object" + }, + "VectorStoreListFilesResponse": { + "description": "Response from listing files in a vector store.", + "properties": { + "object": { + "default": "list", + "title": "Object", + "type": "string" + }, + "data": { + "items": { + "$ref": "#/components/schemas/VectorStoreFileObject" + }, + "title": "Data", + "type": "array" + }, + "first_id": { + "title": "First Id", + "type": "string", + "nullable": true + }, + "last_id": { + "title": "Last Id", + "type": "string", + "nullable": true + }, + "has_more": { + "default": false, + "title": "Has More", + "type": "boolean" + } + }, + "required": [ + "data" + ], + "title": "VectorStoreListFilesResponse", + "type": "object" + }, + "VectorStoreListResponse": { + "description": "Response from listing vector stores.", + "properties": { + "object": { + "default": "list", + "title": "Object", + "type": "string" + }, + "data": { + "items": { + "$ref": "#/components/schemas/VectorStoreObject" + }, + "title": "Data", + "type": "array" + }, + "first_id": { + "title": "First Id", + "type": "string", + "nullable": true + }, + "last_id": { + "title": "Last Id", + "type": "string", + "nullable": true + }, + "has_more": { + "default": false, + "title": "Has More", + "type": "boolean" + } + }, + "required": [ + "data" + ], + "title": "VectorStoreListResponse", + "type": "object" + }, + "VectorStoreModifyRequest": { + "description": "Request to modify a vector store.", + "properties": { + "name": { + "title": "Name", + "type": "string", + "nullable": true + }, + "expires_after": { + "title": "Expires After", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object", + "nullable": true + } + }, + "title": "VectorStoreModifyRequest", + "type": "object" + }, + "VectorStoreSearchRequest": { + "description": "Request to search a vector store.", + "properties": { + "query": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ], + "title": "Query" + }, + "filters": { + "title": "Filters", + "additionalProperties": true, + 
"type": "object", + "nullable": true + }, + "max_num_results": { + "default": 10, + "title": "Max Num Results", + "type": "integer" + }, + "ranking_options": { + "title": "Ranking Options", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "rewrite_query": { + "default": false, + "title": "Rewrite Query", + "type": "boolean" + } + }, + "required": [ + "query" + ], + "title": "VectorStoreSearchRequest", + "type": "object" + }, + "OpenAIResponseContentPartOutputText": { + "description": "Text content within a streamed response part.", + "properties": { + "type": { + "const": "output_text", + "default": "output_text", + "title": "Type", + "type": "string" + }, + "text": { + "title": "Text", + "type": "string" + }, + "annotations": { + "items": { + "discriminator": { + "mapping": { + "container_file_citation": "#/$defs/OpenAIResponseAnnotationContainerFileCitation", + "file_citation": "#/$defs/OpenAIResponseAnnotationFileCitation", + "file_path": "#/$defs/OpenAIResponseAnnotationFilePath", + "url_citation": "#/$defs/OpenAIResponseAnnotationCitation" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseAnnotationFileCitation" + }, + { + "$ref": "#/components/schemas/OpenAIResponseAnnotationCitation" + }, + { + "$ref": "#/components/schemas/OpenAIResponseAnnotationContainerFileCitation" + }, + { + "$ref": "#/components/schemas/OpenAIResponseAnnotationFilePath" + } + ] + }, + "title": "Annotations", + "type": "array" + }, + "logprobs": { + "title": "Logprobs", + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array", + "nullable": true + } + }, + "required": [ + "text" + ], + "title": "OpenAIResponseContentPartOutputText", + "type": "object" + }, + "OpenAIResponseContentPartReasoningSummary": { + "description": "Reasoning summary part in a streamed response.", + "properties": { + "type": { + "const": "summary_text", + "default": "summary_text", + "title": "Type", + "type": "string" + }, + "text": { + "title": "Text", + "type": "string" + } + }, + "required": [ + "text" + ], + "title": "OpenAIResponseContentPartReasoningSummary", + "type": "object" + }, + "OpenAIResponseContentPartReasoningText": { + "description": "Reasoning text emitted as part of a streamed response.", + "properties": { + "type": { + "const": "reasoning_text", + "default": "reasoning_text", + "title": "Type", + "type": "string" + }, + "text": { + "title": "Text", + "type": "string" + } + }, + "required": [ + "text" + ], + "title": "OpenAIResponseContentPartReasoningText", + "type": "object" + }, + "OpenAIResponseMessage": { + "description": "Corresponds to the various Message types in the Responses API.\nThey are all under one type because the Responses API gives them all\nthe same \"type\" value, and there is no way to tell them apart in certain\nscenarios.", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "discriminator": { + "mapping": { + "input_file": "#/$defs/OpenAIResponseInputMessageContentFile", + "input_image": "#/$defs/OpenAIResponseInputMessageContentImage", + "input_text": "#/$defs/OpenAIResponseInputMessageContentText" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentImage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentFile" + } + ] + }, + "type": "array" + }, + { + "items": { + "discriminator": 
{ + "mapping": { + "output_text": "#/$defs/OpenAIResponseOutputMessageContentOutputText", + "refusal": "#/$defs/OpenAIResponseContentPartRefusal" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageContentOutputText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseContentPartRefusal" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "role": { + "anyOf": [ + { + "const": "system", + "type": "string" + }, + { + "const": "developer", + "type": "string" + }, + { + "const": "user", + "type": "string" + }, + { + "const": "assistant", + "type": "string" + } + ], + "title": "Role" + }, + "type": { + "const": "message", + "default": "message", + "title": "Type", + "type": "string" + }, + "id": { + "title": "Id", + "type": "string", + "nullable": true + }, + "status": { + "title": "Status", + "type": "string", + "nullable": true + } + }, + "required": [ + "content", + "role" + ], + "title": "OpenAIResponseMessage", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseCompleted": { + "description": "Streaming event indicating a response has been completed.", + "properties": { + "response": { + "$ref": "#/components/schemas/OpenAIResponseObject" + }, + "type": { + "const": "response.completed", + "default": "response.completed", + "title": "Type", + "type": "string" + } + }, + "required": [ + "response" + ], + "title": "OpenAIResponseObjectStreamResponseCompleted", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseContentPartAdded": { + "description": "Streaming event for when a new content part is added to a response item.", + "properties": { + "content_index": { + "title": "Content Index", + "type": "integer" + }, + "response_id": { + "title": "Response Id", + "type": "string" + }, + "item_id": { + "title": "Item Id", + "type": "string" + }, + "output_index": { + "title": "Output Index", + "type": "integer" + }, + "part": { + "discriminator": { + "mapping": { + "output_text": "#/$defs/OpenAIResponseContentPartOutputText", + "reasoning_text": "#/$defs/OpenAIResponseContentPartReasoningText", + "refusal": "#/$defs/OpenAIResponseContentPartRefusal" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseContentPartOutputText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseContentPartRefusal" + }, + { + "$ref": "#/components/schemas/OpenAIResponseContentPartReasoningText" + } + ], + "title": "Part" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.content_part.added", + "default": "response.content_part.added", + "title": "Type", + "type": "string" + } + }, + "required": [ + "content_index", + "response_id", + "item_id", + "output_index", + "part", + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseContentPartAdded", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseContentPartDone": { + "description": "Streaming event for when a content part is completed.", + "properties": { + "content_index": { + "title": "Content Index", + "type": "integer" + }, + "response_id": { + "title": "Response Id", + "type": "string" + }, + "item_id": { + "title": "Item Id", + "type": "string" + }, + "output_index": { + "title": "Output Index", + "type": "integer" + }, + "part": { + "discriminator": { + "mapping": { + "output_text": "#/$defs/OpenAIResponseContentPartOutputText", + "reasoning_text": "#/$defs/OpenAIResponseContentPartReasoningText", + "refusal": 
"#/$defs/OpenAIResponseContentPartRefusal" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseContentPartOutputText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseContentPartRefusal" + }, + { + "$ref": "#/components/schemas/OpenAIResponseContentPartReasoningText" + } + ], + "title": "Part" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.content_part.done", + "default": "response.content_part.done", + "title": "Type", + "type": "string" + } + }, + "required": [ + "content_index", + "response_id", + "item_id", + "output_index", + "part", + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseContentPartDone", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseCreated": { + "description": "Streaming event indicating a new response has been created.", + "properties": { + "response": { + "$ref": "#/components/schemas/OpenAIResponseObject" + }, + "type": { + "const": "response.created", + "default": "response.created", + "title": "Type", + "type": "string" + } + }, + "required": [ + "response" + ], + "title": "OpenAIResponseObjectStreamResponseCreated", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseFailed": { + "description": "Streaming event emitted when a response fails.", + "properties": { + "response": { + "$ref": "#/components/schemas/OpenAIResponseObject" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.failed", + "default": "response.failed", + "title": "Type", + "type": "string" + } + }, + "required": [ + "response", + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseFailed", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseFileSearchCallCompleted": { + "description": "Streaming event for completed file search calls.", + "properties": { + "item_id": { + "title": "Item Id", + "type": "string" + }, + "output_index": { + "title": "Output Index", + "type": "integer" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.file_search_call.completed", + "default": "response.file_search_call.completed", + "title": "Type", + "type": "string" + } + }, + "required": [ + "item_id", + "output_index", + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseFileSearchCallCompleted", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseFileSearchCallInProgress": { + "description": "Streaming event for file search calls in progress.", + "properties": { + "item_id": { + "title": "Item Id", + "type": "string" + }, + "output_index": { + "title": "Output Index", + "type": "integer" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.file_search_call.in_progress", + "default": "response.file_search_call.in_progress", + "title": "Type", + "type": "string" + } + }, + "required": [ + "item_id", + "output_index", + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseFileSearchCallInProgress", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseFileSearchCallSearching": { + "description": "Streaming event for file search currently searching.", + "properties": { + "item_id": { + "title": "Item Id", + "type": "string" + }, + "output_index": { + "title": "Output Index", + "type": "integer" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + 
"const": "response.file_search_call.searching", + "default": "response.file_search_call.searching", + "title": "Type", + "type": "string" + } + }, + "required": [ + "item_id", + "output_index", + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseFileSearchCallSearching", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta": { + "description": "Streaming event for incremental function call argument updates.", + "properties": { + "delta": { + "title": "Delta", + "type": "string" + }, + "item_id": { + "title": "Item Id", + "type": "string" + }, + "output_index": { + "title": "Output Index", + "type": "integer" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.function_call_arguments.delta", + "default": "response.function_call_arguments.delta", + "title": "Type", + "type": "string" + } + }, + "required": [ + "delta", + "item_id", + "output_index", + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone": { + "description": "Streaming event for when function call arguments are completed.", + "properties": { + "arguments": { + "title": "Arguments", + "type": "string" + }, + "item_id": { + "title": "Item Id", + "type": "string" + }, + "output_index": { + "title": "Output Index", + "type": "integer" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.function_call_arguments.done", + "default": "response.function_call_arguments.done", + "title": "Type", + "type": "string" + } + }, + "required": [ + "arguments", + "item_id", + "output_index", + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseInProgress": { + "description": "Streaming event indicating the response remains in progress.", + "properties": { + "response": { + "$ref": "#/components/schemas/OpenAIResponseObject" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.in_progress", + "default": "response.in_progress", + "title": "Type", + "type": "string" + } + }, + "required": [ + "response", + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseInProgress", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseIncomplete": { + "description": "Streaming event emitted when a response ends in an incomplete state.", + "properties": { + "response": { + "$ref": "#/components/schemas/OpenAIResponseObject" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.incomplete", + "default": "response.incomplete", + "title": "Type", + "type": "string" + } + }, + "required": [ + "response", + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseIncomplete", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta": { + "properties": { + "delta": { + "title": "Delta", + "type": "string" + }, + "item_id": { + "title": "Item Id", + "type": "string" + }, + "output_index": { + "title": "Output Index", + "type": "integer" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.mcp_call.arguments.delta", + "default": "response.mcp_call.arguments.delta", + "title": "Type", + "type": "string" + } + }, 
+ "required": [ + "delta", + "item_id", + "output_index", + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseMcpCallArgumentsDone": { + "properties": { + "arguments": { + "title": "Arguments", + "type": "string" + }, + "item_id": { + "title": "Item Id", + "type": "string" + }, + "output_index": { + "title": "Output Index", + "type": "integer" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.mcp_call.arguments.done", + "default": "response.mcp_call.arguments.done", + "title": "Type", + "type": "string" + } + }, + "required": [ + "arguments", + "item_id", + "output_index", + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseMcpCallArgumentsDone", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseMcpCallCompleted": { + "description": "Streaming event for completed MCP calls.", + "properties": { + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.mcp_call.completed", + "default": "response.mcp_call.completed", + "title": "Type", + "type": "string" + } + }, + "required": [ + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseMcpCallCompleted", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseMcpCallFailed": { + "description": "Streaming event for failed MCP calls.", + "properties": { + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.mcp_call.failed", + "default": "response.mcp_call.failed", + "title": "Type", + "type": "string" + } + }, + "required": [ + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseMcpCallFailed", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseMcpCallInProgress": { + "description": "Streaming event for MCP calls in progress.", + "properties": { + "item_id": { + "title": "Item Id", + "type": "string" + }, + "output_index": { + "title": "Output Index", + "type": "integer" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.mcp_call.in_progress", + "default": "response.mcp_call.in_progress", + "title": "Type", + "type": "string" + } + }, + "required": [ + "item_id", + "output_index", + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseMcpCallInProgress", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseMcpListToolsCompleted": { + "properties": { + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.mcp_list_tools.completed", + "default": "response.mcp_list_tools.completed", + "title": "Type", + "type": "string" + } + }, + "required": [ + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseMcpListToolsCompleted", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseMcpListToolsFailed": { + "properties": { + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.mcp_list_tools.failed", + "default": "response.mcp_list_tools.failed", + "title": "Type", + "type": "string" + } + }, + "required": [ + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseMcpListToolsFailed", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseMcpListToolsInProgress": { + "properties": { + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": 
{ + "const": "response.mcp_list_tools.in_progress", + "default": "response.mcp_list_tools.in_progress", + "title": "Type", + "type": "string" + } + }, + "required": [ + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseMcpListToolsInProgress", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseOutputItemAdded": { + "description": "Streaming event for when a new output item is added to the response.", + "properties": { + "response_id": { + "title": "Response Id", + "type": "string" + }, + "item": { + "discriminator": { + "mapping": { + "file_search_call": "#/$defs/OpenAIResponseOutputMessageFileSearchToolCall", + "function_call": "#/$defs/OpenAIResponseOutputMessageFunctionToolCall", + "mcp_approval_request": "#/$defs/OpenAIResponseMCPApprovalRequest", + "mcp_call": "#/$defs/OpenAIResponseOutputMessageMCPCall", + "mcp_list_tools": "#/$defs/OpenAIResponseOutputMessageMCPListTools", + "message": "#/$defs/OpenAIResponseMessage", + "web_search_call": "#/$defs/OpenAIResponseOutputMessageWebSearchToolCall" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest" + } + ], + "title": "Item" + }, + "output_index": { + "title": "Output Index", + "type": "integer" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.output_item.added", + "default": "response.output_item.added", + "title": "Type", + "type": "string" + } + }, + "required": [ + "response_id", + "item", + "output_index", + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseOutputItemAdded", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseOutputItemDone": { + "description": "Streaming event for when an output item is completed.", + "properties": { + "response_id": { + "title": "Response Id", + "type": "string" + }, + "item": { + "discriminator": { + "mapping": { + "file_search_call": "#/$defs/OpenAIResponseOutputMessageFileSearchToolCall", + "function_call": "#/$defs/OpenAIResponseOutputMessageFunctionToolCall", + "mcp_approval_request": "#/$defs/OpenAIResponseMCPApprovalRequest", + "mcp_call": "#/$defs/OpenAIResponseOutputMessageMCPCall", + "mcp_list_tools": "#/$defs/OpenAIResponseOutputMessageMCPListTools", + "message": "#/$defs/OpenAIResponseMessage", + "web_search_call": "#/$defs/OpenAIResponseOutputMessageWebSearchToolCall" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest" + } + ], + "title": "Item" + }, + "output_index": { + "title": 
"Output Index", + "type": "integer" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.output_item.done", + "default": "response.output_item.done", + "title": "Type", + "type": "string" + } + }, + "required": [ + "response_id", + "item", + "output_index", + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseOutputItemDone", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded": { + "description": "Streaming event for when an annotation is added to output text.", + "properties": { + "item_id": { + "title": "Item Id", + "type": "string" + }, + "output_index": { + "title": "Output Index", + "type": "integer" + }, + "content_index": { + "title": "Content Index", + "type": "integer" + }, + "annotation_index": { + "title": "Annotation Index", + "type": "integer" + }, + "annotation": { + "discriminator": { + "mapping": { + "container_file_citation": "#/$defs/OpenAIResponseAnnotationContainerFileCitation", + "file_citation": "#/$defs/OpenAIResponseAnnotationFileCitation", + "file_path": "#/$defs/OpenAIResponseAnnotationFilePath", + "url_citation": "#/$defs/OpenAIResponseAnnotationCitation" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseAnnotationFileCitation" + }, + { + "$ref": "#/components/schemas/OpenAIResponseAnnotationCitation" + }, + { + "$ref": "#/components/schemas/OpenAIResponseAnnotationContainerFileCitation" + }, + { + "$ref": "#/components/schemas/OpenAIResponseAnnotationFilePath" + } + ], + "title": "Annotation" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.output_text.annotation.added", + "default": "response.output_text.annotation.added", + "title": "Type", + "type": "string" + } + }, + "required": [ + "item_id", + "output_index", + "content_index", + "annotation_index", + "annotation", + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseOutputTextDelta": { + "description": "Streaming event for incremental text content updates.", + "properties": { + "content_index": { + "title": "Content Index", + "type": "integer" + }, + "delta": { + "title": "Delta", + "type": "string" + }, + "item_id": { + "title": "Item Id", + "type": "string" + }, + "output_index": { + "title": "Output Index", + "type": "integer" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.output_text.delta", + "default": "response.output_text.delta", + "title": "Type", + "type": "string" + } + }, + "required": [ + "content_index", + "delta", + "item_id", + "output_index", + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseOutputTextDelta", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseOutputTextDone": { + "description": "Streaming event for when text output is completed.", + "properties": { + "content_index": { + "title": "Content Index", + "type": "integer" + }, + "text": { + "title": "Text", + "type": "string" + }, + "item_id": { + "title": "Item Id", + "type": "string" + }, + "output_index": { + "title": "Output Index", + "type": "integer" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.output_text.done", + "default": "response.output_text.done", + "title": "Type", + "type": "string" + } + }, + "required": [ + 
"content_index", + "text", + "item_id", + "output_index", + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseOutputTextDone", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded": { + "description": "Streaming event for when a new reasoning summary part is added.", + "properties": { + "item_id": { + "title": "Item Id", + "type": "string" + }, + "output_index": { + "title": "Output Index", + "type": "integer" + }, + "part": { + "$ref": "#/components/schemas/OpenAIResponseContentPartReasoningSummary" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "summary_index": { + "title": "Summary Index", + "type": "integer" + }, + "type": { + "const": "response.reasoning_summary_part.added", + "default": "response.reasoning_summary_part.added", + "title": "Type", + "type": "string" + } + }, + "required": [ + "item_id", + "output_index", + "part", + "sequence_number", + "summary_index" + ], + "title": "OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseReasoningSummaryPartDone": { + "description": "Streaming event for when a reasoning summary part is completed.", + "properties": { + "item_id": { + "title": "Item Id", + "type": "string" + }, + "output_index": { + "title": "Output Index", + "type": "integer" + }, + "part": { + "$ref": "#/components/schemas/OpenAIResponseContentPartReasoningSummary" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "summary_index": { + "title": "Summary Index", + "type": "integer" + }, + "type": { + "const": "response.reasoning_summary_part.done", + "default": "response.reasoning_summary_part.done", + "title": "Type", + "type": "string" + } + }, + "required": [ + "item_id", + "output_index", + "part", + "sequence_number", + "summary_index" + ], + "title": "OpenAIResponseObjectStreamResponseReasoningSummaryPartDone", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseReasoningSummaryTextDelta": { + "description": "Streaming event for incremental reasoning summary text updates.", + "properties": { + "delta": { + "title": "Delta", + "type": "string" + }, + "item_id": { + "title": "Item Id", + "type": "string" + }, + "output_index": { + "title": "Output Index", + "type": "integer" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "summary_index": { + "title": "Summary Index", + "type": "integer" + }, + "type": { + "const": "response.reasoning_summary_text.delta", + "default": "response.reasoning_summary_text.delta", + "title": "Type", + "type": "string" + } + }, + "required": [ + "delta", + "item_id", + "output_index", + "sequence_number", + "summary_index" + ], + "title": "OpenAIResponseObjectStreamResponseReasoningSummaryTextDelta", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseReasoningSummaryTextDone": { + "description": "Streaming event for when reasoning summary text is completed.", + "properties": { + "text": { + "title": "Text", + "type": "string" + }, + "item_id": { + "title": "Item Id", + "type": "string" + }, + "output_index": { + "title": "Output Index", + "type": "integer" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "summary_index": { + "title": "Summary Index", + "type": "integer" + }, + "type": { + "const": "response.reasoning_summary_text.done", + "default": "response.reasoning_summary_text.done", + "title": "Type", + "type": "string" + } + }, + "required": [ + 
"text", + "item_id", + "output_index", + "sequence_number", + "summary_index" + ], + "title": "OpenAIResponseObjectStreamResponseReasoningSummaryTextDone", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseReasoningTextDelta": { + "description": "Streaming event for incremental reasoning text updates.", + "properties": { + "content_index": { + "title": "Content Index", + "type": "integer" + }, + "delta": { + "title": "Delta", + "type": "string" + }, + "item_id": { + "title": "Item Id", + "type": "string" + }, + "output_index": { + "title": "Output Index", + "type": "integer" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.reasoning_text.delta", + "default": "response.reasoning_text.delta", + "title": "Type", + "type": "string" + } + }, + "required": [ + "content_index", + "delta", + "item_id", + "output_index", + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseReasoningTextDelta", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseReasoningTextDone": { + "description": "Streaming event for when reasoning text is completed.", + "properties": { + "content_index": { + "title": "Content Index", + "type": "integer" + }, + "text": { + "title": "Text", + "type": "string" + }, + "item_id": { + "title": "Item Id", + "type": "string" + }, + "output_index": { + "title": "Output Index", + "type": "integer" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.reasoning_text.done", + "default": "response.reasoning_text.done", + "title": "Type", + "type": "string" + } + }, + "required": [ + "content_index", + "text", + "item_id", + "output_index", + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseReasoningTextDone", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseRefusalDelta": { + "description": "Streaming event for incremental refusal text updates.", + "properties": { + "content_index": { + "title": "Content Index", + "type": "integer" + }, + "delta": { + "title": "Delta", + "type": "string" + }, + "item_id": { + "title": "Item Id", + "type": "string" + }, + "output_index": { + "title": "Output Index", + "type": "integer" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.refusal.delta", + "default": "response.refusal.delta", + "title": "Type", + "type": "string" + } + }, + "required": [ + "content_index", + "delta", + "item_id", + "output_index", + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseRefusalDelta", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseRefusalDone": { + "description": "Streaming event for when refusal text is completed.", + "properties": { + "content_index": { + "title": "Content Index", + "type": "integer" + }, + "refusal": { + "title": "Refusal", + "type": "string" + }, + "item_id": { + "title": "Item Id", + "type": "string" + }, + "output_index": { + "title": "Output Index", + "type": "integer" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.refusal.done", + "default": "response.refusal.done", + "title": "Type", + "type": "string" + } + }, + "required": [ + "content_index", + "refusal", + "item_id", + "output_index", + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseRefusalDone", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseWebSearchCallCompleted": { + "description": 
"Streaming event for completed web search calls.", + "properties": { + "item_id": { + "title": "Item Id", + "type": "string" + }, + "output_index": { + "title": "Output Index", + "type": "integer" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.web_search_call.completed", + "default": "response.web_search_call.completed", + "title": "Type", + "type": "string" + } + }, + "required": [ + "item_id", + "output_index", + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseWebSearchCallCompleted", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseWebSearchCallInProgress": { + "description": "Streaming event for web search calls in progress.", + "properties": { + "item_id": { + "title": "Item Id", + "type": "string" + }, + "output_index": { + "title": "Output Index", + "type": "integer" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.web_search_call.in_progress", + "default": "response.web_search_call.in_progress", + "title": "Type", + "type": "string" + } + }, + "required": [ + "item_id", + "output_index", + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseWebSearchCallInProgress", + "type": "object" + }, + "OpenAIResponseObjectStreamResponseWebSearchCallSearching": { + "properties": { + "item_id": { + "title": "Item Id", + "type": "string" + }, + "output_index": { + "title": "Output Index", + "type": "integer" + }, + "sequence_number": { + "title": "Sequence Number", + "type": "integer" + }, + "type": { + "const": "response.web_search_call.searching", + "default": "response.web_search_call.searching", + "title": "Type", + "type": "string" + } + }, + "required": [ + "item_id", + "output_index", + "sequence_number" + ], + "title": "OpenAIResponseObjectStreamResponseWebSearchCallSearching", + "type": "object" + }, + "OpenAIResponseObjectWithInput": { + "description": "OpenAI response object extended with input context information.", + "properties": { + "created_at": { + "title": "Created At", + "type": "integer" + }, + "error": { + "$ref": "#/components/schemas/OpenAIResponseError", + "nullable": true + }, + "id": { + "title": "Id", + "type": "string" + }, + "model": { + "title": "Model", + "type": "string" + }, + "object": { + "const": "response", + "default": "response", + "title": "Object", + "type": "string" + }, + "output": { + "items": { + "discriminator": { + "mapping": { + "file_search_call": "#/$defs/OpenAIResponseOutputMessageFileSearchToolCall", + "function_call": "#/$defs/OpenAIResponseOutputMessageFunctionToolCall", + "mcp_approval_request": "#/$defs/OpenAIResponseMCPApprovalRequest", + "mcp_call": "#/$defs/OpenAIResponseOutputMessageMCPCall", + "mcp_list_tools": "#/$defs/OpenAIResponseOutputMessageMCPListTools", + "message": "#/$defs/OpenAIResponseMessage", + "web_search_call": "#/$defs/OpenAIResponseOutputMessageWebSearchToolCall" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + }, + { + "$ref": 
"#/components/schemas/OpenAIResponseMCPApprovalRequest" + } + ] + }, + "title": "Output", + "type": "array" + }, + "parallel_tool_calls": { + "default": false, + "title": "Parallel Tool Calls", + "type": "boolean" + }, + "previous_response_id": { + "title": "Previous Response Id", + "type": "string", + "nullable": true + }, + "prompt": { + "$ref": "#/components/schemas/OpenAIResponsePrompt", + "nullable": true + }, + "status": { + "title": "Status", + "type": "string" + }, + "temperature": { + "title": "Temperature", + "type": "number", + "nullable": true + }, + "text": { + "$ref": "#/components/schemas/OpenAIResponseText", + "default": { + "format": { + "type": "text" + } + } + }, + "top_p": { + "title": "Top P", + "type": "number", + "nullable": true + }, + "tools": { + "title": "Tools", + "items": { + "discriminator": { + "mapping": { + "file_search": "#/$defs/OpenAIResponseInputToolFileSearch", + "function": "#/$defs/OpenAIResponseInputToolFunction", + "mcp": "#/$defs/OpenAIResponseToolMCP", + "web_search": "#/$defs/OpenAIResponseInputToolWebSearch", + "web_search_preview": "#/$defs/OpenAIResponseInputToolWebSearch", + "web_search_preview_2025_03_11": "#/$defs/OpenAIResponseInputToolWebSearch" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseInputToolWebSearch" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolFileSearch" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolFunction" + }, + { + "$ref": "#/components/schemas/OpenAIResponseToolMCP" + } + ] + }, + "type": "array", + "nullable": true + }, + "truncation": { + "title": "Truncation", + "type": "string", + "nullable": true + }, + "usage": { + "$ref": "#/components/schemas/OpenAIResponseUsage", + "nullable": true + }, + "instructions": { + "title": "Instructions", + "type": "string", + "nullable": true + }, + "input": { + "items": { + "anyOf": [ + { + "discriminator": { + "mapping": { + "file_search_call": "#/$defs/OpenAIResponseOutputMessageFileSearchToolCall", + "function_call": "#/$defs/OpenAIResponseOutputMessageFunctionToolCall", + "mcp_approval_request": "#/$defs/OpenAIResponseMCPApprovalRequest", + "mcp_call": "#/$defs/OpenAIResponseOutputMessageMCPCall", + "mcp_list_tools": "#/$defs/OpenAIResponseOutputMessageMCPListTools", + "message": "#/$defs/OpenAIResponseMessage", + "web_search_call": "#/$defs/OpenAIResponseOutputMessageWebSearchToolCall" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest" + } + ] + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalResponse" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMessage" + } + ] + }, + "title": "Input", + "type": "array" + } + }, + "required": [ + "created_at", + "id", + "model", + "output", + "status", + "input" + ], + "title": "OpenAIResponseObjectWithInput", + "type": "object" + }, + "ImageContentItem": { + "description": "A image content item", + 
"properties": { + "type": { + "const": "image", + "default": "image", + "title": "Type", + "type": "string" + }, + "image": { + "$ref": "#/components/schemas/_URLOrData" + } + }, + "required": [ + "image" + ], + "title": "ImageContentItem", + "type": "object" + }, + "ImageDelta": { + "description": "An image content delta for streaming responses.", + "properties": { + "type": { + "const": "image", + "default": "image", + "title": "Type", + "type": "string" + }, + "image": { + "format": "binary", + "title": "Image", + "type": "string" + } + }, + "required": [ + "image" + ], + "title": "ImageDelta", + "type": "object" + }, + "TextDelta": { + "description": "A text content delta for streaming responses.", + "properties": { + "type": { + "const": "text", + "default": "text", + "title": "Type", + "type": "string" + }, + "text": { + "title": "Text", + "type": "string" + } + }, + "required": [ + "text" + ], + "title": "TextDelta", + "type": "object" + }, + "ToolCallDelta": { + "description": "A tool call content delta for streaming responses.", + "properties": { + "type": { + "const": "tool_call", + "default": "tool_call", + "title": "Type", + "type": "string" + }, + "tool_call": { + "anyOf": [ + { + "type": "string" + }, + { + "$ref": "#/components/schemas/ToolCall" + } + ], + "title": "Tool Call" + }, + "parse_status": { + "$ref": "#/components/schemas/ToolCallParseStatus" + } + }, + "required": [ + "tool_call", + "parse_status" + ], + "title": "ToolCallDelta", + "type": "object" + }, + "PostTrainingMetric": { + "description": "Training metrics captured during post-training jobs.", + "properties": { + "epoch": { + "title": "Epoch", + "type": "integer" + }, + "train_loss": { + "title": "Train Loss", + "type": "number" + }, + "validation_loss": { + "title": "Validation Loss", + "type": "number" + }, + "perplexity": { + "title": "Perplexity", + "type": "number" + } + }, + "required": [ + "epoch", + "train_loss", + "validation_loss", + "perplexity" + ], + "title": "PostTrainingMetric", + "type": "object" + }, + "DialogType": { + "description": "Parameter type for dialog data with semantic output labels.", + "properties": { + "type": { + "const": "dialog", + "default": "dialog", + "title": "Type", + "type": "string" + } + }, + "title": "DialogType", + "type": "object" + }, + "ConversationMessage": { + "description": "OpenAI-compatible message item for conversations.", + "properties": { + "id": { + "description": "unique identifier for this message", + "title": "Id", + "type": "string" + }, + "content": { + "description": "message content", + "items": { + "additionalProperties": true, + "type": "object" + }, + "title": "Content", + "type": "array" + }, + "role": { + "description": "message role", + "title": "Role", + "type": "string" + }, + "status": { + "description": "message status", + "title": "Status", + "type": "string" + }, + "type": { + "const": "message", + "default": "message", + "title": "Type", + "type": "string" + }, + "object": { + "const": "message", + "default": "message", + "title": "Object", + "type": "string" + } + }, + "required": [ + "id", + "content", + "role", + "status" + ], + "title": "ConversationMessage", + "type": "object" + }, + "_agents_agent_id_session_Request": { + "properties": { + "agent_id": { + "title": "Agent Id", + "type": "string" + }, + "session_name": { + "title": "Session Name", + "type": "string" + } + }, + "required": [ + "agent_id", + "session_name" + ], + "title": "_agents_agent_id_session_Request", + "type": "object" + }, + 
"_agents_agent_id_session_session_id_turn_Request": { + "properties": { + "agent_id": { + "title": "Agent Id", + "type": "string" + }, + "session_id": { + "title": "Session Id", + "type": "string" + }, + "messages": { + "$ref": "#/components/schemas/UserMessage" + }, + "stream": { + "default": false, + "title": "Stream", + "type": "boolean" + }, + "documents": { + "$ref": "#/components/schemas/Document" + }, + "toolgroups": { + "anyOf": [ + { + "type": "string" + }, + { + "$ref": "#/components/schemas/AgentToolGroupWithArgs" + } + ], + "title": "Toolgroups" + }, + "tool_config": { + "$ref": "#/components/schemas/ToolConfig" + } + }, + "required": [ + "agent_id", + "session_id", + "messages", + "documents", + "toolgroups", + "tool_config" + ], + "title": "_agents_agent_id_session_session_id_turn_Request", + "type": "object" + }, + "_agents_agent_id_session_session_id_turn_turn_id_resume_Request": { + "properties": { + "agent_id": { + "title": "Agent Id", + "type": "string" + }, + "session_id": { + "title": "Session Id", + "type": "string" + }, + "turn_id": { + "title": "Turn Id", + "type": "string" + }, + "tool_responses": { + "$ref": "#/components/schemas/ToolResponse" + }, + "stream": { + "default": false, + "title": "Stream", + "type": "boolean" + } + }, + "required": [ + "agent_id", + "session_id", + "turn_id", + "tool_responses" + ], + "title": "_agents_agent_id_session_session_id_turn_turn_id_resume_Request", + "type": "object" + }, + "_safety_run_shield_Request": { + "properties": { + "shield_id": { + "title": "Shield Id", + "type": "string" + }, + "messages": { + "anyOf": [ + { + "$ref": "#/components/schemas/OpenAIUserMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAISystemMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIAssistantMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIToolMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIDeveloperMessageParam" + } + ], + "title": "Messages" + }, + "params": { + "title": "Params", + "type": "string" + } + }, + "required": [ + "shield_id", + "messages", + "params" + ], + "title": "_safety_run_shield_Request", + "type": "object" + }, + "_datasets_Request": { + "properties": { + "purpose": { + "$ref": "#/components/schemas/DatasetPurpose" + }, + "metadata": { + "title": "Metadata", + "type": "string" + }, + "dataset_id": { + "title": "Dataset Id", + "type": "string" + } + }, + "required": [ + "purpose", + "metadata", + "dataset_id" + ], + "title": "_datasets_Request", + "type": "object" + }, + "Attachment": { + "description": "An attachment to an agent turn.", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + { + "items": { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + "type": "array" + }, + { + "$ref": "#/components/schemas/URL" + } + ], + "title": "Content" + }, + "mime_type": { + "title": "Mime Type", + "type": "string" + } + }, + "required": [ + "content", + "mime_type" + ], + "title": "Attachment", + "type": "object" + }, + "StepType": { + 
"description": "Type of the step in an agent turn.", + "enum": [ + "inference", + "tool_execution", + "shield_call", + "memory_retrieval" + ], + "title": "StepType", + "type": "string" + }, + "ToolCallParseStatus": { + "description": "Status of tool call parsing during streaming.", + "enum": [ + "started", + "in_progress", + "failed", + "succeeded" + ], + "title": "ToolCallParseStatus", + "type": "string" + }, + "LogProbConfig": { + "description": ":param top_k: How many tokens (for each position) to return log probabilities for.", + "properties": { + "top_k": { + "default": 0, + "title": "Top K", + "type": "integer" + } + }, + "title": "LogProbConfig", + "type": "object" + }, + "ToolDefinition": { + "properties": { + "tool_name": { + "anyOf": [ + { + "$ref": "#/components/schemas/BuiltinTool" + }, + { + "type": "string" + } + ], + "title": "Tool Name" + }, + "description": { + "title": "Description", + "type": "string", + "nullable": true + }, + "input_schema": { + "title": "Input Schema", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "output_schema": { + "title": "Output Schema", + "additionalProperties": true, + "type": "object", + "nullable": true + } + }, + "required": [ + "tool_name" + ], + "title": "ToolDefinition", + "type": "object" + }, + "MetricInResponse": { + "description": "A metric value included in API responses.", + "properties": { + "metric": { + "title": "Metric", + "type": "string" + }, + "value": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Value" + }, + "unit": { + "title": "Unit", + "type": "string", + "nullable": true + } + }, + "required": [ + "metric", + "value" + ], + "title": "MetricInResponse", + "type": "object" + }, + "ChatCompletionResponseEventType": { + "description": "Types of events that can occur during chat completion.", + "enum": [ + "start", + "complete", + "progress" + ], + "title": "ChatCompletionResponseEventType", + "type": "string" + }, + "RLHFAlgorithm": { + "description": "Available reinforcement learning from human feedback algorithms.", + "enum": [ + "dpo" + ], + "title": "RLHFAlgorithm", + "type": "string" + }, + "FilteringFunction": { + "description": "The type of filtering function.", + "enum": [ + "none", + "random", + "top_k", + "top_p", + "top_k_top_p", + "sigmoid" + ], + "title": "FilteringFunction", + "type": "string" + } + }, + "responses": { + "BadRequest400": { + "description": "The request was invalid or malformed", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Error" + }, + "example": { + "status": 400, + "title": "Bad Request", + "detail": "The request was invalid or malformed" + } + } + } + }, + "TooManyRequests429": { + "description": "The client has sent too many requests in a given amount of time", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Error" + }, + "example": { + "status": 429, + "title": "Too Many Requests", + "detail": "You have exceeded the rate limit. Please try again later." + } + } + } + }, + "InternalServerError500": { + "description": "The server encountered an unexpected error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Error" + }, + "example": { + "status": 500, + "title": "Internal Server Error", + "detail": "An unexpected error occurred. Our team has been notified." 
+ } + } + } + }, + "DefaultError": { + "description": "An unexpected error occurred", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Error" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/docs/static/deprecated-llama-stack-spec.yaml b/docs/static/deprecated-llama-stack-spec.yaml index d77423732..0526e24df 100644 --- a/docs/static/deprecated-llama-stack-spec.yaml +++ b/docs/static/deprecated-llama-stack-spec.yaml @@ -68,9 +68,9 @@ components: example: status: 500 title: Internal Server Error - detail: An unexpected error occurred + detail: An unexpected error occurred. Our team has been notified. DefaultError: - description: An error occurred + description: An unexpected error occurred content: application/json: schema: diff --git a/docs/static/experimental-llama-stack-spec.json b/docs/static/experimental-llama-stack-spec.json new file mode 100644 index 000000000..cf426d968 --- /dev/null +++ b/docs/static/experimental-llama-stack-spec.json @@ -0,0 +1,5135 @@ +{ + "openapi": "3.1.0", + "info": { + "title": "Llama Stack API", + "description": "A comprehensive API for building and deploying AI applications", + "version": "1.0.0" + }, + "servers": [ + { + "url": "https://api.llamastack.com", + "description": "Production server" + }, + { + "url": "https://staging-api.llamastack.com", + "description": "Staging server" + } + ], + "paths": { + "/v1beta/datasetio/append-rows/{dataset_id}": { + "post": { + "tags": [ + "V1Beta" + ], + "summary": "Append rows to a dataset.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "append_rows_v1beta_datasetio_append_rows__dataset_id__post", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "dataset_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "Path parameter: dataset_id" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1beta/datasetio/iterrows/{dataset_id}": { + "get": { + "tags": [ + "V1Beta" + ], + "summary": "Get a paginated list of rows from a dataset.", + "description": "Query endpoint for proper schema generation.", + "operationId": "iterrows_v1beta_datasetio_iterrows__dataset_id__get", + "parameters": [ + { + "name": "limit", + "in": "query", + "required": true, + "schema": { + "type": "integer", + "title": "Limit" + } + }, + { + "name": "start_index", + "in": "query", + "required": true, + "schema": { + "type": "integer", + "title": "Start Index" + } + }, + { + "name": "dataset_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Dataset Id" + } + } + ], + "responses": { + "200": { + "description": "A PaginatedResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PaginatedResponse" + } + } + } + }, + 
"400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1beta/datasets": { + "get": { + "tags": [ + "V1Beta" + ], + "summary": "List all datasets.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "list_datasets_v1beta_datasets_get", + "responses": { + "200": { + "description": "A ListDatasetsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListDatasetsResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + }, + "post": { + "tags": [ + "V1Beta" + ], + "summary": "Register a new dataset.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "register_dataset_v1beta_datasets_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/__main_____datasets_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A Dataset.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Dataset" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1beta/datasets/{dataset_id}": { + "delete": { + "tags": [ + "V1Beta" + ], + "summary": "Unregister a dataset by its ID.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "unregister_dataset_v1beta_datasets__dataset_id__delete", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "dataset_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "Path parameter: dataset_id" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1Beta" + ], + 
"summary": "Get a dataset by its ID.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_dataset_v1beta_datasets__dataset_id__get", + "parameters": [ + { + "name": "dataset_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Dataset Id" + } + } + ], + "responses": { + "200": { + "description": "A Dataset.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Dataset" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/agents": { + "get": { + "tags": [ + "V1Alpha" + ], + "summary": "List all agents.", + "description": "Query endpoint for proper schema generation.", + "operationId": "list_agents_v1alpha_agents_get", + "parameters": [ + { + "name": "limit", + "in": "query", + "required": true, + "schema": { + "type": "integer", + "title": "Limit" + } + }, + { + "name": "start_index", + "in": "query", + "required": true, + "schema": { + "type": "integer", + "title": "Start Index" + } + } + ], + "responses": { + "200": { + "description": "A PaginatedResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PaginatedResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1Alpha" + ], + "summary": "Create an agent with the given configuration.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "create_agent_v1alpha_agents_post", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AgentConfig" + } + } + } + }, + "responses": { + "200": { + "description": "An AgentCreateResponse with the agent ID.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AgentCreateResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/agents/{agent_id}": { + "delete": { + "tags": [ + "V1Alpha" + ], + "summary": "Delete an agent by its ID and its associated sessions and turns.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "delete_agent_v1alpha_agents__agent_id__delete", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + 
"required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the agent to delete." + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1Alpha" + ], + "summary": "Describe an agent by its ID.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_agent_v1alpha_agents__agent_id__get", + "parameters": [ + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Agent Id" + }, + "description": "ID of the agent." + } + ], + "responses": { + "200": { + "description": "An Agent of the agent.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Agent" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/agents/{agent_id}/session": { + "post": { + "tags": [ + "V1Alpha" + ], + "summary": "Create a new session for an agent.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "create_agent_session_v1alpha_agents__agent_id__session_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/__main_____agents_agent_id_session_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "An AgentSessionCreateResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AgentSessionCreateResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "parameters": [ + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the agent to create the session for." 
+ } + ] + } + }, + "/v1alpha/agents/{agent_id}/session/{session_id}": { + "delete": { + "tags": [ + "V1Alpha" + ], + "summary": "Delete an agent session by its ID and its associated turns.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "delete_agents_session_v1alpha_agents__agent_id__session__session_id__delete", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "session_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the session to delete." + }, + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the agent to delete the session for." + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1Alpha" + ], + "summary": "Retrieve an agent session by its ID.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_agents_session_v1alpha_agents__agent_id__session__session_id__get", + "parameters": [ + { + "name": "turn_ids", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Turn Ids" + } + }, + { + "name": "session_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Session Id" + }, + "description": "The ID of the session to get." + }, + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Agent Id" + }, + "description": "The ID of the agent to get the session for." 
+ } + ], + "responses": { + "200": { + "description": "A Session.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Session" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/agents/{agent_id}/session/{session_id}/turn": { + "post": { + "tags": [ + "V1Alpha" + ], + "summary": "Create a new turn for an agent.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "create_agent_turn_v1alpha_agents__agent_id__session__session_id__turn_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/__main_____agents_agent_id_session_session_id_turn_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "If stream=False, returns a Turn object.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Turn" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "parameters": [ + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the agent to create the turn for." + }, + { + "name": "session_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the session to create the turn for." + } + ] + } + }, + "/v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}": { + "get": { + "tags": [ + "V1Alpha" + ], + "summary": "Retrieve an agent turn by its ID.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_agents_turn_v1alpha_agents__agent_id__session__session_id__turn__turn_id__get", + "parameters": [ + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Agent Id" + }, + "description": "The ID of the agent to get the turn for." + }, + { + "name": "session_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Session Id" + }, + "description": "The ID of the session to get the turn for." + }, + { + "name": "turn_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Turn Id" + }, + "description": "The ID of the turn to get." 
+ } + ], + "responses": { + "200": { + "description": "A Turn.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Turn" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}/resume": { + "post": { + "tags": [ + "V1Alpha" + ], + "summary": "Resume an agent turn with executed tool call responses.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "resume_agent_turn_v1alpha_agents__agent_id__session__session_id__turn__turn_id__resume_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/__main_____agents_agent_id_session_session_id_turn_turn_id_resume_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A Turn object if stream is False, otherwise an AsyncIterator of AgentTurnResponseStreamChunk objects.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Turn" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "parameters": [ + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the agent to resume." + }, + { + "name": "session_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the session to resume." + }, + { + "name": "turn_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the turn to resume." + } + ] + } + }, + "/v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}/step/{step_id}": { + "get": { + "tags": [ + "V1Alpha" + ], + "summary": "Retrieve an agent step by its ID.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_agents_step_v1alpha_agents__agent_id__session__session_id__turn__turn_id__step__step_id__get", + "parameters": [ + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Agent Id" + }, + "description": "The ID of the agent to get the step for." + }, + { + "name": "session_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Session Id" + }, + "description": "The ID of the session to get the step for." + }, + { + "name": "turn_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Turn Id" + }, + "description": "The ID of the turn to get the step for." + }, + { + "name": "step_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Step Id" + }, + "description": "The ID of the step to get." 
+ } + ], + "responses": { + "200": { + "description": "An AgentStepResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AgentStepResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/agents/{agent_id}/sessions": { + "get": { + "tags": [ + "V1Alpha" + ], + "summary": "List all session(s) of a given agent.", + "description": "Query endpoint for proper schema generation.", + "operationId": "list_agent_sessions_v1alpha_agents__agent_id__sessions_get", + "parameters": [ + { + "name": "limit", + "in": "query", + "required": true, + "schema": { + "type": "integer", + "title": "Limit" + } + }, + { + "name": "start_index", + "in": "query", + "required": true, + "schema": { + "type": "integer", + "title": "Start Index" + } + }, + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Agent Id" + }, + "description": "The ID of the agent to list sessions for." + } + ], + "responses": { + "200": { + "description": "A PaginatedResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PaginatedResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/eval/benchmarks": { + "get": { + "tags": [ + "V1Alpha" + ], + "summary": "List all benchmarks.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "list_benchmarks_v1alpha_eval_benchmarks_get", + "responses": { + "200": { + "description": "A ListBenchmarksResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListBenchmarksResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1Alpha" + ], + "summary": "Register a benchmark.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "register_benchmark_v1alpha_eval_benchmarks_post", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", 
+ "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/eval/benchmarks/{benchmark_id}": { + "delete": { + "tags": [ + "V1Alpha" + ], + "summary": "Unregister a benchmark.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "unregister_benchmark_v1alpha_eval_benchmarks__benchmark_id__delete", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "benchmark_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the benchmark to unregister." + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1Alpha" + ], + "summary": "Get a benchmark by its ID.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_benchmark_v1alpha_eval_benchmarks__benchmark_id__get", + "parameters": [ + { + "name": "benchmark_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Benchmark Id" + }, + "description": "The ID of the benchmark to get." 
+ } + ], + "responses": { + "200": { + "description": "A Benchmark.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Benchmark" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/eval/benchmarks/{benchmark_id}/evaluations": { + "post": { + "tags": [ + "V1Alpha" + ], + "summary": "Evaluate a list of rows on a benchmark.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "evaluate_rows_v1alpha_eval_benchmarks__benchmark_id__evaluations_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BenchmarkConfig" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "EvaluateResponse object containing generations and scores.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/EvaluateResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "parameters": [ + { + "name": "benchmark_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the benchmark to run the evaluation on." + } + ] + } + }, + "/v1alpha/eval/benchmarks/{benchmark_id}/jobs": { + "post": { + "tags": [ + "V1Alpha" + ], + "summary": "Run an evaluation on a benchmark.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "run_eval_v1alpha_eval_benchmarks__benchmark_id__jobs_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BenchmarkConfig" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "The job that was created to run the evaluation.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Job" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "parameters": [ + { + "name": "benchmark_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the benchmark to run the evaluation on." 
+ } + ] + } + }, + "/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}": { + "delete": { + "tags": [ + "V1Alpha" + ], + "summary": "Cancel a job.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "job_cancel_v1alpha_eval_benchmarks__benchmark_id__jobs__job_id__delete", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "benchmark_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the benchmark to run the evaluation on." + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the job to cancel." + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1Alpha" + ], + "summary": "Get the status of a job.", + "description": "Query endpoint for proper schema generation.", + "operationId": "job_status_v1alpha_eval_benchmarks__benchmark_id__jobs__job_id__get", + "parameters": [ + { + "name": "benchmark_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Benchmark Id" + }, + "description": "The ID of the benchmark to run the evaluation on." + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Job Id" + }, + "description": "The ID of the job to get the status of." + } + ], + "responses": { + "200": { + "description": "The status of the evaluation job.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Job" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result": { + "get": { + "tags": [ + "V1Alpha" + ], + "summary": "Get the result of a job.", + "description": "Query endpoint for proper schema generation.", + "operationId": "job_result_v1alpha_eval_benchmarks__benchmark_id__jobs__job_id__result_get", + "parameters": [ + { + "name": "benchmark_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Benchmark Id" + }, + "description": "The ID of the benchmark to run the evaluation on." + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Job Id" + }, + "description": "The ID of the job to get the result of." 
+ } + ], + "responses": { + "200": { + "description": "The result of the job.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/EvaluateResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/inference/rerank": { + "post": { + "tags": [ + "V1Alpha" + ], + "summary": "Rerank a list of documents based on their relevance to a query.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "rerank_v1alpha_inference_rerank_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_inference_rerank_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "RerankResponse with indices sorted by relevance score (descending).", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RerankResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1alpha/post-training/job/artifacts": { + "get": { + "tags": [ + "V1Alpha" + ], + "summary": "Get the artifacts of a training job.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_training_job_artifacts_v1alpha_post_training_job_artifacts_get", + "parameters": [ + { + "name": "job_uuid", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Job Uuid" + } + } + ], + "responses": { + "200": { + "description": "A PostTrainingJobArtifactsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostTrainingJobArtifactsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/post-training/job/cancel": { + "post": { + "tags": [ + "V1Alpha" + ], + "summary": "Cancel a training job.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "cancel_training_job_v1alpha_post_training_job_cancel_post", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": 
"#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/post-training/job/status": { + "get": { + "tags": [ + "V1Alpha" + ], + "summary": "Get the status of a training job.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_training_job_status_v1alpha_post_training_job_status_get", + "parameters": [ + { + "name": "job_uuid", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Job Uuid" + } + } + ], + "responses": { + "200": { + "description": "A PostTrainingJobStatusResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostTrainingJobStatusResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/post-training/jobs": { + "get": { + "tags": [ + "V1Alpha" + ], + "summary": "Get all training jobs.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "get_training_jobs_v1alpha_post_training_jobs_get", + "responses": { + "200": { + "description": "A ListPostTrainingJobsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListPostTrainingJobsResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1alpha/post-training/preference-optimize": { + "post": { + "tags": [ + "V1Alpha" + ], + "summary": "Run preference optimization of a model.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "preference_optimize_v1alpha_post_training_preference_optimize_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DPOAlignmentConfig" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A PostTrainingJob.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostTrainingJob" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1alpha/post-training/supervised-fine-tune": { + "post": { + 
"tags": [ + "V1Alpha" + ], + "summary": "Run supervised fine-tuning of a model.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "supervised_fine_tune_v1alpha_post_training_supervised_fine_tune_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TrainingConfig" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A PostTrainingJob.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostTrainingJob" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + } + }, + "components": { + "schemas": { + "AgentCandidate": { + "properties": { + "type": { + "type": "string", + "const": "agent", + "title": "Type", + "default": "agent" + }, + "config": { + "$ref": "#/components/schemas/AgentConfig" + } + }, + "type": "object", + "required": [ + "config" + ], + "title": "AgentCandidate", + "description": "An agent candidate for evaluation." + }, + "AgentConfig": { + "properties": { + "sampling_params": { + "$ref": "#/components/schemas/SamplingParams" + }, + "input_shields": { + "title": "Input Shields", + "items": { + "type": "string" + }, + "type": "array" + }, + "output_shields": { + "title": "Output Shields", + "items": { + "type": "string" + }, + "type": "array" + }, + "toolgroups": { + "title": "Toolgroups", + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "$ref": "#/components/schemas/AgentToolGroupWithArgs" + } + ] + }, + "type": "array" + }, + "client_tools": { + "title": "Client Tools", + "items": { + "$ref": "#/components/schemas/ToolDef" + }, + "type": "array" + }, + "tool_choice": { + "deprecated": true, + "$ref": "#/components/schemas/ToolChoice" + }, + "tool_prompt_format": { + "deprecated": true, + "$ref": "#/components/schemas/ToolPromptFormat" + }, + "tool_config": { + "$ref": "#/components/schemas/ToolConfig" + }, + "max_infer_iters": { + "title": "Max Infer Iters", + "default": 10, + "type": "integer" + }, + "model": { + "type": "string", + "title": "Model" + }, + "instructions": { + "type": "string", + "title": "Instructions" + }, + "name": { + "title": "Name", + "type": "string" + }, + "enable_session_persistence": { + "title": "Enable Session Persistence", + "default": false, + "type": "boolean" + }, + "response_format": { + "title": "Response Format", + "oneOf": [ + { + "$ref": "#/components/schemas/JsonSchemaResponseFormat" + }, + { + "$ref": "#/components/schemas/GrammarResponseFormat" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "grammar": "#/components/schemas/GrammarResponseFormat", + "json_schema": "#/components/schemas/JsonSchemaResponseFormat" + } + } + } + }, + "type": "object", + "required": [ + "model", + "instructions" + ], + "title": "AgentConfig", + "description": "Configuration for an agent." + }, + "AgentCreateResponse": { + "properties": { + "agent_id": { + "type": "string", + "title": "Agent Id" + } + }, + "type": "object", + "required": [ + "agent_id" + ], + "title": "AgentCreateResponse", + "description": "Response returned when creating a new agent." 
+ }, + "AgentSessionCreateResponse": { + "properties": { + "session_id": { + "type": "string", + "title": "Session Id" + } + }, + "type": "object", + "required": [ + "session_id" + ], + "title": "AgentSessionCreateResponse", + "description": "Response returned when creating a new agent session." + }, + "AgentToolGroupWithArgs": { + "properties": { + "name": { + "type": "string", + "title": "Name" + }, + "args": { + "additionalProperties": true, + "type": "object", + "title": "Args" + } + }, + "type": "object", + "required": [ + "name", + "args" + ], + "title": "AgentToolGroupWithArgs" + }, + "AggregationFunctionType": { + "type": "string", + "enum": [ + "average", + "weighted_average", + "median", + "categorical_count", + "accuracy" + ], + "title": "AggregationFunctionType", + "description": "Types of aggregation functions for scoring results." + }, + "Attachment-Output": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + }, + { + "$ref": "#/components/schemas/URL" + } + ], + "title": "Content" + }, + "mime_type": { + "type": "string", + "title": "Mime Type" + } + }, + "type": "object", + "required": [ + "content", + "mime_type" + ], + "title": "Attachment", + "description": "An attachment to an agent turn." + }, + "BasicScoringFnParams": { + "properties": { + "type": { + "type": "string", + "const": "basic", + "title": "Type", + "default": "basic" + }, + "aggregation_functions": { + "items": { + "$ref": "#/components/schemas/AggregationFunctionType" + }, + "type": "array", + "title": "Aggregation Functions", + "description": "Aggregation functions to apply to the scores of each row" + } + }, + "type": "object", + "title": "BasicScoringFnParams", + "description": "Parameters for basic scoring function configuration." 
+ }, + "Benchmark": { + "properties": { + "identifier": { + "type": "string", + "title": "Identifier", + "description": "Unique identifier for this resource in llama stack" + }, + "provider_resource_id": { + "title": "Provider Resource Id", + "description": "Unique identifier for this resource in the provider", + "type": "string" + }, + "provider_id": { + "type": "string", + "title": "Provider Id", + "description": "ID of the provider that owns this resource" + }, + "type": { + "type": "string", + "const": "benchmark", + "title": "Type", + "default": "benchmark" + }, + "dataset_id": { + "type": "string", + "title": "Dataset Id" + }, + "scoring_functions": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Scoring Functions" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata", + "description": "Metadata for this evaluation task" + } + }, + "type": "object", + "required": [ + "identifier", + "provider_id", + "dataset_id", + "scoring_functions" + ], + "title": "Benchmark", + "description": "A benchmark resource for evaluating model performance." + }, + "BenchmarkConfig": { + "properties": { + "eval_candidate": { + "oneOf": [ + { + "$ref": "#/components/schemas/ModelCandidate" + }, + { + "$ref": "#/components/schemas/AgentCandidate" + } + ], + "title": "Eval Candidate", + "discriminator": { + "propertyName": "type", + "mapping": { + "agent": "#/components/schemas/AgentCandidate", + "model": "#/components/schemas/ModelCandidate" + } + } + }, + "scoring_params": { + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/components/schemas/LLMAsJudgeScoringFnParams" + }, + { + "$ref": "#/components/schemas/RegexParserScoringFnParams" + }, + { + "$ref": "#/components/schemas/BasicScoringFnParams" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "basic": "#/components/schemas/BasicScoringFnParams", + "llm_as_judge": "#/components/schemas/LLMAsJudgeScoringFnParams", + "regex_parser": "#/components/schemas/RegexParserScoringFnParams" + } + } + }, + "type": "object", + "title": "Scoring Params", + "description": "Map between scoring function id and parameters for each scoring function you want to run" + }, + "num_examples": { + "title": "Num Examples", + "description": "Number of examples to evaluate (useful for testing), if not provided, all examples in the dataset will be evaluated", + "type": "integer" + } + }, + "type": "object", + "required": [ + "eval_candidate" + ], + "title": "BenchmarkConfig", + "description": "A benchmark configuration for evaluation." 
+ }, + "BuiltinTool": { + "type": "string", + "enum": [ + "brave_search", + "wolfram_alpha", + "photogen", + "code_interpreter" + ], + "title": "BuiltinTool" + }, + "CompletionMessage-Output": { + "properties": { + "role": { + "type": "string", + "const": "assistant", + "title": "Role", + "default": "assistant" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "stop_reason": { + "$ref": "#/components/schemas/StopReason" + }, + "tool_calls": { + "title": "Tool Calls", + "items": { + "$ref": "#/components/schemas/ToolCall" + }, + "type": "array" + } + }, + "type": "object", + "required": [ + "content", + "stop_reason" + ], + "title": "CompletionMessage", + "description": "A message containing the model's (assistant) response in a chat conversation." + }, + "DPOAlignmentConfig": { + "properties": { + "beta": { + "type": "number", + "title": "Beta" + }, + "loss_type": { + "$ref": "#/components/schemas/DPOLossType", + "default": "sigmoid" + } + }, + "type": "object", + "required": [ + "beta" + ], + "title": "DPOAlignmentConfig", + "description": "Configuration for Direct Preference Optimization (DPO) alignment." + }, + "DPOLossType": { + "type": "string", + "enum": [ + "sigmoid", + "hinge", + "ipo", + "kto_pair" + ], + "title": "DPOLossType" + }, + "DataConfig": { + "properties": { + "dataset_id": { + "type": "string", + "title": "Dataset Id" + }, + "batch_size": { + "type": "integer", + "title": "Batch Size" + }, + "shuffle": { + "type": "boolean", + "title": "Shuffle" + }, + "data_format": { + "$ref": "#/components/schemas/DatasetFormat" + }, + "validation_dataset_id": { + "title": "Validation Dataset Id", + "type": "string" + }, + "packed": { + "title": "Packed", + "default": false, + "type": "boolean" + }, + "train_on_input": { + "title": "Train On Input", + "default": false, + "type": "boolean" + } + }, + "type": "object", + "required": [ + "dataset_id", + "batch_size", + "shuffle", + "data_format" + ], + "title": "DataConfig", + "description": "Configuration for training data and data loading." 
+ }, + "Dataset": { + "properties": { + "identifier": { + "type": "string", + "title": "Identifier", + "description": "Unique identifier for this resource in llama stack" + }, + "provider_resource_id": { + "title": "Provider Resource Id", + "description": "Unique identifier for this resource in the provider", + "type": "string" + }, + "provider_id": { + "type": "string", + "title": "Provider Id", + "description": "ID of the provider that owns this resource" + }, + "type": { + "type": "string", + "const": "dataset", + "title": "Type", + "default": "dataset" + }, + "purpose": { + "$ref": "#/components/schemas/DatasetPurpose" + }, + "source": { + "oneOf": [ + { + "$ref": "#/components/schemas/URIDataSource" + }, + { + "$ref": "#/components/schemas/RowsDataSource" + } + ], + "title": "Source", + "discriminator": { + "propertyName": "type", + "mapping": { + "rows": "#/components/schemas/RowsDataSource", + "uri": "#/components/schemas/URIDataSource" + } + } + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata", + "description": "Any additional metadata for this dataset" + } + }, + "type": "object", + "required": [ + "identifier", + "provider_id", + "purpose", + "source" + ], + "title": "Dataset", + "description": "Dataset resource for storing and accessing training or evaluation data." + }, + "DatasetFormat": { + "type": "string", + "enum": [ + "instruct", + "dialog" + ], + "title": "DatasetFormat", + "description": "Format of the training dataset." + }, + "DatasetPurpose": { + "type": "string", + "enum": [ + "post-training/messages", + "eval/question-answer", + "eval/messages-answer" + ], + "title": "DatasetPurpose", + "description": "Purpose of the dataset. Each purpose has a required input data schema." + }, + "Document": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + }, + { + "$ref": "#/components/schemas/URL" + } + ], + "title": "Content" + }, + "mime_type": { + "type": "string", + "title": "Mime Type" + } + }, + "type": "object", + "required": [ + "content", + "mime_type" + ], + "title": "Document", + "description": "A document to be used by an agent." + }, + "EfficiencyConfig": { + "properties": { + "enable_activation_checkpointing": { + "title": "Enable Activation Checkpointing", + "default": false, + "type": "boolean" + }, + "enable_activation_offloading": { + "title": "Enable Activation Offloading", + "default": false, + "type": "boolean" + }, + "memory_efficient_fsdp_wrap": { + "title": "Memory Efficient Fsdp Wrap", + "default": false, + "type": "boolean" + }, + "fsdp_cpu_offload": { + "title": "Fsdp Cpu Offload", + "default": false, + "type": "boolean" + } + }, + "type": "object", + "title": "EfficiencyConfig", + "description": "Configuration for memory and compute efficiency optimizations." 
+ }, + "EvaluateResponse": { + "properties": { + "generations": { + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array", + "title": "Generations" + }, + "scores": { + "additionalProperties": { + "$ref": "#/components/schemas/ScoringResult" + }, + "type": "object", + "title": "Scores" + } + }, + "type": "object", + "required": [ + "generations", + "scores" + ], + "title": "EvaluateResponse", + "description": "The response from an evaluation." + }, + "GrammarResponseFormat": { + "properties": { + "type": { + "type": "string", + "const": "grammar", + "title": "Type", + "default": "grammar" + }, + "bnf": { + "additionalProperties": true, + "type": "object", + "title": "Bnf" + } + }, + "type": "object", + "required": [ + "bnf" + ], + "title": "GrammarResponseFormat", + "description": "Configuration for grammar-guided response generation." + }, + "GreedySamplingStrategy": { + "properties": { + "type": { + "type": "string", + "const": "greedy", + "title": "Type", + "default": "greedy" + } + }, + "type": "object", + "title": "GreedySamplingStrategy", + "description": "Greedy sampling strategy that selects the highest probability token at each step." + }, + "ImageContentItem-Input": { + "properties": { + "type": { + "type": "string", + "const": "image", + "title": "Type", + "default": "image" + }, + "image": { + "$ref": "#/components/schemas/_URLOrData" + } + }, + "type": "object", + "required": [ + "image" + ], + "title": "ImageContentItem", + "description": "A image content item" + }, + "ImageContentItem-Output": { + "properties": { + "type": { + "type": "string", + "const": "image", + "title": "Type", + "default": "image" + }, + "image": { + "$ref": "#/components/schemas/_URLOrData" + } + }, + "type": "object", + "required": [ + "image" + ], + "title": "ImageContentItem", + "description": "A image content item" + }, + "InferenceStep-Output": { + "properties": { + "turn_id": { + "type": "string", + "title": "Turn Id" + }, + "step_id": { + "type": "string", + "title": "Step Id" + }, + "started_at": { + "title": "Started At", + "type": "string", + "format": "date-time" + }, + "completed_at": { + "title": "Completed At", + "type": "string", + "format": "date-time" + }, + "step_type": { + "type": "string", + "const": "inference", + "title": "Step Type", + "default": "inference" + }, + "model_response": { + "$ref": "#/components/schemas/CompletionMessage-Output" + } + }, + "type": "object", + "required": [ + "turn_id", + "step_id", + "model_response" + ], + "title": "InferenceStep", + "description": "An inference step in an agent turn." + }, + "Job": { + "properties": { + "job_id": { + "type": "string", + "title": "Job Id" + }, + "status": { + "$ref": "#/components/schemas/JobStatus" + } + }, + "type": "object", + "required": [ + "job_id", + "status" + ], + "title": "Job", + "description": "A job execution instance with status tracking." + }, + "JobStatus": { + "type": "string", + "enum": [ + "completed", + "in_progress", + "failed", + "scheduled", + "cancelled" + ], + "title": "JobStatus", + "description": "Status of a job execution." + }, + "JsonSchemaResponseFormat": { + "properties": { + "type": { + "type": "string", + "const": "json_schema", + "title": "Type", + "default": "json_schema" + }, + "json_schema": { + "additionalProperties": true, + "type": "object", + "title": "Json Schema" + } + }, + "type": "object", + "required": [ + "json_schema" + ], + "title": "JsonSchemaResponseFormat", + "description": "Configuration for JSON schema-guided response generation." 
+ }, + "LLMAsJudgeScoringFnParams": { + "properties": { + "type": { + "type": "string", + "const": "llm_as_judge", + "title": "Type", + "default": "llm_as_judge" + }, + "judge_model": { + "type": "string", + "title": "Judge Model" + }, + "prompt_template": { + "title": "Prompt Template", + "type": "string" + }, + "judge_score_regexes": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Judge Score Regexes", + "description": "Regexes to extract the answer from generated response" + }, + "aggregation_functions": { + "items": { + "$ref": "#/components/schemas/AggregationFunctionType" + }, + "type": "array", + "title": "Aggregation Functions", + "description": "Aggregation functions to apply to the scores of each row" + } + }, + "type": "object", + "required": [ + "judge_model" + ], + "title": "LLMAsJudgeScoringFnParams", + "description": "Parameters for LLM-as-judge scoring function configuration." + }, + "ListBenchmarksResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/Benchmark" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListBenchmarksResponse" + }, + "ListDatasetsResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/Dataset" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListDatasetsResponse", + "description": "Response from listing datasets." + }, + "ListPostTrainingJobsResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/PostTrainingJob" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListPostTrainingJobsResponse" + }, + "MemoryRetrievalStep-Output": { + "properties": { + "turn_id": { + "type": "string", + "title": "Turn Id" + }, + "step_id": { + "type": "string", + "title": "Step Id" + }, + "started_at": { + "title": "Started At", + "type": "string", + "format": "date-time" + }, + "completed_at": { + "title": "Completed At", + "type": "string", + "format": "date-time" + }, + "step_type": { + "type": "string", + "const": "memory_retrieval", + "title": "Step Type", + "default": "memory_retrieval" + }, + "vector_store_ids": { + "type": "string", + "title": "Vector Store Ids" + }, + "inserted_context": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Inserted Context" + } + }, + "type": "object", + "required": [ + "turn_id", + "step_id", + "vector_store_ids", + "inserted_context" + ], + "title": "MemoryRetrievalStep", + "description": "A memory retrieval step in an agent turn." 
+ }, + "ModelCandidate": { + "properties": { + "type": { + "type": "string", + "const": "model", + "title": "Type", + "default": "model" + }, + "model": { + "type": "string", + "title": "Model" + }, + "sampling_params": { + "$ref": "#/components/schemas/SamplingParams" + }, + "system_message": { + "$ref": "#/components/schemas/SystemMessage" + } + }, + "type": "object", + "required": [ + "model", + "sampling_params" + ], + "title": "ModelCandidate", + "description": "A model candidate for evaluation." + }, + "OptimizerConfig": { + "properties": { + "optimizer_type": { + "$ref": "#/components/schemas/OptimizerType" + }, + "lr": { + "type": "number", + "title": "Lr" + }, + "weight_decay": { + "type": "number", + "title": "Weight Decay" + }, + "num_warmup_steps": { + "type": "integer", + "title": "Num Warmup Steps" + } + }, + "type": "object", + "required": [ + "optimizer_type", + "lr", + "weight_decay", + "num_warmup_steps" + ], + "title": "OptimizerConfig", + "description": "Configuration parameters for the optimization algorithm." + }, + "OptimizerType": { + "type": "string", + "enum": [ + "adam", + "adamw", + "sgd" + ], + "title": "OptimizerType", + "description": "Available optimizer algorithms for training." + }, + "PostTrainingJob": { + "properties": { + "job_uuid": { + "type": "string", + "title": "Job Uuid" + } + }, + "type": "object", + "required": [ + "job_uuid" + ], + "title": "PostTrainingJob" + }, + "RegexParserScoringFnParams": { + "properties": { + "type": { + "type": "string", + "const": "regex_parser", + "title": "Type", + "default": "regex_parser" + }, + "parsing_regexes": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Parsing Regexes", + "description": "Regex to extract the answer from generated response" + }, + "aggregation_functions": { + "items": { + "$ref": "#/components/schemas/AggregationFunctionType" + }, + "type": "array", + "title": "Aggregation Functions", + "description": "Aggregation functions to apply to the scores of each row" + } + }, + "type": "object", + "title": "RegexParserScoringFnParams", + "description": "Parameters for regex parser scoring function configuration." + }, + "RerankData": { + "properties": { + "index": { + "type": "integer", + "title": "Index" + }, + "relevance_score": { + "type": "number", + "title": "Relevance Score" + } + }, + "type": "object", + "required": [ + "index", + "relevance_score" + ], + "title": "RerankData", + "description": "A single rerank result from a reranking response." + }, + "RerankResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/RerankData" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "RerankResponse", + "description": "Response from a reranking request." + }, + "RowsDataSource": { + "properties": { + "type": { + "type": "string", + "const": "rows", + "title": "Type", + "default": "rows" + }, + "rows": { + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array", + "title": "Rows" + } + }, + "type": "object", + "required": [ + "rows" + ], + "title": "RowsDataSource", + "description": "A dataset stored in rows." 
+ }, + "SafetyViolation": { + "properties": { + "violation_level": { + "$ref": "#/components/schemas/ViolationLevel" + }, + "user_message": { + "title": "User Message", + "type": "string" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata" + } + }, + "type": "object", + "required": [ + "violation_level" + ], + "title": "SafetyViolation", + "description": "Details of a safety violation detected by content moderation." + }, + "SamplingParams": { + "properties": { + "strategy": { + "oneOf": [ + { + "$ref": "#/components/schemas/GreedySamplingStrategy" + }, + { + "$ref": "#/components/schemas/TopPSamplingStrategy" + }, + { + "$ref": "#/components/schemas/TopKSamplingStrategy" + } + ], + "title": "Strategy", + "discriminator": { + "propertyName": "type", + "mapping": { + "greedy": "#/components/schemas/GreedySamplingStrategy", + "top_k": "#/components/schemas/TopKSamplingStrategy", + "top_p": "#/components/schemas/TopPSamplingStrategy" + } + } + }, + "max_tokens": { + "title": "Max Tokens", + "type": "integer" + }, + "repetition_penalty": { + "title": "Repetition Penalty", + "default": 1.0, + "type": "number" + }, + "stop": { + "title": "Stop", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object", + "title": "SamplingParams", + "description": "Sampling parameters." + }, + "ScoringResult": { + "properties": { + "score_rows": { + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array", + "title": "Score Rows" + }, + "aggregated_results": { + "additionalProperties": true, + "type": "object", + "title": "Aggregated Results" + } + }, + "type": "object", + "required": [ + "score_rows", + "aggregated_results" + ], + "title": "ScoringResult", + "description": "A scoring result for a single row." + }, + "ShieldCallStep-Output": { + "properties": { + "turn_id": { + "type": "string", + "title": "Turn Id" + }, + "step_id": { + "type": "string", + "title": "Step Id" + }, + "started_at": { + "title": "Started At", + "type": "string", + "format": "date-time" + }, + "completed_at": { + "title": "Completed At", + "type": "string", + "format": "date-time" + }, + "step_type": { + "type": "string", + "const": "shield_call", + "title": "Step Type", + "default": "shield_call" + }, + "violation": { + "$ref": "#/components/schemas/SafetyViolation" + } + }, + "type": "object", + "required": [ + "turn_id", + "step_id", + "violation" + ], + "title": "ShieldCallStep", + "description": "A shield call step in an agent turn." 
+ }, + "StopReason": { + "type": "string", + "enum": [ + "end_of_turn", + "end_of_message", + "out_of_tokens" + ], + "title": "StopReason" + }, + "SystemMessage": { + "properties": { + "role": { + "type": "string", + "const": "system", + "title": "Role", + "default": "system" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + } + }, + "type": "object", + "required": [ + "content" + ], + "title": "SystemMessage", + "description": "A system message providing instructions or context to the model." + }, + "SystemMessageBehavior": { + "type": "string", + "enum": [ + "append", + "replace" + ], + "title": "SystemMessageBehavior", + "description": "Config for how to override the default system prompt." + }, + "TextContentItem": { + "properties": { + "type": { + "type": "string", + "const": "text", + "title": "Type", + "default": "text" + }, + "text": { + "type": "string", + "title": "Text" + } + }, + "type": "object", + "required": [ + "text" + ], + "title": "TextContentItem", + "description": "A text content item" + }, + "ToolCall": { + "properties": { + "call_id": { + "type": "string", + "title": "Call Id" + }, + "tool_name": { + "anyOf": [ + { + "$ref": "#/components/schemas/BuiltinTool" + }, + { + "type": "string" + } + ], + "title": "Tool Name" + }, + "arguments": { + "type": "string", + "title": "Arguments" + } + }, + "type": "object", + "required": [ + "call_id", + "tool_name", + "arguments" + ], + "title": "ToolCall" + }, + "ToolChoice": { + "type": "string", + "enum": [ + "auto", + "required", + "none" + ], + "title": "ToolChoice", + "description": "Whether tool use is required or automatic. This is a hint to the model which may not be followed. It depends on the Instruction Following capabilities of the model." + }, + "ToolConfig": { + "properties": { + "tool_choice": { + "anyOf": [ + { + "$ref": "#/components/schemas/ToolChoice" + }, + { + "type": "string" + } + ], + "title": "Tool Choice", + "default": "auto" + }, + "tool_prompt_format": { + "$ref": "#/components/schemas/ToolPromptFormat" + }, + "system_message_behavior": { + "default": "append", + "$ref": "#/components/schemas/SystemMessageBehavior" + } + }, + "type": "object", + "title": "ToolConfig", + "description": "Configuration for tool use." 
+ }, + "ToolDef": { + "properties": { + "toolgroup_id": { + "title": "Toolgroup Id", + "type": "string" + }, + "name": { + "type": "string", + "title": "Name" + }, + "description": { + "title": "Description", + "type": "string" + }, + "input_schema": { + "title": "Input Schema", + "additionalProperties": true, + "type": "object" + }, + "output_schema": { + "title": "Output Schema", + "additionalProperties": true, + "type": "object" + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object" + } + }, + "type": "object", + "required": [ + "name" + ], + "title": "ToolDef", + "description": "Tool definition used in runtime contexts." + }, + "ToolExecutionStep-Output": { + "properties": { + "turn_id": { + "type": "string", + "title": "Turn Id" + }, + "step_id": { + "type": "string", + "title": "Step Id" + }, + "started_at": { + "title": "Started At", + "type": "string", + "format": "date-time" + }, + "completed_at": { + "title": "Completed At", + "type": "string", + "format": "date-time" + }, + "step_type": { + "type": "string", + "const": "tool_execution", + "title": "Step Type", + "default": "tool_execution" + }, + "tool_calls": { + "items": { + "$ref": "#/components/schemas/ToolCall" + }, + "type": "array", + "title": "Tool Calls" + }, + "tool_responses": { + "items": { + "$ref": "#/components/schemas/ToolResponse-Output" + }, + "type": "array", + "title": "Tool Responses" + } + }, + "type": "object", + "required": [ + "turn_id", + "step_id", + "tool_calls", + "tool_responses" + ], + "title": "ToolExecutionStep", + "description": "A tool execution step in an agent turn." + }, + "ToolPromptFormat": { + "type": "string", + "enum": [ + "json", + "function_tag", + "python_list" + ], + "title": "ToolPromptFormat", + "description": "Prompt format for calling custom / zero shot tools." + }, + "ToolResponse-Input": { + "properties": { + "call_id": { + "type": "string", + "title": "Call Id" + }, + "tool_name": { + "anyOf": [ + { + "$ref": "#/components/schemas/BuiltinTool" + }, + { + "type": "string" + } + ], + "title": "Tool Name" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object" + } + }, + "type": "object", + "required": [ + "call_id", + "tool_name", + "content" + ], + "title": "ToolResponse", + "description": "Response from a tool invocation." 
+ }, + "ToolResponse-Output": { + "properties": { + "call_id": { + "type": "string", + "title": "Call Id" + }, + "tool_name": { + "anyOf": [ + { + "$ref": "#/components/schemas/BuiltinTool" + }, + { + "type": "string" + } + ], + "title": "Tool Name" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object" + } + }, + "type": "object", + "required": [ + "call_id", + "tool_name", + "content" + ], + "title": "ToolResponse", + "description": "Response from a tool invocation." + }, + "ToolResponseMessage-Output": { + "properties": { + "role": { + "type": "string", + "const": "tool", + "title": "Role", + "default": "tool" + }, + "call_id": { + "type": "string", + "title": "Call Id" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + } + }, + "type": "object", + "required": [ + "call_id", + "content" + ], + "title": "ToolResponseMessage", + "description": "A message representing the result of a tool invocation." + }, + "TopKSamplingStrategy": { + "properties": { + "type": { + "type": "string", + "const": "top_k", + "title": "Type", + "default": "top_k" + }, + "top_k": { + "type": "integer", + "minimum": 1.0, + "title": "Top K" + } + }, + "type": "object", + "required": [ + "top_k" + ], + "title": "TopKSamplingStrategy", + "description": "Top-k sampling strategy that restricts sampling to the k most likely tokens." + }, + "TopPSamplingStrategy": { + "properties": { + "type": { + "type": "string", + "const": "top_p", + "title": "Type", + "default": "top_p" + }, + "temperature": { + "title": "Temperature", + "type": "number", + "minimum": 0.0 + }, + "top_p": { + "title": "Top P", + "default": 0.95, + "type": "number" + } + }, + "type": "object", + "required": [ + "temperature" + ], + "title": "TopPSamplingStrategy", + "description": "Top-p (nucleus) sampling strategy that samples from the smallest set of tokens with cumulative probability >= p." 
+ }, + "TrainingConfig": { + "properties": { + "n_epochs": { + "type": "integer", + "title": "N Epochs" + }, + "max_steps_per_epoch": { + "type": "integer", + "title": "Max Steps Per Epoch", + "default": 1 + }, + "gradient_accumulation_steps": { + "type": "integer", + "title": "Gradient Accumulation Steps", + "default": 1 + }, + "max_validation_steps": { + "title": "Max Validation Steps", + "default": 1, + "type": "integer" + }, + "data_config": { + "$ref": "#/components/schemas/DataConfig" + }, + "optimizer_config": { + "$ref": "#/components/schemas/OptimizerConfig" + }, + "efficiency_config": { + "$ref": "#/components/schemas/EfficiencyConfig" + }, + "dtype": { + "title": "Dtype", + "default": "bf16", + "type": "string" + } + }, + "type": "object", + "required": [ + "n_epochs" + ], + "title": "TrainingConfig", + "description": "Comprehensive configuration for the training process." + }, + "Turn": { + "properties": { + "turn_id": { + "type": "string", + "title": "Turn Id" + }, + "session_id": { + "type": "string", + "title": "Session Id" + }, + "input_messages": { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/UserMessage-Output" + }, + { + "$ref": "#/components/schemas/ToolResponseMessage-Output" + } + ] + }, + "type": "array", + "title": "Input Messages" + }, + "steps": { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/InferenceStep-Output" + }, + { + "$ref": "#/components/schemas/ToolExecutionStep-Output" + }, + { + "$ref": "#/components/schemas/ShieldCallStep-Output" + }, + { + "$ref": "#/components/schemas/MemoryRetrievalStep-Output" + } + ], + "discriminator": { + "propertyName": "step_type", + "mapping": { + "inference": "#/components/schemas/InferenceStep-Output", + "memory_retrieval": "#/components/schemas/MemoryRetrievalStep-Output", + "shield_call": "#/components/schemas/ShieldCallStep-Output", + "tool_execution": "#/components/schemas/ToolExecutionStep-Output" + } + } + }, + "type": "array", + "title": "Steps" + }, + "output_message": { + "$ref": "#/components/schemas/CompletionMessage-Output" + }, + "output_attachments": { + "title": "Output Attachments", + "items": { + "$ref": "#/components/schemas/Attachment-Output" + }, + "type": "array" + }, + "started_at": { + "type": "string", + "format": "date-time", + "title": "Started At" + }, + "completed_at": { + "title": "Completed At", + "type": "string", + "format": "date-time" + } + }, + "type": "object", + "required": [ + "turn_id", + "session_id", + "input_messages", + "steps", + "output_message", + "started_at" + ], + "title": "Turn", + "description": "A single turn in an interaction with an Agentic System." + }, + "URIDataSource": { + "properties": { + "type": { + "type": "string", + "const": "uri", + "title": "Type", + "default": "uri" + }, + "uri": { + "type": "string", + "title": "Uri" + } + }, + "type": "object", + "required": [ + "uri" + ], + "title": "URIDataSource", + "description": "A dataset that can be obtained from a URI." + }, + "URL": { + "properties": { + "uri": { + "type": "string", + "title": "Uri" + } + }, + "type": "object", + "required": [ + "uri" + ], + "title": "URL", + "description": "A URL reference to external content." 
+ }, + "UserMessage-Input": { + "properties": { + "role": { + "type": "string", + "const": "user", + "title": "Role", + "default": "user" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "context": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Context" + } + }, + "type": "object", + "required": [ + "content" + ], + "title": "UserMessage", + "description": "A message from the user in a chat conversation." 
+ }, + "UserMessage-Output": { + "properties": { + "role": { + "type": "string", + "const": "user", + "title": "Role", + "default": "user" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "context": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Context" + } + }, + "type": "object", + "required": [ + "content" + ], + "title": "UserMessage", + "description": "A message from the user in a chat conversation." + }, + "ViolationLevel": { + "type": "string", + "enum": [ + "info", + "warn", + "error" + ], + "title": "ViolationLevel", + "description": "Severity level of a safety violation." 
+ }, + "_URLOrData": { + "properties": { + "url": { + "$ref": "#/components/schemas/URL" + }, + "data": { + "contentEncoding": "base64", + "title": "Data", + "type": "string" + } + }, + "type": "object", + "title": "_URLOrData", + "description": "A URL or a base64 encoded string" + }, + "__main_____agents_agent_id_session_Request": { + "properties": { + "agent_id": { + "type": "string", + "title": "Agent Id" + }, + "session_name": { + "type": "string", + "title": "Session Name" + } + }, + "type": "object", + "required": [ + "agent_id", + "session_name" + ], + "title": "_agents_agent_id_session_Request" + }, + "__main_____agents_agent_id_session_session_id_turn_Request": { + "properties": { + "agent_id": { + "type": "string", + "title": "Agent Id" + }, + "session_id": { + "type": "string", + "title": "Session Id" + }, + "messages": { + "$ref": "#/components/schemas/UserMessage-Input" + }, + "stream": { + "type": "boolean", + "title": "Stream", + "default": false + }, + "documents": { + "$ref": "#/components/schemas/Document" + }, + "toolgroups": { + "anyOf": [ + { + "type": "string" + }, + { + "$ref": "#/components/schemas/AgentToolGroupWithArgs" + } + ], + "title": "Toolgroups" + }, + "tool_config": { + "$ref": "#/components/schemas/ToolConfig" + } + }, + "type": "object", + "required": [ + "agent_id", + "session_id", + "messages", + "documents", + "toolgroups", + "tool_config" + ], + "title": "_agents_agent_id_session_session_id_turn_Request" + }, + "__main_____agents_agent_id_session_session_id_turn_turn_id_resume_Request": { + "properties": { + "agent_id": { + "type": "string", + "title": "Agent Id" + }, + "session_id": { + "type": "string", + "title": "Session Id" + }, + "turn_id": { + "type": "string", + "title": "Turn Id" + }, + "tool_responses": { + "$ref": "#/components/schemas/ToolResponse-Input" + }, + "stream": { + "type": "boolean", + "title": "Stream", + "default": false + } + }, + "type": "object", + "required": [ + "agent_id", + "session_id", + "turn_id", + "tool_responses" + ], + "title": "_agents_agent_id_session_session_id_turn_turn_id_resume_Request" + }, + "__main_____datasets_Request": { + "properties": { + "purpose": { + "$ref": "#/components/schemas/DatasetPurpose" + }, + "metadata": { + "type": "string", + "title": "Metadata" + }, + "dataset_id": { + "type": "string", + "title": "Dataset Id" + } + }, + "type": "object", + "required": [ + "purpose", + "metadata", + "dataset_id" + ], + "title": "_datasets_Request" + }, + "_inference_rerank_Request": { + "properties": { + "model": { + "type": "string", + "title": "Model" + }, + "query": { + "type": "string", + "title": "Query" + }, + "items": { + "type": "string", + "title": "Items" + }, + "max_num_results": { + "type": "integer", + "title": "Max Num Results" + } + }, + "type": "object", + "required": [ + "model", + "query", + "items", + "max_num_results" + ], + "title": "_inference_rerank_Request" + }, + "Error": { + "description": "Error response from the API. 
Roughly follows RFC 7807.", + "properties": { + "status": { + "title": "Status", + "type": "integer" + }, + "title": { + "title": "Title", + "type": "string" + }, + "detail": { + "title": "Detail", + "type": "string" + }, + "instance": { + "title": "Instance", + "type": "string", + "nullable": true + } + }, + "required": [ + "status", + "title", + "detail" + ], + "title": "Error", + "type": "object" + }, + "Agent": { + "description": "An agent instance with configuration and metadata.", + "properties": { + "agent_id": { + "title": "Agent Id", + "type": "string" + }, + "agent_config": { + "$ref": "#/components/schemas/AgentConfig" + }, + "created_at": { + "format": "date-time", + "title": "Created At", + "type": "string" + } + }, + "required": [ + "agent_id", + "agent_config", + "created_at" + ], + "title": "Agent", + "type": "object" + }, + "AgentStepResponse": { + "description": "Response containing details of a specific agent step.", + "properties": { + "step": { + "discriminator": { + "mapping": { + "inference": "#/$defs/InferenceStep", + "memory_retrieval": "#/$defs/MemoryRetrievalStep", + "shield_call": "#/$defs/ShieldCallStep", + "tool_execution": "#/$defs/ToolExecutionStep" + }, + "propertyName": "step_type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/InferenceStep" + }, + { + "$ref": "#/components/schemas/ToolExecutionStep" + }, + { + "$ref": "#/components/schemas/ShieldCallStep" + }, + { + "$ref": "#/components/schemas/MemoryRetrievalStep" + } + ], + "title": "Step" + } + }, + "required": [ + "step" + ], + "title": "AgentStepResponse", + "type": "object" + }, + "CompletionMessage": { + "description": "A message containing the model's (assistant) response in a chat conversation.", + "properties": { + "role": { + "const": "assistant", + "default": "assistant", + "title": "Role", + "type": "string" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + { + "items": { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "stop_reason": { + "$ref": "#/components/schemas/StopReason" + }, + "tool_calls": { + "title": "Tool Calls", + "items": { + "$ref": "#/components/schemas/ToolCall" + }, + "type": "array" + } + }, + "required": [ + "content", + "stop_reason" + ], + "title": "CompletionMessage", + "type": "object" + }, + "InferenceStep": { + "description": "An inference step in an agent turn.", + "properties": { + "turn_id": { + "title": "Turn Id", + "type": "string" + }, + "step_id": { + "title": "Step Id", + "type": "string" + }, + "started_at": { + "title": "Started At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "completed_at": { + "title": "Completed At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "step_type": { + "const": "inference", + "default": "inference", + "title": "Step Type", + "type": "string" + }, + "model_response": { + "$ref": "#/components/schemas/CompletionMessage" + } + }, + "required": [ + "turn_id", + "step_id", + 
"model_response" + ], + "title": "InferenceStep", + "type": "object" + }, + "MemoryRetrievalStep": { + "description": "A memory retrieval step in an agent turn.", + "properties": { + "turn_id": { + "title": "Turn Id", + "type": "string" + }, + "step_id": { + "title": "Step Id", + "type": "string" + }, + "started_at": { + "title": "Started At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "completed_at": { + "title": "Completed At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "step_type": { + "const": "memory_retrieval", + "default": "memory_retrieval", + "title": "Step Type", + "type": "string" + }, + "vector_store_ids": { + "title": "Vector Store Ids", + "type": "string" + }, + "inserted_context": { + "anyOf": [ + { + "type": "string" + }, + { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + { + "items": { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + "type": "array" + } + ], + "title": "Inserted Context" + } + }, + "required": [ + "turn_id", + "step_id", + "vector_store_ids", + "inserted_context" + ], + "title": "MemoryRetrievalStep", + "type": "object" + }, + "PaginatedResponse": { + "description": "A generic paginated response that follows a simple format.", + "properties": { + "data": { + "items": { + "additionalProperties": true, + "type": "object" + }, + "title": "Data", + "type": "array" + }, + "has_more": { + "title": "Has More", + "type": "boolean" + }, + "url": { + "title": "Url", + "type": "string", + "nullable": true + } + }, + "required": [ + "data", + "has_more" + ], + "title": "PaginatedResponse", + "type": "object" + }, + "Session": { + "description": "A single session of an interaction with an Agentic System.", + "properties": { + "session_id": { + "title": "Session Id", + "type": "string" + }, + "session_name": { + "title": "Session Name", + "type": "string" + }, + "turns": { + "items": { + "$ref": "#/components/schemas/Turn" + }, + "title": "Turns", + "type": "array" + }, + "started_at": { + "format": "date-time", + "title": "Started At", + "type": "string" + } + }, + "required": [ + "session_id", + "session_name", + "turns", + "started_at" + ], + "title": "Session", + "type": "object" + }, + "ShieldCallStep": { + "description": "A shield call step in an agent turn.", + "properties": { + "turn_id": { + "title": "Turn Id", + "type": "string" + }, + "step_id": { + "title": "Step Id", + "type": "string" + }, + "started_at": { + "title": "Started At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "completed_at": { + "title": "Completed At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "step_type": { + "const": "shield_call", + "default": "shield_call", + "title": "Step Type", + "type": "string" + }, + "violation": { + "$ref": "#/components/schemas/SafetyViolation" + } + }, + "required": [ + "turn_id", + "step_id", + "violation" + ], + "title": "ShieldCallStep", + "type": "object" + }, + "ToolExecutionStep": { + "description": "A tool execution step in an agent turn.", + "properties": { + "turn_id": { + 
"title": "Turn Id", + "type": "string" + }, + "step_id": { + "title": "Step Id", + "type": "string" + }, + "started_at": { + "title": "Started At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "completed_at": { + "title": "Completed At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "step_type": { + "const": "tool_execution", + "default": "tool_execution", + "title": "Step Type", + "type": "string" + }, + "tool_calls": { + "items": { + "$ref": "#/components/schemas/ToolCall" + }, + "title": "Tool Calls", + "type": "array" + }, + "tool_responses": { + "items": { + "$ref": "#/components/schemas/ToolResponse" + }, + "title": "Tool Responses", + "type": "array" + } + }, + "required": [ + "turn_id", + "step_id", + "tool_calls", + "tool_responses" + ], + "title": "ToolExecutionStep", + "type": "object" + }, + "ToolResponse": { + "description": "Response from a tool invocation.", + "properties": { + "call_id": { + "title": "Call Id", + "type": "string" + }, + "tool_name": { + "anyOf": [ + { + "$ref": "#/components/schemas/BuiltinTool" + }, + { + "type": "string" + } + ], + "title": "Tool Name" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + { + "items": { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object", + "nullable": true + } + }, + "required": [ + "call_id", + "tool_name", + "content" + ], + "title": "ToolResponse", + "type": "object" + }, + "Checkpoint": { + "description": "Checkpoint created during training runs.", + "properties": { + "identifier": { + "title": "Identifier", + "type": "string" + }, + "created_at": { + "format": "date-time", + "title": "Created At", + "type": "string" + }, + "epoch": { + "title": "Epoch", + "type": "integer" + }, + "post_training_job_id": { + "title": "Post Training Job Id", + "type": "string" + }, + "path": { + "title": "Path", + "type": "string" + }, + "training_metrics": { + "$ref": "#/components/schemas/PostTrainingMetric", + "nullable": true + } + }, + "required": [ + "identifier", + "created_at", + "epoch", + "post_training_job_id", + "path" + ], + "title": "Checkpoint", + "type": "object" + }, + "PostTrainingJobArtifactsResponse": { + "description": "Artifacts of a finetuning job.", + "properties": { + "job_uuid": { + "title": "Job Uuid", + "type": "string" + }, + "checkpoints": { + "items": { + "$ref": "#/components/schemas/Checkpoint" + }, + "title": "Checkpoints", + "type": "array" + } + }, + "required": [ + "job_uuid" + ], + "title": "PostTrainingJobArtifactsResponse", + "type": "object" + }, + "PostTrainingJobStatusResponse": { + "description": "Status of a finetuning job.", + "properties": { + "job_uuid": { + "title": "Job Uuid", + "type": "string" + }, + "status": { + "$ref": "#/components/schemas/JobStatus" + }, + "scheduled_at": { + "title": "Scheduled At", + "format": "date-time", + "type": "string", + "nullable": true + }, + 
"started_at": { + "title": "Started At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "completed_at": { + "title": "Completed At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "resources_allocated": { + "title": "Resources Allocated", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "checkpoints": { + "items": { + "$ref": "#/components/schemas/Checkpoint" + }, + "title": "Checkpoints", + "type": "array" + } + }, + "required": [ + "job_uuid", + "status" + ], + "title": "PostTrainingJobStatusResponse", + "type": "object" + }, + "ImageContentItem": { + "description": "A image content item", + "properties": { + "type": { + "const": "image", + "default": "image", + "title": "Type", + "type": "string" + }, + "image": { + "$ref": "#/components/schemas/_URLOrData" + } + }, + "required": [ + "image" + ], + "title": "ImageContentItem", + "type": "object" + }, + "PostTrainingMetric": { + "description": "Training metrics captured during post-training jobs.", + "properties": { + "epoch": { + "title": "Epoch", + "type": "integer" + }, + "train_loss": { + "title": "Train Loss", + "type": "number" + }, + "validation_loss": { + "title": "Validation Loss", + "type": "number" + }, + "perplexity": { + "title": "Perplexity", + "type": "number" + } + }, + "required": [ + "epoch", + "train_loss", + "validation_loss", + "perplexity" + ], + "title": "PostTrainingMetric", + "type": "object" + } + }, + "responses": { + "BadRequest400": { + "description": "The request was invalid or malformed", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Error" + }, + "example": { + "status": 400, + "title": "Bad Request", + "detail": "The request was invalid or malformed" + } + } + } + }, + "TooManyRequests429": { + "description": "The client has sent too many requests in a given amount of time", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Error" + }, + "example": { + "status": 429, + "title": "Too Many Requests", + "detail": "You have exceeded the rate limit. Please try again later." + } + } + } + }, + "InternalServerError500": { + "description": "The server encountered an unexpected error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Error" + }, + "example": { + "status": 500, + "title": "Internal Server Error", + "detail": "An unexpected error occurred. Our team has been notified." + } + } + } + }, + "DefaultError": { + "description": "An unexpected error occurred", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Error" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/docs/static/experimental-llama-stack-spec.yaml b/docs/static/experimental-llama-stack-spec.yaml index 204cc9e02..cd7cbdb23 100644 --- a/docs/static/experimental-llama-stack-spec.yaml +++ b/docs/static/experimental-llama-stack-spec.yaml @@ -13,7 +13,7 @@ paths: post: tags: - V1Beta - summary: Append Rows + summary: Append rows to a dataset. description: Generic endpoint - this would be replaced with actual implementation. operationId: append_rows_v1beta_datasetio_append_rows__dataset_id__post parameters: @@ -55,16 +55,10 @@ paths: get: tags: - V1Beta - summary: Iterrows + summary: Get a paginated list of rows from a dataset. description: Query endpoint for proper schema generation. 
operationId: iterrows_v1beta_datasetio_iterrows__dataset_id__get parameters: - - name: dataset_id - in: path - required: true - schema: - type: string - title: Dataset Id - name: limit in: query required: true @@ -77,6 +71,12 @@ paths: schema: type: integer title: Start Index + - name: dataset_id + in: path + required: true + schema: + type: string + title: Dataset Id responses: '200': description: A PaginatedResponse. @@ -100,7 +100,7 @@ paths: get: tags: - V1Beta - summary: List Datasets + summary: List all datasets. description: Response-only endpoint for proper schema generation. operationId: list_datasets_v1beta_datasets_get responses: @@ -125,7 +125,7 @@ paths: post: tags: - V1Beta - summary: Register Dataset + summary: Register a new dataset. description: Typed endpoint for proper schema generation. operationId: register_dataset_v1beta_datasets_post requestBody: @@ -157,7 +157,7 @@ paths: delete: tags: - V1Beta - summary: Unregister Dataset + summary: Unregister a dataset by its ID. description: Generic endpoint - this would be replaced with actual implementation. operationId: unregister_dataset_v1beta_datasets__dataset_id__delete parameters: @@ -198,7 +198,7 @@ paths: get: tags: - V1Beta - summary: Get Dataset + summary: Get a dataset by its ID. description: Query endpoint for proper schema generation. operationId: get_dataset_v1beta_datasets__dataset_id__get parameters: @@ -231,7 +231,7 @@ paths: get: tags: - V1Alpha - summary: List Agents + summary: List all agents. description: Query endpoint for proper schema generation. operationId: list_agents_v1alpha_agents_get parameters: @@ -269,7 +269,7 @@ paths: post: tags: - V1Alpha - summary: Create Agent + summary: Create an agent with the given configuration. description: Typed endpoint for proper schema generation. operationId: create_agent_v1alpha_agents_post requestBody: @@ -301,7 +301,7 @@ paths: delete: tags: - V1Alpha - summary: Delete Agent + summary: Delete an agent by its ID and its associated sessions and turns. description: Generic endpoint - this would be replaced with actual implementation. operationId: delete_agent_v1alpha_agents__agent_id__delete parameters: @@ -320,7 +320,7 @@ paths: required: true schema: type: string - description: 'Path parameter: agent_id' + description: The ID of the agent to delete. responses: '200': description: Successful Response @@ -342,7 +342,7 @@ paths: get: tags: - V1Alpha - summary: Get Agent + summary: Describe an agent by its ID. description: Query endpoint for proper schema generation. operationId: get_agent_v1alpha_agents__agent_id__get parameters: @@ -352,6 +352,7 @@ paths: schema: type: string title: Agent Id + description: ID of the agent. responses: '200': description: An Agent of the agent. @@ -375,7 +376,7 @@ paths: post: tags: - V1Alpha - summary: Create Agent Session + summary: Create a new session for an agent. description: Typed endpoint for proper schema generation. operationId: create_agent_session_v1alpha_agents__agent_id__session_post requestBody: @@ -409,12 +410,12 @@ paths: required: true schema: type: string - description: 'Path parameter: agent_id' + description: The ID of the agent to create the session for. /v1alpha/agents/{agent_id}/session/{session_id}: delete: tags: - V1Alpha - summary: Delete Agents Session + summary: Delete an agent session by its ID and its associated turns. description: Generic endpoint - this would be replaced with actual implementation. 
operationId: delete_agents_session_v1alpha_agents__agent_id__session__session_id__delete parameters: @@ -428,18 +429,18 @@ paths: required: true schema: title: Kwargs - - name: agent_id - in: path - required: true - schema: - type: string - description: 'Path parameter: agent_id' - name: session_id in: path required: true schema: type: string - description: 'Path parameter: session_id' + description: The ID of the session to delete. + - name: agent_id + in: path + required: true + schema: + type: string + description: The ID of the agent to delete the session for. responses: '200': description: Successful Response @@ -461,28 +462,30 @@ paths: get: tags: - V1Alpha - summary: Get Agents Session + summary: Retrieve an agent session by its ID. description: Query endpoint for proper schema generation. operationId: get_agents_session_v1alpha_agents__agent_id__session__session_id__get parameters: - - name: agent_id - in: path - required: true - schema: - type: string - title: Agent Id - - name: session_id - in: path - required: true - schema: - type: string - title: Session Id - name: turn_ids in: query required: true schema: type: string title: Turn Ids + - name: session_id + in: path + required: true + schema: + type: string + title: Session Id + description: The ID of the session to get. + - name: agent_id + in: path + required: true + schema: + type: string + title: Agent Id + description: The ID of the agent to get the session for. responses: '200': description: A Session. @@ -506,7 +509,7 @@ paths: post: tags: - V1Alpha - summary: Create Agent Turn + summary: Create a new turn for an agent. description: Typed endpoint for proper schema generation. operationId: create_agent_turn_v1alpha_agents__agent_id__session__session_id__turn_post requestBody: @@ -540,18 +543,18 @@ paths: required: true schema: type: string - description: 'Path parameter: agent_id' + description: The ID of the agent to create the turn for. - name: session_id in: path required: true schema: type: string - description: 'Path parameter: session_id' + description: The ID of the session to create the turn for. /v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}: get: tags: - V1Alpha - summary: Get Agents Turn + summary: Retrieve an agent turn by its ID. description: Query endpoint for proper schema generation. operationId: get_agents_turn_v1alpha_agents__agent_id__session__session_id__turn__turn_id__get parameters: @@ -561,18 +564,21 @@ paths: schema: type: string title: Agent Id + description: The ID of the agent to get the turn for. - name: session_id in: path required: true schema: type: string title: Session Id + description: The ID of the session to get the turn for. - name: turn_id in: path required: true schema: type: string title: Turn Id + description: The ID of the turn to get. responses: '200': description: A Turn. @@ -596,7 +602,7 @@ paths: post: tags: - V1Alpha - summary: Resume Agent Turn + summary: Resume an agent turn with executed tool call responses. description: Typed endpoint for proper schema generation. operationId: resume_agent_turn_v1alpha_agents__agent_id__session__session_id__turn__turn_id__resume_post requestBody: @@ -631,24 +637,24 @@ paths: required: true schema: type: string - description: 'Path parameter: agent_id' + description: The ID of the agent to resume. - name: session_id in: path required: true schema: type: string - description: 'Path parameter: session_id' + description: The ID of the session to resume. 
- name: turn_id in: path required: true schema: type: string - description: 'Path parameter: turn_id' + description: The ID of the turn to resume. /v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}/step/{step_id}: get: tags: - V1Alpha - summary: Get Agents Step + summary: Retrieve an agent step by its ID. description: Query endpoint for proper schema generation. operationId: get_agents_step_v1alpha_agents__agent_id__session__session_id__turn__turn_id__step__step_id__get parameters: @@ -658,24 +664,28 @@ paths: schema: type: string title: Agent Id + description: The ID of the agent to get the step for. - name: session_id in: path required: true schema: type: string title: Session Id - - name: step_id - in: path - required: true - schema: - type: string - title: Step Id + description: The ID of the session to get the step for. - name: turn_id in: path required: true schema: type: string title: Turn Id + description: The ID of the turn to get the step for. + - name: step_id + in: path + required: true + schema: + type: string + title: Step Id + description: The ID of the step to get. responses: '200': description: An AgentStepResponse. @@ -699,16 +709,10 @@ paths: get: tags: - V1Alpha - summary: List Agent Sessions + summary: List all session(s) of a given agent. description: Query endpoint for proper schema generation. operationId: list_agent_sessions_v1alpha_agents__agent_id__sessions_get parameters: - - name: agent_id - in: path - required: true - schema: - type: string - title: Agent Id - name: limit in: query required: true @@ -721,6 +725,13 @@ paths: schema: type: integer title: Start Index + - name: agent_id + in: path + required: true + schema: + type: string + title: Agent Id + description: The ID of the agent to list sessions for. responses: '200': description: A PaginatedResponse. @@ -744,7 +755,7 @@ paths: get: tags: - V1Alpha - summary: List Benchmarks + summary: List all benchmarks. description: Response-only endpoint for proper schema generation. operationId: list_benchmarks_v1alpha_eval_benchmarks_get responses: @@ -769,7 +780,7 @@ paths: post: tags: - V1Alpha - summary: Register Benchmark + summary: Register a benchmark. description: Generic endpoint - this would be replaced with actual implementation. operationId: register_benchmark_v1alpha_eval_benchmarks_post parameters: @@ -805,7 +816,7 @@ paths: delete: tags: - V1Alpha - summary: Unregister Benchmark + summary: Unregister a benchmark. description: Generic endpoint - this would be replaced with actual implementation. operationId: unregister_benchmark_v1alpha_eval_benchmarks__benchmark_id__delete parameters: @@ -824,7 +835,7 @@ paths: required: true schema: type: string - description: 'Path parameter: benchmark_id' + description: The ID of the benchmark to unregister. responses: '200': description: Successful Response @@ -846,7 +857,7 @@ paths: get: tags: - V1Alpha - summary: Get Benchmark + summary: Get a benchmark by its ID. description: Query endpoint for proper schema generation. operationId: get_benchmark_v1alpha_eval_benchmarks__benchmark_id__get parameters: @@ -856,6 +867,7 @@ paths: schema: type: string title: Benchmark Id + description: The ID of the benchmark to get. responses: '200': description: A Benchmark. @@ -879,7 +891,7 @@ paths: post: tags: - V1Alpha - summary: Evaluate Rows + summary: Evaluate a list of rows on a benchmark. description: Typed endpoint for proper schema generation. 
operationId: evaluate_rows_v1alpha_eval_benchmarks__benchmark_id__evaluations_post requestBody: @@ -913,12 +925,12 @@ paths: required: true schema: type: string - description: 'Path parameter: benchmark_id' + description: The ID of the benchmark to run the evaluation on. /v1alpha/eval/benchmarks/{benchmark_id}/jobs: post: tags: - V1Alpha - summary: Run Eval + summary: Run an evaluation on a benchmark. description: Typed endpoint for proper schema generation. operationId: run_eval_v1alpha_eval_benchmarks__benchmark_id__jobs_post requestBody: @@ -952,12 +964,12 @@ paths: required: true schema: type: string - description: 'Path parameter: benchmark_id' + description: The ID of the benchmark to run the evaluation on. /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}: delete: tags: - V1Alpha - summary: Job Cancel + summary: Cancel a job. description: Generic endpoint - this would be replaced with actual implementation. operationId: job_cancel_v1alpha_eval_benchmarks__benchmark_id__jobs__job_id__delete parameters: @@ -976,13 +988,13 @@ paths: required: true schema: type: string - description: 'Path parameter: benchmark_id' + description: The ID of the benchmark to run the evaluation on. - name: job_id in: path required: true schema: type: string - description: 'Path parameter: job_id' + description: The ID of the job to cancel. responses: '200': description: Successful Response @@ -1004,7 +1016,7 @@ paths: get: tags: - V1Alpha - summary: Job Status + summary: Get the status of a job. description: Query endpoint for proper schema generation. operationId: job_status_v1alpha_eval_benchmarks__benchmark_id__jobs__job_id__get parameters: @@ -1014,12 +1026,14 @@ paths: schema: type: string title: Benchmark Id + description: The ID of the benchmark to run the evaluation on. - name: job_id in: path required: true schema: type: string title: Job Id + description: The ID of the job to get the status of. responses: '200': description: The status of the evaluation job. @@ -1043,7 +1057,7 @@ paths: get: tags: - V1Alpha - summary: Job Result + summary: Get the result of a job. description: Query endpoint for proper schema generation. operationId: job_result_v1alpha_eval_benchmarks__benchmark_id__jobs__job_id__result_get parameters: @@ -1053,12 +1067,14 @@ paths: schema: type: string title: Benchmark Id + description: The ID of the benchmark to run the evaluation on. - name: job_id in: path required: true schema: type: string title: Job Id + description: The ID of the job to get the result of. responses: '200': description: The result of the job. @@ -1082,7 +1098,7 @@ paths: post: tags: - V1Alpha - summary: Rerank + summary: Rerank a list of documents based on their relevance to a query. description: Typed endpoint for proper schema generation. operationId: rerank_v1alpha_inference_rerank_post requestBody: @@ -1114,7 +1130,7 @@ paths: get: tags: - V1Alpha - summary: Get Training Job Artifacts + summary: Get the artifacts of a training job. description: Query endpoint for proper schema generation. operationId: get_training_job_artifacts_v1alpha_post_training_job_artifacts_get parameters: @@ -1147,7 +1163,7 @@ paths: post: tags: - V1Alpha - summary: Cancel Training Job + summary: Cancel a training job. description: Generic endpoint - this would be replaced with actual implementation. operationId: cancel_training_job_v1alpha_post_training_job_cancel_post parameters: @@ -1183,7 +1199,7 @@ paths: get: tags: - V1Alpha - summary: Get Training Job Status + summary: Get the status of a training job. 
description: Query endpoint for proper schema generation. operationId: get_training_job_status_v1alpha_post_training_job_status_get parameters: @@ -1216,7 +1232,7 @@ paths: get: tags: - V1Alpha - summary: Get Training Jobs + summary: Get all training jobs. description: Response-only endpoint for proper schema generation. operationId: get_training_jobs_v1alpha_post_training_jobs_get responses: @@ -1242,7 +1258,7 @@ paths: post: tags: - V1Alpha - summary: Preference Optimize + summary: Run preference optimization of a model. description: Typed endpoint for proper schema generation. operationId: preference_optimize_v1alpha_post_training_preference_optimize_post requestBody: @@ -1274,7 +1290,7 @@ paths: post: tags: - V1Alpha - summary: Supervised Fine Tune + summary: Run supervised fine-tuning of a model. description: Typed endpoint for proper schema generation. operationId: supervised_fine_tune_v1alpha_post_training_supervised_fine_tune_post requestBody: @@ -1317,10 +1333,7 @@ components: required: - config title: AgentCandidate - description: 'An agent candidate for evaluation. - - - :param config: The configuration for the agent candidate.' + description: An agent candidate for evaluation. AgentConfig: properties: sampling_params: @@ -1387,19 +1400,7 @@ components: - model - instructions title: AgentConfig - description: 'Configuration for an agent. - - - :param model: The model identifier to use for the agent - - :param instructions: The system instructions for the agent - - :param name: Optional name for the agent, used in telemetry and identification - - :param enable_session_persistence: Optional flag indicating whether session - data has to be persisted - - :param response_format: Optional response format configuration' + description: Configuration for an agent. AgentCreateResponse: properties: agent_id: @@ -1409,10 +1410,7 @@ components: required: - agent_id title: AgentCreateResponse - description: 'Response returned when creating a new agent. - - - :param agent_id: Unique identifier for the created agent' + description: Response returned when creating a new agent. AgentSessionCreateResponse: properties: session_id: @@ -1422,10 +1420,7 @@ components: required: - session_id title: AgentSessionCreateResponse - description: 'Response returned when creating a new agent session. - - - :param session_id: Unique identifier for the created session' + description: Response returned when creating a new agent session. AgentToolGroupWithArgs: properties: name: @@ -1449,17 +1444,7 @@ components: - categorical_count - accuracy title: AggregationFunctionType - description: 'Types of aggregation functions for scoring results. - - :cvar average: Calculate the arithmetic mean of scores - - :cvar weighted_average: Calculate a weighted average of scores - - :cvar median: Calculate the median value of scores - - :cvar categorical_count: Count occurrences of categorical values - - :cvar accuracy: Calculate accuracy as the proportion of correct answers' + description: Types of aggregation functions for scoring results. Attachment-Output: properties: content: @@ -1493,12 +1478,7 @@ components: - content - mime_type title: Attachment - description: 'An attachment to an agent turn. - - - :param content: The content of the attachment. - - :param mime_type: The MIME type of the attachment.' + description: An attachment to an agent turn. 
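The AgentConfig cleanup above keeps only the one-line description; as a reviewing aid (not part of the patch), a minimal Python sketch of the payload shape that schema implies. Only model and instructions are required, and every value below is invented for illustration.

# Minimal AgentConfig-shaped payload; "model" and "instructions" are the only
# required properties per the schema above. All values are placeholders.
agent_config = {
    "model": "example-model-id",            # hypothetical model identifier
    "instructions": "You are a helpful assistant.",
    "name": "docs-example-agent",           # optional, used in telemetry/identification
    "enable_session_persistence": False,    # optional
}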
BasicScoringFnParams: properties: type: @@ -1514,12 +1494,7 @@ components: description: Aggregation functions to apply to the scores of each row type: object title: BasicScoringFnParams - description: 'Parameters for basic scoring function configuration. - - :param type: The type of scoring function parameters, always basic - - :param aggregation_functions: Aggregation functions to apply to the scores - of each row' + description: Parameters for basic scoring function configuration. Benchmark: properties: identifier: @@ -1559,17 +1534,7 @@ components: - dataset_id - scoring_functions title: Benchmark - description: 'A benchmark resource for evaluating model performance. - - - :param dataset_id: Identifier of the dataset to use for the benchmark evaluation - - :param scoring_functions: List of scoring function identifiers to apply during - evaluation - - :param metadata: Metadata for this evaluation task - - :param type: The resource type, always benchmark' + description: A benchmark resource for evaluating model performance. BenchmarkConfig: properties: eval_candidate: @@ -1607,16 +1572,7 @@ components: required: - eval_candidate title: BenchmarkConfig - description: 'A benchmark configuration for evaluation. - - - :param eval_candidate: The candidate to evaluate. - - :param scoring_params: Map between scoring function id and parameters for - each scoring function you want to run - - :param num_examples: (Optional) The number of examples to evaluate. If not - provided, all examples in the dataset will be evaluated' + description: A benchmark configuration for evaluation. BuiltinTool: type: string enum: @@ -1666,16 +1622,8 @@ components: - content - stop_reason title: CompletionMessage - description: "A message containing the model's (assistant) response in a chat\ - \ conversation.\n\n:param role: Must be \"assistant\" to identify this as\ - \ the model's response\n:param content: The content of the model's response\n\ - :param stop_reason: Reason why the model stopped generating. Options are:\n\ - \ - `StopReason.end_of_turn`: The model finished generating the entire\ - \ response.\n - `StopReason.end_of_message`: The model finished generating\ - \ but generated a partial response -- usually, a tool call. The user may call\ - \ the tool and continue the conversation with the tool's response.\n -\ - \ `StopReason.out_of_tokens`: The model ran out of token budget.\n:param tool_calls:\ - \ List of tool calls. Each tool call is a ToolCall object." + description: A message containing the model's (assistant) response in a chat + conversation. DPOAlignmentConfig: properties: beta: @@ -1688,12 +1636,7 @@ components: required: - beta title: DPOAlignmentConfig - description: 'Configuration for Direct Preference Optimization (DPO) alignment. - - - :param beta: Temperature parameter for the DPO loss - - :param loss_type: The type of loss function to use for DPO' + description: Configuration for Direct Preference Optimization (DPO) alignment. DPOLossType: type: string enum: @@ -1733,25 +1676,7 @@ components: - shuffle - data_format title: DataConfig - description: 'Configuration for training data and data loading. 
- - - :param dataset_id: Unique identifier for the training dataset - - :param batch_size: Number of samples per training batch - - :param shuffle: Whether to shuffle the dataset during training - - :param data_format: Format of the dataset (instruct or dialog) - - :param validation_dataset_id: (Optional) Unique identifier for the validation - dataset - - :param packed: (Optional) Whether to pack multiple samples into a single sequence - for efficiency - - :param train_on_input: (Optional) Whether to compute loss on input tokens - as well as output tokens' + description: Configuration for training data and data loading. Dataset: properties: identifier: @@ -1795,22 +1720,15 @@ components: - purpose - source title: Dataset - description: 'Dataset resource for storing and accessing training or evaluation + description: Dataset resource for storing and accessing training or evaluation data. - - - :param type: Type of resource, always ''dataset'' for datasets' DatasetFormat: type: string enum: - instruct - dialog title: DatasetFormat - description: 'Format of the training dataset. - - :cvar instruct: Instruction-following format with prompt and completion - - :cvar dialog: Multi-turn conversation format with messages' + description: Format of the training dataset. DatasetPurpose: type: string enum: @@ -1818,20 +1736,8 @@ components: - eval/question-answer - eval/messages-answer title: DatasetPurpose - description: "Purpose of the dataset. Each purpose has a required input data\ - \ schema.\n\n:cvar post-training/messages: The dataset contains messages used\ - \ for post-training.\n {\n \"messages\": [\n {\"role\"\ - : \"user\", \"content\": \"Hello, world!\"},\n {\"role\": \"assistant\"\ - , \"content\": \"Hello, world!\"},\n ]\n }\n:cvar eval/question-answer:\ - \ The dataset contains a question column and an answer column.\n {\n \ - \ \"question\": \"What is the capital of France?\",\n \"answer\"\ - : \"Paris\"\n }\n:cvar eval/messages-answer: The dataset contains a messages\ - \ column with list of messages and an answer column.\n {\n \"messages\"\ - : [\n {\"role\": \"user\", \"content\": \"Hello, my name is John\ - \ Doe.\"},\n {\"role\": \"assistant\", \"content\": \"Hello, John\ - \ Doe. How can I help you today?\"},\n {\"role\": \"user\", \"\ - content\": \"What's my name?\"},\n ],\n \"answer\": \"John Doe\"\ - \n }" + description: Purpose of the dataset. Each purpose has a required input data + schema. Document: properties: content: @@ -1865,12 +1771,7 @@ components: - content - mime_type title: Document - description: 'A document to be used by an agent. - - - :param content: The content of the document. - - :param mime_type: The MIME type of the document.' + description: A document to be used by an agent. EfficiencyConfig: properties: enable_activation_checkpointing: @@ -1891,20 +1792,7 @@ components: type: boolean type: object title: EfficiencyConfig - description: 'Configuration for memory and compute efficiency optimizations. - - - :param enable_activation_checkpointing: (Optional) Whether to use activation - checkpointing to reduce memory usage - - :param enable_activation_offloading: (Optional) Whether to offload activations - to CPU to save GPU memory - - :param memory_efficient_fsdp_wrap: (Optional) Whether to use memory-efficient - FSDP wrapping - - :param fsdp_cpu_offload: (Optional) Whether to offload FSDP parameters to - CPU' + description: Configuration for memory and compute efficiency optimizations. 
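The DataConfig and DatasetFormat entries above drop their :param/:cvar docstrings; as a quick reference while reviewing (not part of the patch), a Python sketch of a DataConfig-shaped object under the stated required fields. The dataset identifier is a made-up placeholder.

# DataConfig sketch: dataset_id, batch_size, shuffle and data_format are the
# required properties per the schema above; data_format is "instruct" or "dialog".
data_config = {
    "dataset_id": "example-dataset-id",  # hypothetical identifier
    "batch_size": 8,
    "shuffle": True,
    "data_format": "instruct",
}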
EvaluateResponse: properties: generations: @@ -1923,12 +1811,7 @@ components: - generations - scores title: EvaluateResponse - description: 'The response from an evaluation. - - - :param generations: The generations from the evaluation. - - :param scores: The scores from the evaluation.' + description: The response from an evaluation. GrammarResponseFormat: properties: type: @@ -1944,12 +1827,7 @@ components: required: - bnf title: GrammarResponseFormat - description: 'Configuration for grammar-guided response generation. - - - :param type: Must be "grammar" to identify this format type - - :param bnf: The BNF grammar specification the response should conform to' + description: Configuration for grammar-guided response generation. GreedySamplingStrategy: properties: type: @@ -1959,11 +1837,8 @@ components: default: greedy type: object title: GreedySamplingStrategy - description: 'Greedy sampling strategy that selects the highest probability - token at each step. - - - :param type: Must be "greedy" to identify this sampling strategy' + description: Greedy sampling strategy that selects the highest probability token + at each step. ImageContentItem-Input: properties: type: @@ -1977,12 +1852,7 @@ components: required: - image title: ImageContentItem - description: 'A image content item - - - :param type: Discriminator type of the content item. Always "image" - - :param image: Image as a base64 encoded string or an URL' + description: An image content item ImageContentItem-Output: properties: type: @@ -1996,12 +1866,7 @@ components: required: - image title: ImageContentItem - description: 'A image content item - - - :param type: Discriminator type of the content item. Always "image" - - :param image: Image as a base64 encoded string or an URL' + description: An image content item InferenceStep-Output: properties: turn_id: @@ -2031,10 +1896,7 @@ components: - step_id - model_response title: InferenceStep - description: 'An inference step in an agent turn. - - - :param model_response: The response from the LLM.' + description: An inference step in an agent turn. Job: properties: job_id: @@ -2047,12 +1909,7 @@ components: - job_id - status title: Job - description: 'A job execution instance with status tracking. - - - :param job_id: Unique identifier for the job - - :param status: Current execution status of the job' + description: A job execution instance with status tracking. JobStatus: type: string enum: @@ -2062,17 +1919,7 @@ components: - scheduled - cancelled title: JobStatus - description: 'Status of a job execution. - - :cvar completed: Job has finished successfully - - :cvar in_progress: Job is currently running - - :cvar failed: Job has failed during execution - - :cvar scheduled: Job is scheduled but not yet started - - :cvar cancelled: Job was cancelled before completion' + description: Status of a job execution. JsonSchemaResponseFormat: properties: type: @@ -2088,13 +1935,7 @@ components: required: - json_schema title: JsonSchemaResponseFormat - description: 'Configuration for JSON schema-guided response generation. - - - :param type: Must be "json_schema" to identify this format type - - :param json_schema: The JSON schema the response should conform to. In a Python - SDK, this is often a `pydantic` model.' + description: Configuration for JSON schema-guided response generation.
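The two response-format schemas above are small tagged objects; a Python sketch (not part of the patch) of what each variant looks like as data, with the inner JSON schema and BNF grammar invented purely for illustration.

# JsonSchemaResponseFormat sketch: type is the literal "json_schema" and
# json_schema holds an arbitrary JSON Schema (often generated from a pydantic model).
json_schema_format = {
    "type": "json_schema",
    "json_schema": {
        "type": "object",
        "properties": {"answer": {"type": "string"}},
        "required": ["answer"],
    },
}

# GrammarResponseFormat sketch: type is the literal "grammar" and bnf is the
# grammar the response should conform to (placeholder grammar below).
grammar_format = {
    "type": "grammar",
    "bnf": "root ::= 'yes' | 'no'",
}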
LLMAsJudgeScoringFnParams: properties: type: @@ -2124,18 +1965,7 @@ components: required: - judge_model title: LLMAsJudgeScoringFnParams - description: 'Parameters for LLM-as-judge scoring function configuration. - - :param type: The type of scoring function parameters, always llm_as_judge - - :param judge_model: Identifier of the LLM model to use as a judge for scoring - - :param prompt_template: (Optional) Custom prompt template for the judge model - - :param judge_score_regexes: Regexes to extract the answer from generated response - - :param aggregation_functions: Aggregation functions to apply to the scores - of each row' + description: Parameters for LLM-as-judge scoring function configuration. ListBenchmarksResponse: properties: data: @@ -2158,10 +1988,7 @@ components: required: - data title: ListDatasetsResponse - description: 'Response from listing datasets. - - - :param data: List of datasets' + description: Response from listing datasets. ListPostTrainingJobsResponse: properties: data: @@ -2226,13 +2053,7 @@ components: - vector_store_ids - inserted_context title: MemoryRetrievalStep - description: 'A memory retrieval step in an agent turn. - - - :param vector_store_ids: The IDs of the vector databases to retrieve context - from. - - :param inserted_context: The context retrieved from the vector databases.' + description: A memory retrieval step in an agent turn. ModelCandidate: properties: type: @@ -2252,15 +2073,7 @@ components: - model - sampling_params title: ModelCandidate - description: 'A model candidate for evaluation. - - - :param model: The model ID to evaluate. - - :param sampling_params: The sampling parameters for the model. - - :param system_message: (Optional) The system message providing instructions - or context to the model.' + description: A model candidate for evaluation. OptimizerConfig: properties: optimizer_type: @@ -2281,16 +2094,7 @@ components: - weight_decay - num_warmup_steps title: OptimizerConfig - description: 'Configuration parameters for the optimization algorithm. - - - :param optimizer_type: Type of optimizer to use (adam, adamw, or sgd) - - :param lr: Learning rate for the optimizer - - :param weight_decay: Weight decay coefficient for regularization - - :param num_warmup_steps: Number of steps for learning rate warmup' + description: Configuration parameters for the optimization algorithm. OptimizerType: type: string enum: @@ -2298,13 +2102,7 @@ components: - adamw - sgd title: OptimizerType - description: 'Available optimizer algorithms for training. - - :cvar adam: Adaptive Moment Estimation optimizer - - :cvar adamw: AdamW optimizer with weight decay - - :cvar sgd: Stochastic Gradient Descent optimizer' + description: Available optimizer algorithms for training. PostTrainingJob: properties: job_uuid: @@ -2335,14 +2133,7 @@ components: description: Aggregation functions to apply to the scores of each row type: object title: RegexParserScoringFnParams - description: 'Parameters for regex parser scoring function configuration. - - :param type: The type of scoring function parameters, always regex_parser - - :param parsing_regexes: Regex to extract the answer from generated response - - :param aggregation_functions: Aggregation functions to apply to the scores - of each row' + description: Parameters for regex parser scoring function configuration. RerankData: properties: index: @@ -2356,13 +2147,7 @@ components: - index - relevance_score title: RerankData - description: 'A single rerank result from a reranking response. 
- - - :param index: The original index of the document in the input list - - :param relevance_score: The relevance score from the model output. Values - are inverted when applicable so that higher scores indicate greater relevance.' + description: A single rerank result from a reranking response. RerankResponse: properties: data: @@ -2374,10 +2159,7 @@ components: required: - data title: RerankResponse - description: 'Response from a reranking request. - - - :param data: List of rerank result objects, sorted by relevance score (descending)' + description: Response from a reranking request. RowsDataSource: properties: type: @@ -2395,10 +2177,7 @@ components: required: - rows title: RowsDataSource - description: "A dataset stored in rows.\n:param rows: The dataset is stored\ - \ in rows. E.g.\n - [\n {\"messages\": [{\"role\": \"user\", \"\ - content\": \"Hello, world!\"}, {\"role\": \"assistant\", \"content\": \"Hello,\ - \ world!\"}]}\n ]" + description: A dataset stored in rows. SafetyViolation: properties: violation_level: @@ -2414,15 +2193,7 @@ components: required: - violation_level title: SafetyViolation - description: 'Details of a safety violation detected by content moderation. - - - :param violation_level: Severity level of the violation - - :param user_message: (Optional) Message to convey to the user about the violation - - :param metadata: Additional metadata including specific violation codes for - debugging and telemetry' + description: Details of a safety violation detected by content moderation. SamplingParams: properties: strategy: @@ -2451,14 +2222,7 @@ components: type: array type: object title: SamplingParams - description: "Sampling parameters.\n\n:param strategy: The sampling strategy.\n\ - :param max_tokens: The maximum number of tokens that can be generated in the\ - \ completion. The token count of\n your prompt plus max_tokens cannot exceed\ - \ the model's context length.\n:param repetition_penalty: Number between -2.0\ - \ and 2.0. Positive values penalize new tokens\n based on whether they\ - \ appear in the text so far, increasing the model's likelihood to talk about\ - \ new topics.\n:param stop: Up to 4 sequences where the API will stop generating\ - \ further tokens.\n The returned text will not contain the stop sequence." + description: Sampling parameters. ScoringResult: properties: score_rows: @@ -2476,13 +2240,7 @@ components: - score_rows - aggregated_results title: ScoringResult - description: 'A scoring result for a single row. - - - :param score_rows: The scoring result for each row. Each row is a map of column - name to value. - - :param aggregated_results: Map of metric name to aggregated value' + description: A scoring result for a single row. ShieldCallStep-Output: properties: turn_id: @@ -2512,10 +2270,7 @@ components: - step_id - violation title: ShieldCallStep - description: 'A shield call step in an agent turn. - - - :param violation: The violation from the shield call.' + description: A shield call step in an agent turn. StopReason: type: string enum: @@ -2556,26 +2311,14 @@ components: required: - content title: SystemMessage - description: 'A system message providing instructions or context to the model. - - - :param role: Must be "system" to identify this as a system message - - :param content: The content of the "system prompt". If multiple system messages - are provided, they are concatenated. The underlying Llama Stack code may also - add other system messages (for example, for formatting tool definitions).' 
+ description: A system message providing instructions or context to the model. SystemMessageBehavior: type: string enum: - append - replace title: SystemMessageBehavior - description: "Config for how to override the default system prompt.\n\n:cvar\ - \ append: Appends the provided system message to the default system prompt:\n\ - \ https://www.llama.com/docs/model-cards-and-prompt-formats/llama3_2/#-function-definitions-in-the-system-prompt-\n\ - :cvar replace: Replaces the default system prompt with the provided system\ - \ message. The system message can include the string\n '{{function_definitions}}'\ - \ to indicate where the function definitions should be inserted." + description: Config for how to override the default system prompt. TextContentItem: properties: type: @@ -2590,12 +2333,7 @@ components: required: - text title: TextContentItem - description: 'A text content item - - - :param type: Discriminator type of the content item. Always "text" - - :param text: Text content' + description: A text content item ToolCall: properties: call_id: @@ -2622,16 +2360,9 @@ components: - required - none title: ToolChoice - description: 'Whether tool use is required or automatic. This is a hint to the + description: Whether tool use is required or automatic. This is a hint to the model which may not be followed. It depends on the Instruction Following capabilities of the model. - - - :cvar auto: The model may use tools if it determines that is appropriate. - - :cvar required: The model must use tools. - - :cvar none: The model must not use tools.' ToolConfig: properties: tool_choice: @@ -2647,21 +2378,7 @@ components: $ref: '#/components/schemas/SystemMessageBehavior' type: object title: ToolConfig - description: "Configuration for tool use.\n\n:param tool_choice: (Optional)\ - \ Whether tool use is automatic, required, or none. Can also specify a tool\ - \ name to use a specific tool. Defaults to ToolChoice.auto.\n:param tool_prompt_format:\ - \ (Optional) Instructs the model how to format tool calls. By default, Llama\ - \ Stack will attempt to use a format that is best adapted to the model.\n\ - \ - `ToolPromptFormat.json`: The tool calls are formatted as a JSON object.\n\ - \ - `ToolPromptFormat.function_tag`: The tool calls are enclosed in a \ - \ tag.\n - `ToolPromptFormat.python_list`: The tool calls are output as\ - \ Python syntax -- a list of function calls.\n:param system_message_behavior:\ - \ (Optional) Config for how to override the default system prompt.\n -\ - \ `SystemMessageBehavior.append`: Appends the provided system message to the\ - \ default system prompt.\n - `SystemMessageBehavior.replace`: Replaces\ - \ the default system prompt with the provided system message. The system message\ - \ can include the string\n '{{function_definitions}}' to indicate where\ - \ the function definitions should be inserted." + description: Configuration for tool use. ToolDef: properties: toolgroup_id: @@ -2689,21 +2406,7 @@ components: required: - name title: ToolDef - description: 'Tool definition used in runtime contexts. 
- - - :param name: Name of the tool - - :param description: (Optional) Human-readable description of what the tool - does - - :param input_schema: (Optional) JSON Schema for tool inputs (MCP inputSchema) - - :param output_schema: (Optional) JSON Schema for tool outputs (MCP outputSchema) - - :param metadata: (Optional) Additional metadata about the tool - - :param toolgroup_id: (Optional) ID of the tool group this tool belongs to' + description: Tool definition used in runtime contexts. ToolExecutionStep-Output: properties: turn_id: @@ -2742,12 +2445,7 @@ components: - tool_calls - tool_responses title: ToolExecutionStep - description: 'A tool execution step in an agent turn. - - - :param tool_calls: The tool calls to execute. - - :param tool_responses: The tool responses from the tool calls.' + description: A tool execution step in an agent turn. ToolPromptFormat: type: string enum: @@ -2755,16 +2453,7 @@ components: - function_tag - python_list title: ToolPromptFormat - description: "Prompt format for calling custom / zero shot tools.\n\n:cvar json:\ - \ JSON format for calling tools. It takes the form:\n {\n \"type\"\ - : \"function\",\n \"function\" : {\n \"name\": \"function_name\"\ - ,\n \"description\": \"function_description\",\n \"\ - parameters\": {...}\n }\n }\n:cvar function_tag: Function tag format,\ - \ pseudo-XML. This looks like:\n (parameters)\n\ - \n:cvar python_list: Python list. The output is a valid Python expression\ - \ that can be\n evaluated to a list. Each element in the list is a function\ - \ call. Example:\n [\"function_name(param1, param2)\", \"function_name(param1,\ - \ param2)\"]" + description: Prompt format for calling custom / zero shot tools. ToolResponse-Input: properties: call_id: @@ -2807,16 +2496,7 @@ components: - tool_name - content title: ToolResponse - description: 'Response from a tool invocation. - - - :param call_id: Unique identifier for the tool call this response is for - - :param tool_name: Name of the tool that was invoked - - :param content: The response content from the tool - - :param metadata: (Optional) Additional metadata about the tool response' + description: Response from a tool invocation. ToolResponse-Output: properties: call_id: @@ -2859,16 +2539,7 @@ components: - tool_name - content title: ToolResponse - description: 'Response from a tool invocation. - - - :param call_id: Unique identifier for the tool call this response is for - - :param tool_name: Name of the tool that was invoked - - :param content: The response content from the tool - - :param metadata: (Optional) Additional metadata about the tool response' + description: Response from a tool invocation. ToolResponseMessage-Output: properties: role: @@ -2906,14 +2577,7 @@ components: - call_id - content title: ToolResponseMessage - description: 'A message representing the result of a tool invocation. - - - :param role: Must be "tool" to identify this as a tool response - - :param call_id: Unique identifier for the tool call this response is for - - :param content: The response content from the tool' + description: A message representing the result of a tool invocation. TopKSamplingStrategy: properties: type: @@ -2929,14 +2593,8 @@ components: required: - top_k title: TopKSamplingStrategy - description: 'Top-k sampling strategy that restricts sampling to the k most - likely tokens. - - - :param type: Must be "top_k" to identify this sampling strategy - - :param top_k: Number of top tokens to consider for sampling. 
Must be at least - 1' + description: Top-k sampling strategy that restricts sampling to the k most likely + tokens. TopPSamplingStrategy: properties: type: @@ -2956,17 +2614,8 @@ components: required: - temperature title: TopPSamplingStrategy - description: 'Top-p (nucleus) sampling strategy that samples from the smallest + description: Top-p (nucleus) sampling strategy that samples from the smallest set of tokens with cumulative probability >= p. - - - :param type: Must be "top_p" to identify this sampling strategy - - :param temperature: Controls randomness in sampling. Higher values increase - randomness - - :param top_p: Cumulative probability threshold for nucleus sampling. Defaults - to 0.95' TrainingConfig: properties: n_epochs: @@ -2998,27 +2647,7 @@ components: required: - n_epochs title: TrainingConfig - description: 'Comprehensive configuration for the training process. - - - :param n_epochs: Number of training epochs to run - - :param max_steps_per_epoch: Maximum number of steps to run per epoch - - :param gradient_accumulation_steps: Number of steps to accumulate gradients - before updating - - :param max_validation_steps: (Optional) Maximum number of validation steps - per epoch - - :param data_config: (Optional) Configuration for data loading and formatting - - :param optimizer_config: (Optional) Configuration for the optimization algorithm - - :param efficiency_config: (Optional) Configuration for memory and compute - optimizations - - :param dtype: (Optional) Data type for model parameters (bf16, fp16, fp32)' + description: Comprehensive configuration for the training process. Turn: properties: turn_id: @@ -3074,26 +2703,7 @@ components: - output_message - started_at title: Turn - description: 'A single turn in an interaction with an Agentic System. - - - :param turn_id: Unique identifier for the turn within a session - - :param session_id: Unique identifier for the conversation session - - :param input_messages: List of messages that initiated this turn - - :param steps: Ordered list of processing steps executed during this turn - - :param output_message: The model''s generated response containing content - and metadata - - :param output_attachments: (Optional) Files or media attached to the agent''s - response - - :param started_at: Timestamp when the turn began - - :param completed_at: (Optional) Timestamp when the turn finished, if completed' + description: A single turn in an interaction with an Agentic System. URIDataSource: properties: type: @@ -3108,9 +2718,7 @@ components: required: - uri title: URIDataSource - description: "A dataset that can be obtained from a URI.\n:param uri: The dataset\ - \ can be obtained from a URI. E.g.\n - \"https://mywebsite.com/mydata.jsonl\"\ - \n - \"lsfs://mydata.jsonl\"\n - \"data:csv;base64,{base64_content}\"" + description: A dataset that can be obtained from a URI. URL: properties: uri: @@ -3120,10 +2728,7 @@ components: required: - uri title: URL - description: 'A URL reference to external content. - - - :param uri: The URL string pointing to the resource' + description: A URL reference to external content. UserMessage-Input: properties: role: @@ -3179,16 +2784,7 @@ components: required: - content title: UserMessage - description: 'A message from the user in a chat conversation. 
- - - :param role: Must be "user" to identify this as a user message - - :param content: The content of the message, which can include text and other - media - - :param context: (Optional) This field is used internally by Llama Stack to - pass RAG context. This field may be removed in the API in the future.' + description: A message from the user in a chat conversation. UserMessage-Output: properties: role: @@ -3244,16 +2840,7 @@ components: required: - content title: UserMessage - description: 'A message from the user in a chat conversation. - - - :param role: Must be "user" to identify this as a user message - - :param content: The content of the message, which can include text and other - media - - :param context: (Optional) This field is used internally by Llama Stack to - pass RAG context. This field may be removed in the API in the future.' + description: A message from the user in a chat conversation. ViolationLevel: type: string enum: @@ -3261,14 +2848,7 @@ components: - warn - error title: ViolationLevel - description: 'Severity level of a safety violation. - - - :cvar INFO: Informational level violation that does not require action - - :cvar WARN: Warning level violation that suggests caution but allows continuation - - :cvar ERROR: Error level violation that requires blocking or intervention' + description: Severity level of a safety violation. _URLOrData: properties: url: @@ -3279,13 +2859,7 @@ components: type: string type: object title: _URLOrData - description: 'A URL or a base64 encoded string - - - :param url: A URL of the image or data URL in the format of data:image/{type};base64,{data}. - Note that URL could have length limits. - - :param data: base64 encoded image data as string' + description: A URL or a base64 encoded string __main_____agents_agent_id_session_Request: properties: agent_id: @@ -3393,18 +2967,7 @@ components: - max_num_results title: _inference_rerank_Request Error: - description: 'Error response from the API. Roughly follows RFC 7807. - - - :param status: HTTP status code - - :param title: Error title, a short summary of the error which is invariant - for an error type - - :param detail: Error detail, a longer human-readable description of the error - - :param instance: (Optional) A URL which can be used to retrieve more information - about the specific occurrence of the error' + description: Error response from the API. Roughly follows RFC 7807. properties: status: title: Status @@ -3426,14 +2989,7 @@ components: title: Error type: object Agent: - description: 'An agent instance with configuration and metadata. - - - :param agent_id: Unique identifier for the agent - - :param agent_config: Configuration settings for the agent - - :param created_at: Timestamp when the agent was created' + description: An agent instance with configuration and metadata. properties: agent_id: title: Agent Id @@ -3451,10 +3007,7 @@ components: title: Agent type: object AgentStepResponse: - description: 'Response containing details of a specific agent step. - - - :param step: The complete step data and execution details' + description: Response containing details of a specific agent step. 
properties: step: discriminator: @@ -3475,16 +3028,8 @@ components: title: AgentStepResponse type: object CompletionMessage: - description: "A message containing the model's (assistant) response in a chat\ - \ conversation.\n\n:param role: Must be \"assistant\" to identify this as\ - \ the model's response\n:param content: The content of the model's response\n\ - :param stop_reason: Reason why the model stopped generating. Options are:\n\ - \ - `StopReason.end_of_turn`: The model finished generating the entire\ - \ response.\n - `StopReason.end_of_message`: The model finished generating\ - \ but generated a partial response -- usually, a tool call. The user may call\ - \ the tool and continue the conversation with the tool's response.\n -\ - \ `StopReason.out_of_tokens`: The model ran out of token budget.\n:param tool_calls:\ - \ List of tool calls. Each tool call is a ToolCall object." + description: A message containing the model's (assistant) response in a chat + conversation. properties: role: const: assistant @@ -3526,10 +3071,7 @@ components: title: CompletionMessage type: object InferenceStep: - description: 'An inference step in an agent turn. - - - :param model_response: The response from the LLM.' + description: An inference step in an agent turn. properties: turn_id: title: Turn Id @@ -3561,13 +3103,7 @@ components: title: InferenceStep type: object MemoryRetrievalStep: - description: 'A memory retrieval step in an agent turn. - - - :param vector_store_ids: The IDs of the vector databases to retrieve context - from. - - :param inserted_context: The context retrieved from the vector databases.' + description: A memory retrieval step in an agent turn. properties: turn_id: title: Turn Id @@ -3623,14 +3159,7 @@ components: title: MemoryRetrievalStep type: object PaginatedResponse: - description: 'A generic paginated response that follows a simple format. - - - :param data: The list of items for the current page - - :param has_more: Whether there are more items available after this set - - :param url: The URL for accessing this list' + description: A generic paginated response that follows a simple format. properties: data: items: @@ -3651,16 +3180,7 @@ components: title: PaginatedResponse type: object Session: - description: 'A single session of an interaction with an Agentic System. - - - :param session_id: Unique identifier for the conversation session - - :param session_name: Human-readable name for the session - - :param turns: List of all turns that have occurred in this session - - :param started_at: Timestamp when the session was created' + description: A single session of an interaction with an Agentic System. properties: session_id: title: Session Id @@ -3685,10 +3205,7 @@ components: title: Session type: object ShieldCallStep: - description: 'A shield call step in an agent turn. - - - :param violation: The violation from the shield call.' + description: A shield call step in an agent turn. properties: turn_id: title: Turn Id @@ -3720,12 +3237,7 @@ components: title: ShieldCallStep type: object ToolExecutionStep: - description: 'A tool execution step in an agent turn. - - - :param tool_calls: The tool calls to execute. - - :param tool_responses: The tool responses from the tool calls.' + description: A tool execution step in an agent turn. properties: turn_id: title: Turn Id @@ -3766,16 +3278,7 @@ components: title: ToolExecutionStep type: object ToolResponse: - description: 'Response from a tool invocation. 
- - - :param call_id: Unique identifier for the tool call this response is for - - :param tool_name: Name of the tool that was invoked - - :param content: The response content from the tool - - :param metadata: (Optional) Additional metadata about the tool response' + description: Response from a tool invocation. properties: call_id: title: Call Id @@ -3819,22 +3322,7 @@ components: title: ToolResponse type: object Checkpoint: - description: 'Checkpoint created during training runs. - - - :param identifier: Unique identifier for the checkpoint - - :param created_at: Timestamp when the checkpoint was created - - :param epoch: Training epoch when the checkpoint was saved - - :param post_training_job_id: Identifier of the training job that created this - checkpoint - - :param path: File system path where the checkpoint is stored - - :param training_metrics: (Optional) Training metrics associated with this - checkpoint' + description: Checkpoint created during training runs. properties: identifier: title: Identifier @@ -3864,12 +3352,7 @@ components: title: Checkpoint type: object PostTrainingJobArtifactsResponse: - description: 'Artifacts of a finetuning job. - - - :param job_uuid: Unique identifier for the training job - - :param checkpoints: List of model checkpoints created during training' + description: Artifacts of a finetuning job. properties: job_uuid: title: Job Uuid @@ -3884,23 +3367,7 @@ components: title: PostTrainingJobArtifactsResponse type: object PostTrainingJobStatusResponse: - description: 'Status of a finetuning job. - - - :param job_uuid: Unique identifier for the training job - - :param status: Current status of the training job - - :param scheduled_at: (Optional) Timestamp when the job was scheduled - - :param started_at: (Optional) Timestamp when the job execution began - - :param completed_at: (Optional) Timestamp when the job finished, if completed - - :param resources_allocated: (Optional) Information about computational resources - allocated to the job - - :param checkpoints: List of model checkpoints created during training' + description: Status of a finetuning job. properties: job_uuid: title: Job Uuid @@ -3938,12 +3405,7 @@ components: title: PostTrainingJobStatusResponse type: object ImageContentItem: - description: 'A image content item - - - :param type: Discriminator type of the content item. Always "image" - - :param image: Image as a base64 encoded string or an URL' + description: An image content item properties: type: const: image @@ -3957,16 +3419,7 @@ components: title: ImageContentItem type: object PostTrainingMetric: - description: 'Training metrics captured during post-training jobs. - - - :param epoch: Training epoch number - - :param train_loss: Loss value on the training dataset - - :param validation_loss: Loss value on the validation dataset - - :param perplexity: Perplexity metric indicating model confidence' + description: Training metrics captured during post-training jobs. properties: epoch: title: Epoch @@ -4017,9 +3470,9 @@ components: example: status: 500 title: Internal Server Error - detail: An unexpected error occurred + detail: An unexpected error occurred. Our team has been notified.
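The 500 example above, together with the Error schema earlier in this patch (status, title, detail, and an optional instance URL, roughly RFC 7807), implies an error body like the following Python sketch; the instance URL is a placeholder and not defined anywhere in the spec.

import json

# Error-body sketch assembled from the Error schema and the 500 example above.
error_body = {
    "status": 500,
    "title": "Internal Server Error",
    "detail": "An unexpected error occurred. Our team has been notified.",
    "instance": "https://errors.example/occurrences/12345",  # hypothetical URL
}

print(json.dumps(error_body, indent=2))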
DefaultError: - description: An error occurred + description: An unexpected error occurred content: application/json: schema: diff --git a/docs/static/llama-stack-spec.json b/docs/static/llama-stack-spec.json new file mode 100644 index 000000000..396640dab --- /dev/null +++ b/docs/static/llama-stack-spec.json @@ -0,0 +1,11541 @@ +{ + "openapi": "3.1.0", + "info": { + "title": "Llama Stack API", + "description": "A comprehensive API for building and deploying AI applications", + "version": "1.0.0" + }, + "servers": [ + { + "url": "https://api.llamastack.com", + "description": "Production server" + }, + { + "url": "https://staging-api.llamastack.com", + "description": "Staging server" + } + ], + "paths": { + "/v1/batches": { + "get": { + "tags": [ + "V1" + ], + "summary": "List all batches for the current user.", + "description": "Query endpoint for proper schema generation.", + "operationId": "list_batches_v1_batches_get", + "parameters": [ + { + "name": "after", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "After" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "default": 20, + "title": "Limit" + } + } + ], + "responses": { + "200": { + "description": "A list of batch objects.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListBatchesResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Create a new batch for processing multiple API requests.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "create_batch_v1_batches_post", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_batches_Request" + } + } + } + }, + "responses": { + "200": { + "description": "The created batch object.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Batch" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/batches/{batch_id}": { + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieve information about a specific batch.", + "description": "Query endpoint for proper schema generation.", + "operationId": "retrieve_batch_v1_batches__batch_id__get", + "parameters": [ + { + "name": "batch_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Batch Id" + }, + "description": "The ID of the batch to retrieve." 
+ } + ], + "responses": { + "200": { + "description": "The batch object.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Batch" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/batches/{batch_id}/cancel": { + "post": { + "tags": [ + "V1" + ], + "summary": "Cancel a batch that is in progress.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "cancel_batch_v1_batches__batch_id__cancel_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_batches_batch_id_cancel_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "The updated batch object.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Batch" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "parameters": [ + { + "name": "batch_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the batch to cancel." 
+ } + ] + } + }, + "/v1/chat/completions": { + "get": { + "tags": [ + "V1" + ], + "summary": "List chat completions.", + "description": "Query endpoint for proper schema generation.", + "operationId": "list_chat_completions_v1_chat_completions_get", + "parameters": [ + { + "name": "after", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "After" + } + }, + { + "name": "model", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Model" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "default": 20, + "title": "Limit" + } + }, + { + "name": "order", + "in": "query", + "required": false, + "schema": { + "$ref": "#/components/schemas/Order", + "default": "desc" + } + } + ], + "responses": { + "200": { + "description": "A ListOpenAIChatCompletionResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListOpenAIChatCompletionResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Create chat completions.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_chat_completion_v1_chat_completions_post", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIChatCompletionRequestWithExtraBody" + } + } + } + }, + "responses": { + "200": { + "description": "An OpenAIChatCompletion.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIChatCompletion" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/chat/completions/{completion_id}": { + "get": { + "tags": [ + "V1" + ], + "summary": "Get chat completion.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_chat_completion_v1_chat_completions__completion_id__get", + "parameters": [ + { + "name": "completion_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Completion Id" + }, + "description": "ID of the chat completion." 
+ } + ], + "responses": { + "200": { + "description": "A OpenAICompletionWithInputMessages.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAICompletionWithInputMessages" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/completions": { + "post": { + "tags": [ + "V1" + ], + "summary": "Create completion.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_completion_v1_completions_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAICompletionRequestWithExtraBody" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "An OpenAICompletion.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAICompletion" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/conversations": { + "post": { + "tags": [ + "V1" + ], + "summary": "Create a conversation.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "create_conversation_v1_conversations_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_conversations_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "The created conversation object.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Conversation" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/conversations/{conversation_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Delete a conversation.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_delete_conversation_v1_conversations__conversation_id__delete", + "parameters": [ + { + "name": "conversation_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Conversation Id" + }, + "description": "The conversation identifier." 
+ } + ], + "responses": { + "200": { + "description": "The deleted conversation resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ConversationDeletedResource" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieve a conversation.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_conversation_v1_conversations__conversation_id__get", + "parameters": [ + { + "name": "conversation_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Conversation Id" + }, + "description": "The conversation identifier." + } + ], + "responses": { + "200": { + "description": "The conversation object.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Conversation" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Update a conversation.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "update_conversation_v1_conversations__conversation_id__post", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_conversations_conversation_id_Request" + } + } + } + }, + "responses": { + "200": { + "description": "The updated conversation object.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Conversation" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + }, + "parameters": [ + { + "name": "conversation_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The conversation identifier." 
+ } + ] + } + }, + "/v1/conversations/{conversation_id}/items": { + "get": { + "tags": [ + "V1" + ], + "summary": "List items.", + "description": "Query endpoint for proper schema generation.", + "operationId": "list_items_v1_conversations__conversation_id__items_get", + "parameters": [ + { + "name": "after", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "After" + } + }, + { + "name": "include", + "in": "query", + "required": true, + "schema": { + "$ref": "#/components/schemas/ConversationItemInclude" + } + }, + { + "name": "limit", + "in": "query", + "required": true, + "schema": { + "type": "integer", + "title": "Limit" + } + }, + { + "name": "order", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Order" + } + }, + { + "name": "conversation_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Conversation Id" + }, + "description": "The conversation identifier." + } + ], + "responses": { + "200": { + "description": "List of conversation items.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ConversationItemList" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Create items.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "add_items_v1_conversations__conversation_id__items_post", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_conversations_conversation_id_items_Request" + } + } + } + }, + "responses": { + "200": { + "description": "List of created items.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ConversationItemList" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + }, + "parameters": [ + { + "name": "conversation_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The conversation identifier." + } + ] + } + }, + "/v1/conversations/{conversation_id}/items/{item_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Delete an item.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_delete_conversation_item_v1_conversations__conversation_id__items__item_id__delete", + "parameters": [ + { + "name": "conversation_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Conversation Id" + }, + "description": "The conversation identifier." + }, + { + "name": "item_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Item Id" + }, + "description": "The item identifier." 
+ } + ], + "responses": { + "200": { + "description": "The deleted item resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ConversationItemDeletedResource" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieve an item.", + "description": "Query endpoint for proper schema generation.", + "operationId": "retrieve_v1_conversations__conversation_id__items__item_id__get", + "parameters": [ + { + "name": "conversation_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Conversation Id" + }, + "description": "The conversation identifier." + }, + { + "name": "item_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Item Id" + }, + "description": "The item identifier." + } + ], + "responses": { + "200": { + "description": "The conversation item.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIResponseMessage" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/embeddings": { + "post": { + "tags": [ + "V1" + ], + "summary": "Create embeddings.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_embeddings_v1_embeddings_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIEmbeddingsRequestWithExtraBody" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "An OpenAIEmbeddingsResponse containing the embeddings.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIEmbeddingsResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/files": { + "get": { + "tags": [ + "V1" + ], + "summary": "List files.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_list_files_v1_files_get", + "parameters": [ + { + "name": "after", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "After" + } + }, + { + "name": "purpose", + "in": "query", + "required": true, + "schema": { + "$ref": "#/components/schemas/OpenAIFilePurpose" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "default": 10000, + "title": "Limit" + 
} + }, + { + "name": "order", + "in": "query", + "required": false, + "schema": { + "$ref": "#/components/schemas/Order", + "default": "desc" + } + } + ], + "responses": { + "200": { + "description": "A ListOpenAIFileResponse containing the list of files.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListOpenAIFileResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Upload file.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "openai_upload_file_v1_files_post", + "responses": { + "200": { + "description": "An OpenAIFileObject representing the uploaded file.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIFileObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/files/{file_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Delete file.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_delete_file_v1_files__file_id__delete", + "parameters": [ + { + "name": "file_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "File Id" + }, + "description": "The ID of the file to use for this request." + } + ], + "responses": { + "200": { + "description": "An OpenAIFileDeleteResponse indicating successful deletion.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIFileDeleteResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieve file.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_retrieve_file_v1_files__file_id__get", + "parameters": [ + { + "name": "file_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "File Id" + }, + "description": "The ID of the file to use for this request."
+ } + ], + "responses": { + "200": { + "description": "An OpenAIFileObject containing file information.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIFileObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/files/{file_id}/content": { + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieve file content.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "openai_retrieve_file_content_v1_files__file_id__content_get", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "file_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the file to use for this request." + } + ], + "responses": { + "200": { + "description": "The raw file content as a binary response.", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/health": { + "get": { + "tags": [ + "V1" + ], + "summary": "Get health status.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "health_v1_health_get", + "responses": { + "200": { + "description": "Health information indicating if the service is operational.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HealthInfo" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/inspect/routes": { + "get": { + "tags": [ + "V1" + ], + "summary": "List routes.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "list_routes_v1_inspect_routes_get", + "responses": { + "200": { + "description": "Response containing information about all available routes.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListRoutesResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": 
"#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/models": { + "get": { + "tags": [ + "V1" + ], + "summary": "List all models.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "list_models_v1_models_get", + "responses": { + "200": { + "description": "A ListModelsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListModelsResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Register model.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "register_model_v1_models_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_models_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A Model.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Model" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/models/{model_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Unregister model.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "unregister_model_v1_models__model_id__delete", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "model_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "Path parameter: model_id" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Get model.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_model_v1_models__model_id__get", + "parameters": [ + { + "name": "model_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Model Id" + } + } + ], + "responses": { + "200": { + "description": "A Model.", + "content": { + 
"application/json": { + "schema": { + "$ref": "#/components/schemas/Model" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/moderations": { + "post": { + "tags": [ + "V1" + ], + "summary": "Create moderation.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "run_moderation_v1_moderations_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_moderations_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A moderation object.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ModerationObject" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/prompts": { + "get": { + "tags": [ + "V1" + ], + "summary": "List all prompts.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "list_prompts_v1_prompts_get", + "responses": { + "200": { + "description": "A ListPromptsResponse containing all prompts.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListPromptsResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Create prompt.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "create_prompt_v1_prompts_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_prompts_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "The created Prompt resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Prompt" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/prompts/{prompt_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Delete prompt.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + 
"operationId": "delete_prompt_v1_prompts__prompt_id__delete", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "prompt_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The identifier of the prompt to delete." + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Get prompt.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_prompt_v1_prompts__prompt_id__get", + "parameters": [ + { + "name": "version", + "in": "query", + "required": true, + "schema": { + "type": "integer", + "title": "Version" + } + }, + { + "name": "prompt_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Prompt Id" + }, + "description": "The identifier of the prompt to get." + } + ], + "responses": { + "200": { + "description": "A Prompt resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Prompt" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Update prompt.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "update_prompt_v1_prompts__prompt_id__post", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_prompts_prompt_id_Request" + } + } + } + }, + "responses": { + "200": { + "description": "The updated Prompt resource with incremented version.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Prompt" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + }, + "parameters": [ + { + "name": "prompt_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The identifier of the prompt to update." 
+ } + ] + } + }, + "/v1/prompts/{prompt_id}/set-default-version": { + "post": { + "tags": [ + "V1" + ], + "summary": "Set prompt version.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "set_default_version_v1_prompts__prompt_id__set_default_version_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_prompts_prompt_id_set_default_version_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "The prompt with the specified version now set as default.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Prompt" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "parameters": [ + { + "name": "prompt_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The identifier of the prompt." + } + ] + } + }, + "/v1/prompts/{prompt_id}/versions": { + "get": { + "tags": [ + "V1" + ], + "summary": "List prompt versions.", + "description": "Query endpoint for proper schema generation.", + "operationId": "list_prompt_versions_v1_prompts__prompt_id__versions_get", + "parameters": [ + { + "name": "prompt_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Prompt Id" + }, + "description": "The identifier of the prompt to list versions for." 
+ } + ], + "responses": { + "200": { + "description": "A ListPromptsResponse containing all versions of the prompt.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListPromptsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/providers": { + "get": { + "tags": [ + "V1" + ], + "summary": "List providers.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "list_providers_v1_providers_get", + "responses": { + "200": { + "description": "A ListProvidersResponse containing information about all providers.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListProvidersResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/providers/{provider_id}": { + "get": { + "tags": [ + "V1" + ], + "summary": "Get provider.", + "description": "Query endpoint for proper schema generation.", + "operationId": "inspect_provider_v1_providers__provider_id__get", + "parameters": [ + { + "name": "provider_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Provider Id" + }, + "description": "The ID of the provider to inspect." 
+ } + ], + "responses": { + "200": { + "description": "A ProviderInfo object containing the provider's details.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ProviderInfo" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/responses": { + "get": { + "tags": [ + "V1" + ], + "summary": "List all responses.", + "description": "Query endpoint for proper schema generation.", + "operationId": "list_openai_responses_v1_responses_get", + "parameters": [ + { + "name": "after", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "After" + } + }, + { + "name": "model", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Model" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "default": 50, + "title": "Limit" + } + }, + { + "name": "order", + "in": "query", + "required": false, + "schema": { + "$ref": "#/components/schemas/Order", + "default": "desc" + } + } + ], + "responses": { + "200": { + "description": "A ListOpenAIResponseObject.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListOpenAIResponseObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Create a model response.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "create_openai_response_v1_responses_post", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_responses_Request" + } + } + } + }, + "responses": { + "200": { + "description": "An OpenAIResponseObject.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIResponseObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/responses/{response_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Delete a response.", + "description": "Query endpoint for proper schema generation.", + "operationId": "delete_openai_response_v1_responses__response_id__delete", + "parameters": [ + { + "name": "response_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Response Id" + }, + "description": "The ID of the OpenAI response to delete." 
+ } + ], + "responses": { + "200": { + "description": "An OpenAIDeleteResponseObject", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIDeleteResponseObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Get a model response.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_openai_response_v1_responses__response_id__get", + "parameters": [ + { + "name": "response_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Response Id" + }, + "description": "The ID of the OpenAI response to retrieve." + } + ], + "responses": { + "200": { + "description": "An OpenAIResponseObject.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIResponseObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/responses/{response_id}/input_items": { + "get": { + "tags": [ + "V1" + ], + "summary": "List input items.", + "description": "Query endpoint for proper schema generation.", + "operationId": "list_openai_response_input_items_v1_responses__response_id__input_items_get", + "parameters": [ + { + "name": "after", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "After" + } + }, + { + "name": "before", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Before" + } + }, + { + "name": "include", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Include" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "default": 20, + "title": "Limit" + } + }, + { + "name": "order", + "in": "query", + "required": false, + "schema": { + "$ref": "#/components/schemas/Order", + "default": "desc" + } + }, + { + "name": "response_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Response Id" + }, + "description": "The ID of the response to retrieve input items for." 
+ } + ], + "responses": { + "200": { + "description": "A ListOpenAIResponseInputItem.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListOpenAIResponseInputItem" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/safety/run-shield": { + "post": { + "tags": [ + "V1" + ], + "summary": "Run shield.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "run_shield_v1_safety_run_shield_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_safety_run_shield_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A RunShieldResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RunShieldResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/scoring-functions": { + "get": { + "tags": [ + "V1" + ], + "summary": "List all scoring functions.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "list_scoring_functions_v1_scoring_functions_get", + "responses": { + "200": { + "description": "A ListScoringFunctionsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListScoringFunctionsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Register a scoring function.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "register_scoring_function_v1_scoring_functions_post", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError",
+ "description": "Default Response" + } + } + } + }, + "/v1/scoring-functions/{scoring_fn_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Unregister a scoring function.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "unregister_scoring_function_v1_scoring_functions__scoring_fn_id__delete", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "scoring_fn_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "Path parameter: scoring_fn_id" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Get a scoring function by its ID.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_scoring_function_v1_scoring_functions__scoring_fn_id__get", + "parameters": [ + { + "name": "scoring_fn_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Scoring Fn Id" + } + } + ], + "responses": { + "200": { + "description": "A ScoringFn.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ScoringFn" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/scoring/score": { + "post": { + "tags": [ + "V1" + ], + "summary": "Score a list of rows.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "score_v1_scoring_score_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_scoring_score_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A ScoreResponse object containing rows and aggregated results.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ScoreResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/scoring/score-batch": { + "post": { + "tags": [ + "V1" + ], + "summary": "Score a batch of rows.", + "description": "Typed endpoint for proper schema generation.", + "operationId": 
"score_batch_v1_scoring_score_batch_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_scoring_score_batch_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A ScoreBatchResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ScoreBatchResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/shields": { + "get": { + "tags": [ + "V1" + ], + "summary": "List all shields.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "list_shields_v1_shields_get", + "responses": { + "200": { + "description": "A ListShieldsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListShieldsResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Register a shield.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "register_shield_v1_shields_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_shields_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A Shield.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Shield" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/shields/{identifier}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Unregister a shield.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "unregister_shield_v1_shields__identifier__delete", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "identifier", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "Path parameter: identifier" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": 
"#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Get a shield by its identifier.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_shield_v1_shields__identifier__get", + "parameters": [ + { + "name": "identifier", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Identifier" + } + } + ], + "responses": { + "200": { + "description": "A Shield.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Shield" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/tool-runtime/invoke": { + "post": { + "tags": [ + "V1" + ], + "summary": "Run a tool with the given arguments.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "invoke_tool_v1_tool_runtime_invoke_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_tool_runtime_invoke_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A ToolInvocationResult.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ToolInvocationResult" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/tool-runtime/list-tools": { + "get": { + "tags": [ + "V1" + ], + "summary": "List all tools in the runtime.", + "description": "Query endpoint for proper schema generation.", + "operationId": "list_runtime_tools_v1_tool_runtime_list_tools_get", + "parameters": [ + { + "name": "tool_group_id", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Tool Group Id" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/URL" + } + } + } + }, + "responses": { + "200": { + "description": "A ListToolDefsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListToolDefsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + 
"/v1/tool-runtime/rag-tool/insert": { + "post": { + "tags": [ + "V1" + ], + "summary": "Index documents so they can be used by the RAG system.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "rag_tool_insert_v1_tool_runtime_rag_tool_insert_post", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/tool-runtime/rag-tool/query": { + "post": { + "tags": [ + "V1" + ], + "summary": "Query the RAG system for context; typically invoked by the agent.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "rag_tool_query_v1_tool_runtime_rag_tool_query_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_tool_runtime_rag_tool_query_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "RAGQueryResult containing the retrieved content and metadata", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RAGQueryResult" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/toolgroups": { + "get": { + "tags": [ + "V1" + ], + "summary": "List tool groups with optional provider.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "list_tool_groups_v1_toolgroups_get", + "responses": { + "200": { + "description": "A ListToolGroupsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListToolGroupsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Register a tool group.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "register_tool_group_v1_toolgroups_post", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": 
"Kwargs" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/toolgroups/{toolgroup_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Unregister a tool group.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "unregister_toolgroup_v1_toolgroups__toolgroup_id__delete", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "toolgroup_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "Path parameter: toolgroup_id" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Get a tool group by its ID.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_tool_group_v1_toolgroups__toolgroup_id__get", + "parameters": [ + { + "name": "toolgroup_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Toolgroup Id" + } + } + ], + "responses": { + "200": { + "description": "A ToolGroup.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ToolGroup" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/tools": { + "get": { + "tags": [ + "V1" + ], + "summary": "List tools with optional tool group.", + "description": "Query endpoint for proper schema generation.", + "operationId": "list_tools_v1_tools_get", + "parameters": [ + { + "name": "toolgroup_id", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Toolgroup Id" + } + } + ], + "responses": { + "200": { + "description": "A ListToolDefsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListToolDefsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + 
"description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/tools/{tool_name}": { + "get": { + "tags": [ + "V1" + ], + "summary": "Get a tool by its name.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_tool_v1_tools__tool_name__get", + "parameters": [ + { + "name": "tool_name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Tool Name" + } + } + ], + "responses": { + "200": { + "description": "A ToolDef.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ToolDef" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/vector-io/insert": { + "post": { + "tags": [ + "V1" + ], + "summary": "Insert chunks into a vector database.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "insert_chunks_v1_vector_io_insert_post", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/vector-io/query": { + "post": { + "tags": [ + "V1" + ], + "summary": "Query chunks from a vector database.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "query_chunks_v1_vector_io_query_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_vector_io_query_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A QueryChunksResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueryChunksResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/vector_stores": { + "get": { + "tags": [ + "V1" + ], + "summary": "Returns a list of vector stores.", + "description": "Query endpoint for proper schema generation.", + "operationId": 
"openai_list_vector_stores_v1_vector_stores_get", + "parameters": [ + { + "name": "after", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "After" + } + }, + { + "name": "before", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Before" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "default": 20, + "title": "Limit" + } + }, + { + "name": "order", + "in": "query", + "required": false, + "schema": { + "type": "string", + "default": "desc", + "title": "Order" + } + } + ], + "responses": { + "200": { + "description": "A VectorStoreListResponse containing the list of vector stores.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreListResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Creates a vector store.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_create_vector_store_v1_vector_stores_post", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAICreateVectorStoreRequestWithExtraBody" + } + } + } + }, + "responses": { + "200": { + "description": "A VectorStoreObject representing the created vector store.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/vector_stores/{vector_store_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Delete a vector store.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_delete_vector_store_v1_vector_stores__vector_store_id__delete", + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Vector Store Id" + }, + "description": "The ID of the vector store to delete." 
+ } + ], + "responses": { + "200": { + "description": "A VectorStoreDeleteResponse indicating the deletion status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreDeleteResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieves a vector store.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_retrieve_vector_store_v1_vector_stores__vector_store_id__get", + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Vector Store Id" + }, + "description": "The ID of the vector store to retrieve." + } + ], + "responses": { + "200": { + "description": "A VectorStoreObject representing the vector store.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Updates a vector store.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_update_vector_store_v1_vector_stores__vector_store_id__post", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_vector_stores_vector_store_id_Request" + } + } + } + }, + "responses": { + "200": { + "description": "A VectorStoreObject representing the updated vector store.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + }, + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the vector store to update." 
+ } + ] + } + }, + "/v1/vector_stores/{vector_store_id}/file_batches": { + "post": { + "tags": [ + "V1" + ], + "summary": "Create a vector store file batch.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_create_vector_store_file_batch_v1_vector_stores__vector_store_id__file_batches_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAICreateVectorStoreFileBatchRequestWithExtraBody" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A VectorStoreFileBatchObject representing the created file batch.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileBatchObject" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the vector store to create the file batch for." + } + ] + } + }, + "/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}": { + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieve a vector store file batch.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_retrieve_vector_store_file_batch_v1_vector_stores__vector_store_id__file_batches__batch_id__get", + "parameters": [ + { + "name": "batch_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Batch Id" + }, + "description": "The ID of the file batch to retrieve." + }, + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Vector Store Id" + }, + "description": "The ID of the vector store containing the file batch." 
+ } + ], + "responses": { + "200": { + "description": "A VectorStoreFileBatchObject representing the file batch.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileBatchObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel": { + "post": { + "tags": [ + "V1" + ], + "summary": "Cancels a vector store file batch.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_cancel_vector_store_file_batch_v1_vector_stores__vector_store_id__file_batches__batch_id__cancel_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_vector_stores_vector_store_id_file_batches_batch_id_cancel_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A VectorStoreFileBatchObject representing the cancelled file batch.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileBatchObject" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "parameters": [ + { + "name": "batch_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the file batch to cancel." + }, + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the vector store containing the file batch." + } + ] + } + }, + "/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/files": { + "get": { + "tags": [ + "V1" + ], + "summary": "Returns a list of vector store files in a batch.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_list_files_in_vector_store_file_batch_v1_vector_stores__vector_store_id__file_batches__batch_id__files_get", + "parameters": [ + { + "name": "after", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "After" + } + }, + { + "name": "before", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Before" + } + }, + { + "name": "filter", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Filter" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "default": 20, + "title": "Limit" + } + }, + { + "name": "order", + "in": "query", + "required": false, + "schema": { + "type": "string", + "default": "desc", + "title": "Order" + } + }, + { + "name": "batch_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Batch Id" + }, + "description": "The ID of the file batch to list files from." 
+ }, + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Vector Store Id" + }, + "description": "The ID of the vector store containing the file batch." + } + ], + "responses": { + "200": { + "description": "A VectorStoreFilesListInBatchResponse containing the list of files in the batch.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFilesListInBatchResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/vector_stores/{vector_store_id}/files": { + "get": { + "tags": [ + "V1" + ], + "summary": "List files in a vector store.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_list_files_in_vector_store_v1_vector_stores__vector_store_id__files_get", + "parameters": [ + { + "name": "after", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "After" + } + }, + { + "name": "before", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Before" + } + }, + { + "name": "filter", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Filter" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "default": 20, + "title": "Limit" + } + }, + { + "name": "order", + "in": "query", + "required": false, + "schema": { + "type": "string", + "default": "desc", + "title": "Order" + } + }, + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Vector Store Id" + }, + "description": "The ID of the vector store to list files from." 
+ } + ], + "responses": { + "200": { + "description": "A VectorStoreListFilesResponse containing the list of files.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreListFilesResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Attach a file to a vector store.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_attach_file_to_vector_store_v1_vector_stores__vector_store_id__files_post", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_vector_stores_vector_store_id_files_Request" + } + } + } + }, + "responses": { + "200": { + "description": "A VectorStoreFileObject representing the attached file.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + }, + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the vector store to attach the file to." + } + ] + } + }, + "/v1/vector_stores/{vector_store_id}/files/{file_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Delete a vector store file.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_delete_vector_store_file_v1_vector_stores__vector_store_id__files__file_id__delete", + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Vector Store Id" + }, + "description": "The ID of the vector store containing the file to delete." + }, + { + "name": "file_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "File Id" + }, + "description": "The ID of the file to delete." 
+ } + ], + "responses": { + "200": { + "description": "A VectorStoreFileDeleteResponse indicating the deletion status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileDeleteResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieves a vector store file.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_retrieve_vector_store_file_v1_vector_stores__vector_store_id__files__file_id__get", + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Vector Store Id" + }, + "description": "The ID of the vector store containing the file to retrieve." + }, + { + "name": "file_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "File Id" + }, + "description": "The ID of the file to retrieve." + } + ], + "responses": { + "200": { + "description": "A VectorStoreFileObject representing the file.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Updates a vector store file.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_update_vector_store_file_v1_vector_stores__vector_store_id__files__file_id__post", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_vector_stores_vector_store_id_files_file_id_Request" + } + } + } + }, + "responses": { + "200": { + "description": "A VectorStoreFileObject representing the updated file.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + }, + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the vector store containing the file to update." + }, + { + "name": "file_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the file to update." 
+ } + ] + } + }, + "/v1/vector_stores/{vector_store_id}/files/{file_id}/content": { + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieves the contents of a vector store file.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_retrieve_vector_store_file_contents_v1_vector_stores__vector_store_id__files__file_id__content_get", + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Vector Store Id" + }, + "description": "The ID of the vector store containing the file to retrieve." + }, + { + "name": "file_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "File Id" + }, + "description": "The ID of the file to retrieve." + } + ], + "responses": { + "200": { + "description": "A list of InterleavedContent representing the file contents.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileContentsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/vector_stores/{vector_store_id}/search": { + "post": { + "tags": [ + "V1" + ], + "summary": "Search for chunks in a vector store.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_search_vector_store_v1_vector_stores__vector_store_id__search_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_vector_stores_vector_store_id_search_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A VectorStoreSearchResponse containing the search results.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreSearchResponsePage" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the vector store to search." 
+ } + ] + } + }, + "/v1/version": { + "get": { + "tags": [ + "V1" + ], + "summary": "Get version.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "version_v1_version_get", + "responses": { + "200": { + "description": "Version information containing the service version number.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VersionInfo" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + } + }, + "components": { + "schemas": { + "AgentTurnInputType": { + "properties": { + "type": { + "type": "string", + "const": "agent_turn_input", + "title": "Type", + "default": "agent_turn_input" + } + }, + "type": "object", + "title": "AgentTurnInputType", + "description": "Parameter type for agent turn input." + }, + "AggregationFunctionType": { + "type": "string", + "enum": [ + "average", + "weighted_average", + "median", + "categorical_count", + "accuracy" + ], + "title": "AggregationFunctionType", + "description": "Types of aggregation functions for scoring results." + }, + "AllowedToolsFilter": { + "properties": { + "tool_names": { + "title": "Tool Names", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object", + "title": "AllowedToolsFilter", + "description": "Filter configuration for restricting which MCP tools can be used." + }, + "ApprovalFilter": { + "properties": { + "always": { + "title": "Always", + "items": { + "type": "string" + }, + "type": "array" + }, + "never": { + "title": "Never", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object", + "title": "ApprovalFilter", + "description": "Filter configuration for MCP tool approval requirements." + }, + "ArrayType": { + "properties": { + "type": { + "type": "string", + "const": "array", + "title": "Type", + "default": "array" + } + }, + "type": "object", + "title": "ArrayType", + "description": "Parameter type for array values." + }, + "BasicScoringFnParams": { + "properties": { + "type": { + "type": "string", + "const": "basic", + "title": "Type", + "default": "basic" + }, + "aggregation_functions": { + "items": { + "$ref": "#/components/schemas/AggregationFunctionType" + }, + "type": "array", + "title": "Aggregation Functions", + "description": "Aggregation functions to apply to the scores of each row" + } + }, + "type": "object", + "title": "BasicScoringFnParams", + "description": "Parameters for basic scoring function configuration." 
+ }, + "Batch": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "completion_window": { + "type": "string", + "title": "Completion Window" + }, + "created_at": { + "type": "integer", + "title": "Created At" + }, + "endpoint": { + "type": "string", + "title": "Endpoint" + }, + "input_file_id": { + "type": "string", + "title": "Input File Id" + }, + "object": { + "type": "string", + "const": "batch", + "title": "Object" + }, + "status": { + "type": "string", + "enum": [ + "validating", + "failed", + "in_progress", + "finalizing", + "completed", + "expired", + "cancelling", + "cancelled" + ], + "title": "Status" + }, + "cancelled_at": { + "title": "Cancelled At", + "type": "integer" + }, + "cancelling_at": { + "title": "Cancelling At", + "type": "integer" + }, + "completed_at": { + "title": "Completed At", + "type": "integer" + }, + "error_file_id": { + "title": "Error File Id", + "type": "string" + }, + "errors": { + "$ref": "#/components/schemas/Errors" + }, + "expired_at": { + "title": "Expired At", + "type": "integer" + }, + "expires_at": { + "title": "Expires At", + "type": "integer" + }, + "failed_at": { + "title": "Failed At", + "type": "integer" + }, + "finalizing_at": { + "title": "Finalizing At", + "type": "integer" + }, + "in_progress_at": { + "title": "In Progress At", + "type": "integer" + }, + "metadata": { + "title": "Metadata", + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "model": { + "title": "Model", + "type": "string" + }, + "output_file_id": { + "title": "Output File Id", + "type": "string" + }, + "request_counts": { + "$ref": "#/components/schemas/BatchRequestCounts" + }, + "usage": { + "$ref": "#/components/schemas/BatchUsage" + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "id", + "completion_window", + "created_at", + "endpoint", + "input_file_id", + "object", + "status" + ], + "title": "Batch" + }, + "BatchError": { + "properties": { + "code": { + "title": "Code", + "type": "string" + }, + "line": { + "title": "Line", + "type": "integer" + }, + "message": { + "title": "Message", + "type": "string" + }, + "param": { + "title": "Param", + "type": "string" + } + }, + "additionalProperties": true, + "type": "object", + "title": "BatchError" + }, + "BatchRequestCounts": { + "properties": { + "completed": { + "type": "integer", + "title": "Completed" + }, + "failed": { + "type": "integer", + "title": "Failed" + }, + "total": { + "type": "integer", + "title": "Total" + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "completed", + "failed", + "total" + ], + "title": "BatchRequestCounts" + }, + "BatchUsage": { + "properties": { + "input_tokens": { + "type": "integer", + "title": "Input Tokens" + }, + "input_tokens_details": { + "$ref": "#/components/schemas/InputTokensDetails" + }, + "output_tokens": { + "type": "integer", + "title": "Output Tokens" + }, + "output_tokens_details": { + "$ref": "#/components/schemas/OutputTokensDetails" + }, + "total_tokens": { + "type": "integer", + "title": "Total Tokens" + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "input_tokens", + "input_tokens_details", + "output_tokens", + "output_tokens_details", + "total_tokens" + ], + "title": "BatchUsage" + }, + "BooleanType": { + "properties": { + "type": { + "type": "string", + "const": "boolean", + "title": "Type", + "default": "boolean" + } + }, + "type": "object", + "title": "BooleanType", + "description": "Parameter type for boolean values." 
+ }, + "ChatCompletionInputType": { + "properties": { + "type": { + "type": "string", + "const": "chat_completion_input", + "title": "Type", + "default": "chat_completion_input" + } + }, + "type": "object", + "title": "ChatCompletionInputType", + "description": "Parameter type for chat completion input." + }, + "Chunk-Output": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "chunk_id": { + "type": "string", + "title": "Chunk Id" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata" + }, + "embedding": { + "title": "Embedding", + "items": { + "type": "number" + }, + "type": "array" + }, + "chunk_metadata": { + "$ref": "#/components/schemas/ChunkMetadata" + } + }, + "type": "object", + "required": [ + "content", + "chunk_id" + ], + "title": "Chunk", + "description": "A chunk of content that can be inserted into a vector database." + }, + "ChunkMetadata": { + "properties": { + "chunk_id": { + "title": "Chunk Id", + "type": "string" + }, + "document_id": { + "title": "Document Id", + "type": "string" + }, + "source": { + "title": "Source", + "type": "string" + }, + "created_timestamp": { + "title": "Created Timestamp", + "type": "integer" + }, + "updated_timestamp": { + "title": "Updated Timestamp", + "type": "integer" + }, + "chunk_window": { + "title": "Chunk Window", + "type": "string" + }, + "chunk_tokenizer": { + "title": "Chunk Tokenizer", + "type": "string" + }, + "chunk_embedding_model": { + "title": "Chunk Embedding Model", + "type": "string" + }, + "chunk_embedding_dimension": { + "title": "Chunk Embedding Dimension", + "type": "integer" + }, + "content_token_count": { + "title": "Content Token Count", + "type": "integer" + }, + "metadata_token_count": { + "title": "Metadata Token Count", + "type": "integer" + } + }, + "type": "object", + "title": "ChunkMetadata", + "description": "`ChunkMetadata` is backend metadata for a `Chunk` that is used to store additional information about the chunk that\n will not be used in the context during inference, but is required for backend functionality. The `ChunkMetadata`\n is set during chunk creation in `MemoryToolRuntimeImpl().insert()`and is not expected to change after.\n Use `Chunk.metadata` for metadata that will be used in the context during inference." + }, + "CompletionInputType": { + "properties": { + "type": { + "type": "string", + "const": "completion_input", + "title": "Type", + "default": "completion_input" + } + }, + "type": "object", + "title": "CompletionInputType", + "description": "Parameter type for completion input." + }, + "Conversation": { + "properties": { + "id": { + "type": "string", + "title": "Id", + "description": "The unique ID of the conversation." 
+ }, + "object": { + "type": "string", + "const": "conversation", + "title": "Object", + "description": "The object type, which is always conversation.", + "default": "conversation" + }, + "created_at": { + "type": "integer", + "title": "Created At", + "description": "The time at which the conversation was created, measured in seconds since the Unix epoch." + }, + "metadata": { + "title": "Metadata", + "description": "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.", + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "items": { + "title": "Items", + "description": "Initial items to include in the conversation context. You may add up to 20 items at a time.", + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array" + } + }, + "type": "object", + "required": [ + "id", + "created_at" + ], + "title": "Conversation", + "description": "OpenAI-compatible conversation object." + }, + "ConversationItemInclude": { + "type": "string", + "enum": [ + "web_search_call.action.sources", + "code_interpreter_call.outputs", + "computer_call_output.output.image_url", + "file_search_call.results", + "message.input_image.image_url", + "message.output_text.logprobs", + "reasoning.encrypted_content" + ], + "title": "ConversationItemInclude", + "description": "Specify additional output data to include in the model response." + }, + "ConversationItemList": { + "properties": { + "object": { + "type": "string", + "title": "Object", + "description": "Object type", + "default": "list" + }, + "data": { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage-Output" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalResponse" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "file_search_call": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall", + "function_call": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall", + "function_call_output": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput", + "mcp_approval_request": "#/components/schemas/OpenAIResponseMCPApprovalRequest", + "mcp_approval_response": "#/components/schemas/OpenAIResponseMCPApprovalResponse", + "mcp_call": "#/components/schemas/OpenAIResponseOutputMessageMCPCall", + "mcp_list_tools": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools", + "message": "#/components/schemas/OpenAIResponseMessage-Output", + "web_search_call": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + } + } + }, + "type": "array", + "title": "Data", + "description": "List of conversation items" + }, + "first_id": { + "title": "First Id", + "description": "The ID of the first item in the list", + "type": "string" + }, + "last_id": { + "title": "Last Id", + 
"description": "The ID of the last item in the list", + "type": "string" + }, + "has_more": { + "type": "boolean", + "title": "Has More", + "description": "Whether there are more items available", + "default": false + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ConversationItemList", + "description": "List of conversation items with pagination." + }, + "DefaultRAGQueryGeneratorConfig": { + "properties": { + "type": { + "type": "string", + "const": "default", + "title": "Type", + "default": "default" + }, + "separator": { + "type": "string", + "title": "Separator", + "default": " " + } + }, + "type": "object", + "title": "DefaultRAGQueryGeneratorConfig", + "description": "Configuration for the default RAG query generator." + }, + "Errors": { + "properties": { + "data": { + "title": "Data", + "items": { + "$ref": "#/components/schemas/BatchError" + }, + "type": "array" + }, + "object": { + "title": "Object", + "type": "string" + } + }, + "additionalProperties": true, + "type": "object", + "title": "Errors" + }, + "HealthInfo": { + "properties": { + "status": { + "$ref": "#/components/schemas/HealthStatus" + } + }, + "type": "object", + "required": [ + "status" + ], + "title": "HealthInfo", + "description": "Health status information for the service." + }, + "HealthStatus": { + "type": "string", + "enum": [ + "OK", + "Error", + "Not Implemented" + ], + "title": "HealthStatus" + }, + "ImageContentItem-Output": { + "properties": { + "type": { + "type": "string", + "const": "image", + "title": "Type", + "default": "image" + }, + "image": { + "$ref": "#/components/schemas/_URLOrData" + } + }, + "type": "object", + "required": [ + "image" + ], + "title": "ImageContentItem", + "description": "A image content item" + }, + "InputTokensDetails": { + "properties": { + "cached_tokens": { + "type": "integer", + "title": "Cached Tokens" + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "cached_tokens" + ], + "title": "InputTokensDetails" + }, + "JsonType": { + "properties": { + "type": { + "type": "string", + "const": "json", + "title": "Type", + "default": "json" + } + }, + "type": "object", + "title": "JsonType", + "description": "Parameter type for JSON values." + }, + "LLMAsJudgeScoringFnParams": { + "properties": { + "type": { + "type": "string", + "const": "llm_as_judge", + "title": "Type", + "default": "llm_as_judge" + }, + "judge_model": { + "type": "string", + "title": "Judge Model" + }, + "prompt_template": { + "title": "Prompt Template", + "type": "string" + }, + "judge_score_regexes": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Judge Score Regexes", + "description": "Regexes to extract the answer from generated response" + }, + "aggregation_functions": { + "items": { + "$ref": "#/components/schemas/AggregationFunctionType" + }, + "type": "array", + "title": "Aggregation Functions", + "description": "Aggregation functions to apply to the scores of each row" + } + }, + "type": "object", + "required": [ + "judge_model" + ], + "title": "LLMAsJudgeScoringFnParams", + "description": "Parameters for LLM-as-judge scoring function configuration." 
+ }, + "LLMRAGQueryGeneratorConfig": { + "properties": { + "type": { + "type": "string", + "const": "llm", + "title": "Type", + "default": "llm" + }, + "model": { + "type": "string", + "title": "Model" + }, + "template": { + "type": "string", + "title": "Template" + } + }, + "type": "object", + "required": [ + "model", + "template" + ], + "title": "LLMRAGQueryGeneratorConfig", + "description": "Configuration for the LLM-based RAG query generator." + }, + "ListModelsResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/Model" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListModelsResponse" + }, + "ListPromptsResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/Prompt" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListPromptsResponse", + "description": "Response model to list prompts." + }, + "ListProvidersResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/ProviderInfo" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListProvidersResponse", + "description": "Response containing a list of all available providers." + }, + "ListRoutesResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/RouteInfo" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListRoutesResponse", + "description": "Response containing a list of all available API routes." + }, + "ListScoringFunctionsResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/ScoringFn-Output" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListScoringFunctionsResponse" + }, + "ListShieldsResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/Shield" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListShieldsResponse" + }, + "ListToolGroupsResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/ToolGroup" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListToolGroupsResponse", + "description": "Response containing a list of tool groups." + }, + "MCPListToolsTool": { + "properties": { + "input_schema": { + "additionalProperties": true, + "type": "object", + "title": "Input Schema" + }, + "name": { + "type": "string", + "title": "Name" + }, + "description": { + "title": "Description", + "type": "string" + } + }, + "type": "object", + "required": [ + "input_schema", + "name" + ], + "title": "MCPListToolsTool", + "description": "Tool definition returned by MCP list tools operation." 
+ }, + "Model": { + "properties": { + "identifier": { + "type": "string", + "title": "Identifier", + "description": "Unique identifier for this resource in llama stack" + }, + "provider_resource_id": { + "title": "Provider Resource Id", + "description": "Unique identifier for this resource in the provider", + "type": "string" + }, + "provider_id": { + "type": "string", + "title": "Provider Id", + "description": "ID of the provider that owns this resource" + }, + "type": { + "type": "string", + "const": "model", + "title": "Type", + "default": "model" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata", + "description": "Any additional metadata for this model" + }, + "model_type": { + "$ref": "#/components/schemas/ModelType", + "default": "llm" + } + }, + "type": "object", + "required": [ + "identifier", + "provider_id" + ], + "title": "Model", + "description": "A model resource representing an AI model registered in Llama Stack." + }, + "ModelType": { + "type": "string", + "enum": [ + "llm", + "embedding", + "rerank" + ], + "title": "ModelType", + "description": "Enumeration of supported model types in Llama Stack." + }, + "ModerationObject": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "model": { + "type": "string", + "title": "Model" + }, + "results": { + "items": { + "$ref": "#/components/schemas/ModerationObjectResults" + }, + "type": "array", + "title": "Results" + } + }, + "type": "object", + "required": [ + "id", + "model", + "results" + ], + "title": "ModerationObject", + "description": "A moderation object." + }, + "ModerationObjectResults": { + "properties": { + "flagged": { + "type": "boolean", + "title": "Flagged" + }, + "categories": { + "title": "Categories", + "additionalProperties": { + "type": "boolean" + }, + "type": "object" + }, + "category_applied_input_types": { + "title": "Category Applied Input Types", + "additionalProperties": { + "items": { + "type": "string" + }, + "type": "array" + }, + "type": "object" + }, + "category_scores": { + "title": "Category Scores", + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + "user_message": { + "title": "User Message", + "type": "string" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata" + } + }, + "type": "object", + "required": [ + "flagged" + ], + "title": "ModerationObjectResults", + "description": "A moderation object." + }, + "NumberType": { + "properties": { + "type": { + "type": "string", + "const": "number", + "title": "Type", + "default": "number" + } + }, + "type": "object", + "title": "NumberType", + "description": "Parameter type for numeric values." + }, + "ObjectType": { + "properties": { + "type": { + "type": "string", + "const": "object", + "title": "Type", + "default": "object" + } + }, + "type": "object", + "title": "ObjectType", + "description": "Parameter type for object values." 
+ }, + "OpenAIAssistantMessageParam-Input": { + "properties": { + "role": { + "type": "string", + "const": "assistant", + "title": "Role", + "default": "assistant" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + "type": "array" + } + ], + "title": "Content" + }, + "name": { + "title": "Name", + "type": "string" + }, + "tool_calls": { + "title": "Tool Calls", + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionToolCall" + }, + "type": "array" + } + }, + "type": "object", + "title": "OpenAIAssistantMessageParam", + "description": "A message containing the model's (assistant) response in an OpenAI-compatible chat completion request." + }, + "OpenAIAssistantMessageParam-Output": { + "properties": { + "role": { + "type": "string", + "const": "assistant", + "title": "Role", + "default": "assistant" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + "type": "array" + } + ], + "title": "Content" + }, + "name": { + "title": "Name", + "type": "string" + }, + "tool_calls": { + "title": "Tool Calls", + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionToolCall" + }, + "type": "array" + } + }, + "type": "object", + "title": "OpenAIAssistantMessageParam", + "description": "A message containing the model's (assistant) response in an OpenAI-compatible chat completion request." + }, + "OpenAIChatCompletion": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "choices": { + "items": { + "$ref": "#/components/schemas/OpenAIChoice-Output" + }, + "type": "array", + "title": "Choices" + }, + "object": { + "type": "string", + "const": "chat.completion", + "title": "Object", + "default": "chat.completion" + }, + "created": { + "type": "integer", + "title": "Created" + }, + "model": { + "type": "string", + "title": "Model" + }, + "usage": { + "$ref": "#/components/schemas/OpenAIChatCompletionUsage" + } + }, + "type": "object", + "required": [ + "id", + "choices", + "created", + "model" + ], + "title": "OpenAIChatCompletion", + "description": "Response from an OpenAI-compatible chat completion request." + }, + "OpenAIChatCompletionContentPartImageParam": { + "properties": { + "type": { + "type": "string", + "const": "image_url", + "title": "Type", + "default": "image_url" + }, + "image_url": { + "$ref": "#/components/schemas/OpenAIImageURL" + } + }, + "type": "object", + "required": [ + "image_url" + ], + "title": "OpenAIChatCompletionContentPartImageParam", + "description": "Image content part for OpenAI-compatible chat completion messages." + }, + "OpenAIChatCompletionContentPartTextParam": { + "properties": { + "type": { + "type": "string", + "const": "text", + "title": "Type", + "default": "text" + }, + "text": { + "type": "string", + "title": "Text" + } + }, + "type": "object", + "required": [ + "text" + ], + "title": "OpenAIChatCompletionContentPartTextParam", + "description": "Text content part for OpenAI-compatible chat completion messages." 
+ }, + "OpenAIChatCompletionRequestWithExtraBody": { + "properties": { + "model": { + "type": "string", + "title": "Model" + }, + "messages": { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIUserMessageParam-Input" + }, + { + "$ref": "#/components/schemas/OpenAISystemMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIAssistantMessageParam-Input" + }, + { + "$ref": "#/components/schemas/OpenAIToolMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIDeveloperMessageParam" + } + ], + "discriminator": { + "propertyName": "role", + "mapping": { + "assistant": "#/components/schemas/OpenAIAssistantMessageParam-Input", + "developer": "#/components/schemas/OpenAIDeveloperMessageParam", + "system": "#/components/schemas/OpenAISystemMessageParam", + "tool": "#/components/schemas/OpenAIToolMessageParam", + "user": "#/components/schemas/OpenAIUserMessageParam-Input" + } + } + }, + "type": "array", + "minItems": 1, + "title": "Messages" + }, + "frequency_penalty": { + "title": "Frequency Penalty", + "type": "number" + }, + "function_call": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ], + "title": "Function Call" + }, + "functions": { + "title": "Functions", + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array" + }, + "logit_bias": { + "title": "Logit Bias", + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + "logprobs": { + "title": "Logprobs", + "type": "boolean" + }, + "max_completion_tokens": { + "title": "Max Completion Tokens", + "type": "integer" + }, + "max_tokens": { + "title": "Max Tokens", + "type": "integer" + }, + "n": { + "title": "N", + "type": "integer" + }, + "parallel_tool_calls": { + "title": "Parallel Tool Calls", + "type": "boolean" + }, + "presence_penalty": { + "title": "Presence Penalty", + "type": "number" + }, + "response_format": { + "title": "Response Format", + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseFormatText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseFormatJSONSchema" + }, + { + "$ref": "#/components/schemas/OpenAIResponseFormatJSONObject" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "json_object": "#/components/schemas/OpenAIResponseFormatJSONObject", + "json_schema": "#/components/schemas/OpenAIResponseFormatJSONSchema", + "text": "#/components/schemas/OpenAIResponseFormatText" + } + } + }, + "seed": { + "title": "Seed", + "type": "integer" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ], + "title": "Stop" + }, + "stream": { + "title": "Stream", + "type": "boolean" + }, + "stream_options": { + "title": "Stream Options", + "additionalProperties": true, + "type": "object" + }, + "temperature": { + "title": "Temperature", + "type": "number" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ], + "title": "Tool Choice" + }, + "tools": { + "title": "Tools", + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array" + }, + "top_logprobs": { + "title": "Top Logprobs", + "type": "integer" + }, + "top_p": { + "title": "Top P", + "type": "number" + }, + "user": { + "title": "User", + "type": "string" + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "model", + "messages" + ], + "title": "OpenAIChatCompletionRequestWithExtraBody", + "description": "Request 
parameters for OpenAI-compatible chat completion endpoint." + }, + "OpenAIChatCompletionToolCall": { + "properties": { + "index": { + "title": "Index", + "type": "integer" + }, + "id": { + "title": "Id", + "type": "string" + }, + "type": { + "type": "string", + "const": "function", + "title": "Type", + "default": "function" + }, + "function": { + "$ref": "#/components/schemas/OpenAIChatCompletionToolCallFunction" + } + }, + "type": "object", + "title": "OpenAIChatCompletionToolCall", + "description": "Tool call specification for OpenAI-compatible chat completion responses." + }, + "OpenAIChatCompletionToolCallFunction": { + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "arguments": { + "title": "Arguments", + "type": "string" + } + }, + "type": "object", + "title": "OpenAIChatCompletionToolCallFunction", + "description": "Function call details for OpenAI-compatible tool calls." + }, + "OpenAIChatCompletionUsage": { + "properties": { + "prompt_tokens": { + "type": "integer", + "title": "Prompt Tokens" + }, + "completion_tokens": { + "type": "integer", + "title": "Completion Tokens" + }, + "total_tokens": { + "type": "integer", + "title": "Total Tokens" + }, + "prompt_tokens_details": { + "$ref": "#/components/schemas/OpenAIChatCompletionUsagePromptTokensDetails" + }, + "completion_tokens_details": { + "$ref": "#/components/schemas/OpenAIChatCompletionUsageCompletionTokensDetails" + } + }, + "type": "object", + "required": [ + "prompt_tokens", + "completion_tokens", + "total_tokens" + ], + "title": "OpenAIChatCompletionUsage", + "description": "Usage information for OpenAI chat completion." + }, + "OpenAIChatCompletionUsageCompletionTokensDetails": { + "properties": { + "reasoning_tokens": { + "title": "Reasoning Tokens", + "type": "integer" + } + }, + "type": "object", + "title": "OpenAIChatCompletionUsageCompletionTokensDetails", + "description": "Token details for output tokens in OpenAI chat completion usage." + }, + "OpenAIChatCompletionUsagePromptTokensDetails": { + "properties": { + "cached_tokens": { + "title": "Cached Tokens", + "type": "integer" + } + }, + "type": "object", + "title": "OpenAIChatCompletionUsagePromptTokensDetails", + "description": "Token details for prompt tokens in OpenAI chat completion usage." + }, + "OpenAIChoice-Output": { + "properties": { + "message": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIUserMessageParam-Output" + }, + { + "$ref": "#/components/schemas/OpenAISystemMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIAssistantMessageParam-Output" + }, + { + "$ref": "#/components/schemas/OpenAIToolMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIDeveloperMessageParam" + } + ], + "title": "Message", + "discriminator": { + "propertyName": "role", + "mapping": { + "assistant": "#/components/schemas/OpenAIAssistantMessageParam-Output", + "developer": "#/components/schemas/OpenAIDeveloperMessageParam", + "system": "#/components/schemas/OpenAISystemMessageParam", + "tool": "#/components/schemas/OpenAIToolMessageParam", + "user": "#/components/schemas/OpenAIUserMessageParam-Output" + } + } + }, + "finish_reason": { + "type": "string", + "title": "Finish Reason" + }, + "index": { + "type": "integer", + "title": "Index" + }, + "logprobs": { + "$ref": "#/components/schemas/OpenAIChoiceLogprobs-Output" + } + }, + "type": "object", + "required": [ + "message", + "finish_reason", + "index" + ], + "title": "OpenAIChoice", + "description": "A choice from an OpenAI-compatible chat completion response." 
+ }, + "OpenAIChoiceLogprobs-Output": { + "properties": { + "content": { + "title": "Content", + "items": { + "$ref": "#/components/schemas/OpenAITokenLogProb" + }, + "type": "array" + }, + "refusal": { + "title": "Refusal", + "items": { + "$ref": "#/components/schemas/OpenAITokenLogProb" + }, + "type": "array" + } + }, + "type": "object", + "title": "OpenAIChoiceLogprobs", + "description": "The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response." + }, + "OpenAICompletion": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "choices": { + "items": { + "$ref": "#/components/schemas/OpenAICompletionChoice-Output" + }, + "type": "array", + "title": "Choices" + }, + "created": { + "type": "integer", + "title": "Created" + }, + "model": { + "type": "string", + "title": "Model" + }, + "object": { + "type": "string", + "const": "text_completion", + "title": "Object", + "default": "text_completion" + } + }, + "type": "object", + "required": [ + "id", + "choices", + "created", + "model" + ], + "title": "OpenAICompletion", + "description": "Response from an OpenAI-compatible completion request." + }, + "OpenAICompletionChoice-Output": { + "properties": { + "finish_reason": { + "type": "string", + "title": "Finish Reason" + }, + "text": { + "type": "string", + "title": "Text" + }, + "index": { + "type": "integer", + "title": "Index" + }, + "logprobs": { + "$ref": "#/components/schemas/OpenAIChoiceLogprobs-Output" + } + }, + "type": "object", + "required": [ + "finish_reason", + "text", + "index" + ], + "title": "OpenAICompletionChoice", + "description": "A choice from an OpenAI-compatible completion response." + }, + "OpenAICompletionRequestWithExtraBody": { + "properties": { + "model": { + "type": "string", + "title": "Model" + }, + "prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "items": { + "type": "integer" + }, + "type": "array" + }, + { + "items": { + "items": { + "type": "integer" + }, + "type": "array" + }, + "type": "array" + } + ], + "title": "Prompt" + }, + "best_of": { + "title": "Best Of", + "type": "integer" + }, + "echo": { + "title": "Echo", + "type": "boolean" + }, + "frequency_penalty": { + "title": "Frequency Penalty", + "type": "number" + }, + "logit_bias": { + "title": "Logit Bias", + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + "logprobs": { + "title": "Logprobs", + "type": "boolean" + }, + "max_tokens": { + "title": "Max Tokens", + "type": "integer" + }, + "n": { + "title": "N", + "type": "integer" + }, + "presence_penalty": { + "title": "Presence Penalty", + "type": "number" + }, + "seed": { + "title": "Seed", + "type": "integer" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ], + "title": "Stop" + }, + "stream": { + "title": "Stream", + "type": "boolean" + }, + "stream_options": { + "title": "Stream Options", + "additionalProperties": true, + "type": "object" + }, + "temperature": { + "title": "Temperature", + "type": "number" + }, + "top_p": { + "title": "Top P", + "type": "number" + }, + "user": { + "title": "User", + "type": "string" + }, + "suffix": { + "title": "Suffix", + "type": "string" + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "model", + "prompt" + ], + "title": "OpenAICompletionRequestWithExtraBody", + "description": "Request parameters for OpenAI-compatible completion endpoint." 
+ }, + "OpenAICreateVectorStoreFileBatchRequestWithExtraBody": { + "properties": { + "file_ids": { + "items": { + "type": "string" + }, + "type": "array", + "title": "File Ids" + }, + "attributes": { + "title": "Attributes", + "additionalProperties": true, + "type": "object" + }, + "chunking_strategy": { + "title": "Chunking Strategy", + "oneOf": [ + { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyAuto" + }, + { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyStatic" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "auto": "#/components/schemas/VectorStoreChunkingStrategyAuto", + "static": "#/components/schemas/VectorStoreChunkingStrategyStatic" + } + } + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "file_ids" + ], + "title": "OpenAICreateVectorStoreFileBatchRequestWithExtraBody", + "description": "Request to create a vector store file batch with extra_body support." + }, + "OpenAICreateVectorStoreRequestWithExtraBody": { + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "file_ids": { + "title": "File Ids", + "items": { + "type": "string" + }, + "type": "array" + }, + "expires_after": { + "title": "Expires After", + "additionalProperties": true, + "type": "object" + }, + "chunking_strategy": { + "title": "Chunking Strategy", + "additionalProperties": true, + "type": "object" + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object" + } + }, + "additionalProperties": true, + "type": "object", + "title": "OpenAICreateVectorStoreRequestWithExtraBody", + "description": "Request to create a vector store with extra_body support." + }, + "OpenAIDeveloperMessageParam": { + "properties": { + "role": { + "type": "string", + "const": "developer", + "title": "Role", + "default": "developer" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + "type": "array" + } + ], + "title": "Content" + }, + "name": { + "title": "Name", + "type": "string" + } + }, + "type": "object", + "required": [ + "content" + ], + "title": "OpenAIDeveloperMessageParam", + "description": "A message from the developer in an OpenAI-compatible chat completion request." + }, + "OpenAIEmbeddingData": { + "properties": { + "object": { + "type": "string", + "const": "embedding", + "title": "Object", + "default": "embedding" + }, + "embedding": { + "anyOf": [ + { + "items": { + "type": "number" + }, + "type": "array" + }, + { + "type": "string" + } + ], + "title": "Embedding" + }, + "index": { + "type": "integer", + "title": "Index" + } + }, + "type": "object", + "required": [ + "embedding", + "index" + ], + "title": "OpenAIEmbeddingData", + "description": "A single embedding data object from an OpenAI-compatible embeddings response." + }, + "OpenAIEmbeddingUsage": { + "properties": { + "prompt_tokens": { + "type": "integer", + "title": "Prompt Tokens" + }, + "total_tokens": { + "type": "integer", + "title": "Total Tokens" + } + }, + "type": "object", + "required": [ + "prompt_tokens", + "total_tokens" + ], + "title": "OpenAIEmbeddingUsage", + "description": "Usage information for an OpenAI-compatible embeddings response." 
+ }, + "OpenAIEmbeddingsRequestWithExtraBody": { + "properties": { + "model": { + "type": "string", + "title": "Model" + }, + "input": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ], + "title": "Input" + }, + "encoding_format": { + "title": "Encoding Format", + "default": "float", + "type": "string" + }, + "dimensions": { + "title": "Dimensions", + "type": "integer" + }, + "user": { + "title": "User", + "type": "string" + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "model", + "input" + ], + "title": "OpenAIEmbeddingsRequestWithExtraBody", + "description": "Request parameters for OpenAI-compatible embeddings endpoint." + }, + "OpenAIEmbeddingsResponse": { + "properties": { + "object": { + "type": "string", + "const": "list", + "title": "Object", + "default": "list" + }, + "data": { + "items": { + "$ref": "#/components/schemas/OpenAIEmbeddingData" + }, + "type": "array", + "title": "Data" + }, + "model": { + "type": "string", + "title": "Model" + }, + "usage": { + "$ref": "#/components/schemas/OpenAIEmbeddingUsage" + } + }, + "type": "object", + "required": [ + "data", + "model", + "usage" + ], + "title": "OpenAIEmbeddingsResponse", + "description": "Response from an OpenAI-compatible embeddings request." + }, + "OpenAIFile": { + "properties": { + "type": { + "type": "string", + "const": "file", + "title": "Type", + "default": "file" + }, + "file": { + "$ref": "#/components/schemas/OpenAIFileFile" + } + }, + "type": "object", + "required": [ + "file" + ], + "title": "OpenAIFile" + }, + "OpenAIFileFile": { + "properties": { + "file_data": { + "title": "File Data", + "type": "string" + }, + "file_id": { + "title": "File Id", + "type": "string" + }, + "filename": { + "title": "Filename", + "type": "string" + } + }, + "type": "object", + "title": "OpenAIFileFile" + }, + "OpenAIFileObject": { + "properties": { + "object": { + "type": "string", + "const": "file", + "title": "Object", + "default": "file" + }, + "id": { + "type": "string", + "title": "Id" + }, + "bytes": { + "type": "integer", + "title": "Bytes" + }, + "created_at": { + "type": "integer", + "title": "Created At" + }, + "expires_at": { + "type": "integer", + "title": "Expires At" + }, + "filename": { + "type": "string", + "title": "Filename" + }, + "purpose": { + "$ref": "#/components/schemas/OpenAIFilePurpose" + } + }, + "type": "object", + "required": [ + "id", + "bytes", + "created_at", + "expires_at", + "filename", + "purpose" + ], + "title": "OpenAIFileObject", + "description": "OpenAI File object as defined in the OpenAI Files API." + }, + "OpenAIFilePurpose": { + "type": "string", + "enum": [ + "assistants", + "batch" + ], + "title": "OpenAIFilePurpose", + "description": "Valid purpose values for OpenAI Files API." + }, + "OpenAIImageURL": { + "properties": { + "url": { + "type": "string", + "title": "Url" + }, + "detail": { + "title": "Detail", + "type": "string" + } + }, + "type": "object", + "required": [ + "url" + ], + "title": "OpenAIImageURL", + "description": "Image URL specification for OpenAI-compatible chat completion messages." 
+ }, + "OpenAIJSONSchema": { + "properties": { + "name": { + "type": "string", + "title": "Name" + }, + "description": { + "title": "Description", + "type": "string" + }, + "strict": { + "title": "Strict", + "type": "boolean" + }, + "schema": { + "title": "Schema", + "additionalProperties": true, + "type": "object" + } + }, + "type": "object", + "title": "OpenAIJSONSchema", + "description": "JSON schema specification for OpenAI-compatible structured response format." + }, + "OpenAIResponseAnnotationCitation": { + "properties": { + "type": { + "type": "string", + "const": "url_citation", + "title": "Type", + "default": "url_citation" + }, + "end_index": { + "type": "integer", + "title": "End Index" + }, + "start_index": { + "type": "integer", + "title": "Start Index" + }, + "title": { + "type": "string", + "title": "Title" + }, + "url": { + "type": "string", + "title": "Url" + } + }, + "type": "object", + "required": [ + "end_index", + "start_index", + "title", + "url" + ], + "title": "OpenAIResponseAnnotationCitation", + "description": "URL citation annotation for referencing external web resources." + }, + "OpenAIResponseAnnotationContainerFileCitation": { + "properties": { + "type": { + "type": "string", + "const": "container_file_citation", + "title": "Type", + "default": "container_file_citation" + }, + "container_id": { + "type": "string", + "title": "Container Id" + }, + "end_index": { + "type": "integer", + "title": "End Index" + }, + "file_id": { + "type": "string", + "title": "File Id" + }, + "filename": { + "type": "string", + "title": "Filename" + }, + "start_index": { + "type": "integer", + "title": "Start Index" + } + }, + "type": "object", + "required": [ + "container_id", + "end_index", + "file_id", + "filename", + "start_index" + ], + "title": "OpenAIResponseAnnotationContainerFileCitation" + }, + "OpenAIResponseAnnotationFileCitation": { + "properties": { + "type": { + "type": "string", + "const": "file_citation", + "title": "Type", + "default": "file_citation" + }, + "file_id": { + "type": "string", + "title": "File Id" + }, + "filename": { + "type": "string", + "title": "Filename" + }, + "index": { + "type": "integer", + "title": "Index" + } + }, + "type": "object", + "required": [ + "file_id", + "filename", + "index" + ], + "title": "OpenAIResponseAnnotationFileCitation", + "description": "File citation annotation for referencing specific files in response content." + }, + "OpenAIResponseAnnotationFilePath": { + "properties": { + "type": { + "type": "string", + "const": "file_path", + "title": "Type", + "default": "file_path" + }, + "file_id": { + "type": "string", + "title": "File Id" + }, + "index": { + "type": "integer", + "title": "Index" + } + }, + "type": "object", + "required": [ + "file_id", + "index" + ], + "title": "OpenAIResponseAnnotationFilePath" + }, + "OpenAIResponseContentPartRefusal": { + "properties": { + "type": { + "type": "string", + "const": "refusal", + "title": "Type", + "default": "refusal" + }, + "refusal": { + "type": "string", + "title": "Refusal" + } + }, + "type": "object", + "required": [ + "refusal" + ], + "title": "OpenAIResponseContentPartRefusal", + "description": "Refusal content within a streamed response part." 
+ }, + "OpenAIResponseError": { + "properties": { + "code": { + "type": "string", + "title": "Code" + }, + "message": { + "type": "string", + "title": "Message" + } + }, + "type": "object", + "required": [ + "code", + "message" + ], + "title": "OpenAIResponseError", + "description": "Error details for failed OpenAI response requests." + }, + "OpenAIResponseFormatJSONObject": { + "properties": { + "type": { + "type": "string", + "const": "json_object", + "title": "Type", + "default": "json_object" + } + }, + "type": "object", + "title": "OpenAIResponseFormatJSONObject", + "description": "JSON object response format for OpenAI-compatible chat completion requests." + }, + "OpenAIResponseFormatJSONSchema": { + "properties": { + "type": { + "type": "string", + "const": "json_schema", + "title": "Type", + "default": "json_schema" + }, + "json_schema": { + "$ref": "#/components/schemas/OpenAIJSONSchema" + } + }, + "type": "object", + "required": [ + "json_schema" + ], + "title": "OpenAIResponseFormatJSONSchema", + "description": "JSON schema response format for OpenAI-compatible chat completion requests." + }, + "OpenAIResponseFormatText": { + "properties": { + "type": { + "type": "string", + "const": "text", + "title": "Type", + "default": "text" + } + }, + "type": "object", + "title": "OpenAIResponseFormatText", + "description": "Text response format for OpenAI-compatible chat completion requests." + }, + "OpenAIResponseInputFunctionToolCallOutput": { + "properties": { + "call_id": { + "type": "string", + "title": "Call Id" + }, + "output": { + "type": "string", + "title": "Output" + }, + "type": { + "type": "string", + "const": "function_call_output", + "title": "Type", + "default": "function_call_output" + }, + "id": { + "title": "Id", + "type": "string" + }, + "status": { + "title": "Status", + "type": "string" + } + }, + "type": "object", + "required": [ + "call_id", + "output" + ], + "title": "OpenAIResponseInputFunctionToolCallOutput", + "description": "This represents the output of a function call that gets passed back to the model." + }, + "OpenAIResponseInputMessageContentFile": { + "properties": { + "type": { + "type": "string", + "const": "input_file", + "title": "Type", + "default": "input_file" + }, + "file_data": { + "title": "File Data", + "type": "string" + }, + "file_id": { + "title": "File Id", + "type": "string" + }, + "file_url": { + "title": "File Url", + "type": "string" + }, + "filename": { + "title": "Filename", + "type": "string" + } + }, + "type": "object", + "title": "OpenAIResponseInputMessageContentFile", + "description": "File content for input messages in OpenAI response format." + }, + "OpenAIResponseInputMessageContentImage": { + "properties": { + "detail": { + "anyOf": [ + { + "type": "string", + "const": "low" + }, + { + "type": "string", + "const": "high" + }, + { + "type": "string", + "const": "auto" + } + ], + "title": "Detail", + "default": "auto" + }, + "type": { + "type": "string", + "const": "input_image", + "title": "Type", + "default": "input_image" + }, + "file_id": { + "title": "File Id", + "type": "string" + }, + "image_url": { + "title": "Image Url", + "type": "string" + } + }, + "type": "object", + "title": "OpenAIResponseInputMessageContentImage", + "description": "Image content for input messages in OpenAI response format." 
+ }, + "OpenAIResponseInputMessageContentText": { + "properties": { + "text": { + "type": "string", + "title": "Text" + }, + "type": { + "type": "string", + "const": "input_text", + "title": "Type", + "default": "input_text" + } + }, + "type": "object", + "required": [ + "text" + ], + "title": "OpenAIResponseInputMessageContentText", + "description": "Text content for input messages in OpenAI response format." + }, + "OpenAIResponseInputToolFileSearch": { + "properties": { + "type": { + "type": "string", + "const": "file_search", + "title": "Type", + "default": "file_search" + }, + "vector_store_ids": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Vector Store Ids" + }, + "filters": { + "title": "Filters", + "additionalProperties": true, + "type": "object" + }, + "max_num_results": { + "title": "Max Num Results", + "default": 10, + "type": "integer", + "maximum": 50.0, + "minimum": 1.0 + }, + "ranking_options": { + "$ref": "#/components/schemas/SearchRankingOptions" + } + }, + "type": "object", + "required": [ + "vector_store_ids" + ], + "title": "OpenAIResponseInputToolFileSearch", + "description": "File search tool configuration for OpenAI response inputs." + }, + "OpenAIResponseInputToolFunction": { + "properties": { + "type": { + "type": "string", + "const": "function", + "title": "Type", + "default": "function" + }, + "name": { + "type": "string", + "title": "Name" + }, + "description": { + "title": "Description", + "type": "string" + }, + "parameters": { + "title": "Parameters", + "additionalProperties": true, + "type": "object" + }, + "strict": { + "title": "Strict", + "type": "boolean" + } + }, + "type": "object", + "required": [ + "name", + "parameters" + ], + "title": "OpenAIResponseInputToolFunction", + "description": "Function tool configuration for OpenAI response inputs." + }, + "OpenAIResponseInputToolMCP": { + "properties": { + "type": { + "type": "string", + "const": "mcp", + "title": "Type", + "default": "mcp" + }, + "server_label": { + "type": "string", + "title": "Server Label" + }, + "server_url": { + "type": "string", + "title": "Server Url" + }, + "headers": { + "title": "Headers", + "additionalProperties": true, + "type": "object" + }, + "require_approval": { + "anyOf": [ + { + "type": "string", + "const": "always" + }, + { + "type": "string", + "const": "never" + }, + { + "$ref": "#/components/schemas/ApprovalFilter" + } + ], + "title": "Require Approval", + "default": "never" + }, + "allowed_tools": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "$ref": "#/components/schemas/AllowedToolsFilter" + } + ], + "title": "Allowed Tools" + } + }, + "type": "object", + "required": [ + "server_label", + "server_url" + ], + "title": "OpenAIResponseInputToolMCP", + "description": "Model Context Protocol (MCP) tool configuration for OpenAI response inputs." + }, + "OpenAIResponseInputToolWebSearch": { + "properties": { + "type": { + "anyOf": [ + { + "type": "string", + "const": "web_search" + }, + { + "type": "string", + "const": "web_search_preview" + }, + { + "type": "string", + "const": "web_search_preview_2025_03_11" + } + ], + "title": "Type", + "default": "web_search" + }, + "search_context_size": { + "title": "Search Context Size", + "default": "medium", + "type": "string", + "pattern": "^low|medium|high$" + } + }, + "type": "object", + "title": "OpenAIResponseInputToolWebSearch", + "description": "Web search tool configuration for OpenAI response inputs." 
+ }, + "OpenAIResponseMCPApprovalRequest": { + "properties": { + "arguments": { + "type": "string", + "title": "Arguments" + }, + "id": { + "type": "string", + "title": "Id" + }, + "name": { + "type": "string", + "title": "Name" + }, + "server_label": { + "type": "string", + "title": "Server Label" + }, + "type": { + "type": "string", + "const": "mcp_approval_request", + "title": "Type", + "default": "mcp_approval_request" + } + }, + "type": "object", + "required": [ + "arguments", + "id", + "name", + "server_label" + ], + "title": "OpenAIResponseMCPApprovalRequest", + "description": "A request for human approval of a tool invocation." + }, + "OpenAIResponseMCPApprovalResponse": { + "properties": { + "approval_request_id": { + "type": "string", + "title": "Approval Request Id" + }, + "approve": { + "type": "boolean", + "title": "Approve" + }, + "type": { + "type": "string", + "const": "mcp_approval_response", + "title": "Type", + "default": "mcp_approval_response" + }, + "id": { + "title": "Id", + "type": "string" + }, + "reason": { + "title": "Reason", + "type": "string" + } + }, + "type": "object", + "required": [ + "approval_request_id", + "approve" + ], + "title": "OpenAIResponseMCPApprovalResponse", + "description": "A response to an MCP approval request." + }, + "OpenAIResponseMessage-Input": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentImage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentFile" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "input_file": "#/components/schemas/OpenAIResponseInputMessageContentFile", + "input_image": "#/components/schemas/OpenAIResponseInputMessageContentImage", + "input_text": "#/components/schemas/OpenAIResponseInputMessageContentText" + } + } + }, + "type": "array" + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageContentOutputText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseContentPartRefusal" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "output_text": "#/components/schemas/OpenAIResponseOutputMessageContentOutputText", + "refusal": "#/components/schemas/OpenAIResponseContentPartRefusal" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "role": { + "anyOf": [ + { + "type": "string", + "const": "system" + }, + { + "type": "string", + "const": "developer" + }, + { + "type": "string", + "const": "user" + }, + { + "type": "string", + "const": "assistant" + } + ], + "title": "Role" + }, + "type": { + "type": "string", + "const": "message", + "title": "Type", + "default": "message" + }, + "id": { + "title": "Id", + "type": "string" + }, + "status": { + "title": "Status", + "type": "string" + } + }, + "type": "object", + "required": [ + "content", + "role" + ], + "title": "OpenAIResponseMessage", + "description": "Corresponds to the various Message types in the Responses API.\nThey are all under one type because the Responses API gives them all\nthe same \"type\" value, and there is no way to tell them apart in certain\nscenarios." 
+ }, + "OpenAIResponseMessage-Output": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentImage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentFile" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "input_file": "#/components/schemas/OpenAIResponseInputMessageContentFile", + "input_image": "#/components/schemas/OpenAIResponseInputMessageContentImage", + "input_text": "#/components/schemas/OpenAIResponseInputMessageContentText" + } + } + }, + "type": "array" + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageContentOutputText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseContentPartRefusal" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "output_text": "#/components/schemas/OpenAIResponseOutputMessageContentOutputText", + "refusal": "#/components/schemas/OpenAIResponseContentPartRefusal" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "role": { + "anyOf": [ + { + "type": "string", + "const": "system" + }, + { + "type": "string", + "const": "developer" + }, + { + "type": "string", + "const": "user" + }, + { + "type": "string", + "const": "assistant" + } + ], + "title": "Role" + }, + "type": { + "type": "string", + "const": "message", + "title": "Type", + "default": "message" + }, + "id": { + "title": "Id", + "type": "string" + }, + "status": { + "title": "Status", + "type": "string" + } + }, + "type": "object", + "required": [ + "content", + "role" + ], + "title": "OpenAIResponseMessage", + "description": "Corresponds to the various Message types in the Responses API.\nThey are all under one type because the Responses API gives them all\nthe same \"type\" value, and there is no way to tell them apart in certain\nscenarios." 
+ }, + "OpenAIResponseObject": { + "properties": { + "created_at": { + "type": "integer", + "title": "Created At" + }, + "error": { + "$ref": "#/components/schemas/OpenAIResponseError" + }, + "id": { + "type": "string", + "title": "Id" + }, + "model": { + "type": "string", + "title": "Model" + }, + "object": { + "type": "string", + "const": "response", + "title": "Object", + "default": "response" + }, + "output": { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage-Output" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "file_search_call": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall", + "function_call": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall", + "mcp_approval_request": "#/components/schemas/OpenAIResponseMCPApprovalRequest", + "mcp_call": "#/components/schemas/OpenAIResponseOutputMessageMCPCall", + "mcp_list_tools": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools", + "message": "#/components/schemas/OpenAIResponseMessage-Output", + "web_search_call": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + } + } + }, + "type": "array", + "title": "Output" + }, + "parallel_tool_calls": { + "type": "boolean", + "title": "Parallel Tool Calls", + "default": false + }, + "previous_response_id": { + "title": "Previous Response Id", + "type": "string" + }, + "prompt": { + "$ref": "#/components/schemas/OpenAIResponsePrompt" + }, + "status": { + "type": "string", + "title": "Status" + }, + "temperature": { + "title": "Temperature", + "type": "number" + }, + "text": { + "$ref": "#/components/schemas/OpenAIResponseText", + "default": { + "format": { + "type": "text" + } + } + }, + "top_p": { + "title": "Top P", + "type": "number" + }, + "tools": { + "title": "Tools", + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseInputToolWebSearch" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolFileSearch" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolFunction" + }, + { + "$ref": "#/components/schemas/OpenAIResponseToolMCP" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "file_search": "#/components/schemas/OpenAIResponseInputToolFileSearch", + "function": "#/components/schemas/OpenAIResponseInputToolFunction", + "mcp": "#/components/schemas/OpenAIResponseToolMCP", + "web_search": "#/components/schemas/OpenAIResponseInputToolWebSearch", + "web_search_preview": "#/components/schemas/OpenAIResponseInputToolWebSearch", + "web_search_preview_2025_03_11": "#/components/schemas/OpenAIResponseInputToolWebSearch" + } + } + }, + "type": "array" + }, + "truncation": { + "title": "Truncation", + "type": "string" + }, + "usage": { + "$ref": "#/components/schemas/OpenAIResponseUsage" + }, + "instructions": { + "title": "Instructions", + "type": "string" + } + }, + "type": "object", + "required": [ + "created_at", + "id", + "model", + "output", + "status" + ], + "title": "OpenAIResponseObject", + "description": "Complete 
OpenAI response object containing generation results and metadata." + }, + "OpenAIResponseOutputMessageContentOutputText": { + "properties": { + "text": { + "type": "string", + "title": "Text" + }, + "type": { + "type": "string", + "const": "output_text", + "title": "Type", + "default": "output_text" + }, + "annotations": { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseAnnotationFileCitation" + }, + { + "$ref": "#/components/schemas/OpenAIResponseAnnotationCitation" + }, + { + "$ref": "#/components/schemas/OpenAIResponseAnnotationContainerFileCitation" + }, + { + "$ref": "#/components/schemas/OpenAIResponseAnnotationFilePath" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "container_file_citation": "#/components/schemas/OpenAIResponseAnnotationContainerFileCitation", + "file_citation": "#/components/schemas/OpenAIResponseAnnotationFileCitation", + "file_path": "#/components/schemas/OpenAIResponseAnnotationFilePath", + "url_citation": "#/components/schemas/OpenAIResponseAnnotationCitation" + } + } + }, + "type": "array", + "title": "Annotations" + } + }, + "type": "object", + "required": [ + "text" + ], + "title": "OpenAIResponseOutputMessageContentOutputText" + }, + "OpenAIResponseOutputMessageFileSearchToolCall": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "queries": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Queries" + }, + "status": { + "type": "string", + "title": "Status" + }, + "type": { + "type": "string", + "const": "file_search_call", + "title": "Type", + "default": "file_search_call" + }, + "results": { + "title": "Results", + "items": { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCallResults" + }, + "type": "array" + } + }, + "type": "object", + "required": [ + "id", + "queries", + "status" + ], + "title": "OpenAIResponseOutputMessageFileSearchToolCall", + "description": "File search tool call output message for OpenAI responses." + }, + "OpenAIResponseOutputMessageFileSearchToolCallResults": { + "properties": { + "attributes": { + "additionalProperties": true, + "type": "object", + "title": "Attributes" + }, + "file_id": { + "type": "string", + "title": "File Id" + }, + "filename": { + "type": "string", + "title": "Filename" + }, + "score": { + "type": "number", + "title": "Score" + }, + "text": { + "type": "string", + "title": "Text" + } + }, + "type": "object", + "required": [ + "attributes", + "file_id", + "filename", + "score", + "text" + ], + "title": "OpenAIResponseOutputMessageFileSearchToolCallResults", + "description": "Search results returned by the file search operation." + }, + "OpenAIResponseOutputMessageFunctionToolCall": { + "properties": { + "call_id": { + "type": "string", + "title": "Call Id" + }, + "name": { + "type": "string", + "title": "Name" + }, + "arguments": { + "type": "string", + "title": "Arguments" + }, + "type": { + "type": "string", + "const": "function_call", + "title": "Type", + "default": "function_call" + }, + "id": { + "title": "Id", + "type": "string" + }, + "status": { + "title": "Status", + "type": "string" + } + }, + "type": "object", + "required": [ + "call_id", + "name", + "arguments" + ], + "title": "OpenAIResponseOutputMessageFunctionToolCall", + "description": "Function tool call output message for OpenAI responses." 
+ }, + "OpenAIResponseOutputMessageMCPCall": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "type": { + "type": "string", + "const": "mcp_call", + "title": "Type", + "default": "mcp_call" + }, + "arguments": { + "type": "string", + "title": "Arguments" + }, + "name": { + "type": "string", + "title": "Name" + }, + "server_label": { + "type": "string", + "title": "Server Label" + }, + "error": { + "title": "Error", + "type": "string" + }, + "output": { + "title": "Output", + "type": "string" + } + }, + "type": "object", + "required": [ + "id", + "arguments", + "name", + "server_label" + ], + "title": "OpenAIResponseOutputMessageMCPCall", + "description": "Model Context Protocol (MCP) call output message for OpenAI responses." + }, + "OpenAIResponseOutputMessageMCPListTools": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "type": { + "type": "string", + "const": "mcp_list_tools", + "title": "Type", + "default": "mcp_list_tools" + }, + "server_label": { + "type": "string", + "title": "Server Label" + }, + "tools": { + "items": { + "$ref": "#/components/schemas/MCPListToolsTool" + }, + "type": "array", + "title": "Tools" + } + }, + "type": "object", + "required": [ + "id", + "server_label", + "tools" + ], + "title": "OpenAIResponseOutputMessageMCPListTools", + "description": "MCP list tools output message containing available tools from an MCP server." + }, + "OpenAIResponseOutputMessageWebSearchToolCall": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "status": { + "type": "string", + "title": "Status" + }, + "type": { + "type": "string", + "const": "web_search_call", + "title": "Type", + "default": "web_search_call" + } + }, + "type": "object", + "required": [ + "id", + "status" + ], + "title": "OpenAIResponseOutputMessageWebSearchToolCall", + "description": "Web search tool call output message for OpenAI responses." + }, + "OpenAIResponsePrompt": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "variables": { + "title": "Variables", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentImage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentFile" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "input_file": "#/components/schemas/OpenAIResponseInputMessageContentFile", + "input_image": "#/components/schemas/OpenAIResponseInputMessageContentImage", + "input_text": "#/components/schemas/OpenAIResponseInputMessageContentText" + } + } + }, + "type": "object" + }, + "version": { + "title": "Version", + "type": "string" + } + }, + "type": "object", + "required": [ + "id" + ], + "title": "OpenAIResponsePrompt", + "description": "OpenAI compatible Prompt object that is used in OpenAI responses." + }, + "OpenAIResponseText": { + "properties": { + "format": { + "$ref": "#/components/schemas/OpenAIResponseTextFormat" + } + }, + "type": "object", + "title": "OpenAIResponseText", + "description": "Text response configuration for OpenAI responses." 
+ }, + "OpenAIResponseTextFormat": { + "properties": { + "type": { + "anyOf": [ + { + "type": "string", + "const": "text" + }, + { + "type": "string", + "const": "json_schema" + }, + { + "type": "string", + "const": "json_object" + } + ], + "title": "Type" + }, + "name": { + "title": "Name", + "type": "string" + }, + "schema": { + "title": "Schema", + "additionalProperties": true, + "type": "object" + }, + "description": { + "title": "Description", + "type": "string" + }, + "strict": { + "title": "Strict", + "type": "boolean" + } + }, + "type": "object", + "title": "OpenAIResponseTextFormat", + "description": "Configuration for Responses API text format." + }, + "OpenAIResponseToolMCP": { + "properties": { + "type": { + "type": "string", + "const": "mcp", + "title": "Type", + "default": "mcp" + }, + "server_label": { + "type": "string", + "title": "Server Label" + }, + "allowed_tools": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "$ref": "#/components/schemas/AllowedToolsFilter" + } + ], + "title": "Allowed Tools" + } + }, + "type": "object", + "required": [ + "server_label" + ], + "title": "OpenAIResponseToolMCP", + "description": "Model Context Protocol (MCP) tool configuration for OpenAI response object." + }, + "OpenAIResponseUsage": { + "properties": { + "input_tokens": { + "type": "integer", + "title": "Input Tokens" + }, + "output_tokens": { + "type": "integer", + "title": "Output Tokens" + }, + "total_tokens": { + "type": "integer", + "title": "Total Tokens" + }, + "input_tokens_details": { + "$ref": "#/components/schemas/OpenAIResponseUsageInputTokensDetails" + }, + "output_tokens_details": { + "$ref": "#/components/schemas/OpenAIResponseUsageOutputTokensDetails" + } + }, + "type": "object", + "required": [ + "input_tokens", + "output_tokens", + "total_tokens" + ], + "title": "OpenAIResponseUsage", + "description": "Usage information for OpenAI response." + }, + "OpenAIResponseUsageInputTokensDetails": { + "properties": { + "cached_tokens": { + "title": "Cached Tokens", + "type": "integer" + } + }, + "type": "object", + "title": "OpenAIResponseUsageInputTokensDetails", + "description": "Token details for input tokens in OpenAI response usage." + }, + "OpenAIResponseUsageOutputTokensDetails": { + "properties": { + "reasoning_tokens": { + "title": "Reasoning Tokens", + "type": "integer" + } + }, + "type": "object", + "title": "OpenAIResponseUsageOutputTokensDetails", + "description": "Token details for output tokens in OpenAI response usage." + }, + "OpenAISystemMessageParam": { + "properties": { + "role": { + "type": "string", + "const": "system", + "title": "Role", + "default": "system" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + "type": "array" + } + ], + "title": "Content" + }, + "name": { + "title": "Name", + "type": "string" + } + }, + "type": "object", + "required": [ + "content" + ], + "title": "OpenAISystemMessageParam", + "description": "A system message providing instructions or context to the model." 
+ }, + "OpenAITokenLogProb": { + "properties": { + "token": { + "type": "string", + "title": "Token" + }, + "bytes": { + "title": "Bytes", + "items": { + "type": "integer" + }, + "type": "array" + }, + "logprob": { + "type": "number", + "title": "Logprob" + }, + "top_logprobs": { + "items": { + "$ref": "#/components/schemas/OpenAITopLogProb" + }, + "type": "array", + "title": "Top Logprobs" + } + }, + "type": "object", + "required": [ + "token", + "logprob", + "top_logprobs" + ], + "title": "OpenAITokenLogProb", + "description": "The log probability for a token from an OpenAI-compatible chat completion response." + }, + "OpenAIToolMessageParam": { + "properties": { + "role": { + "type": "string", + "const": "tool", + "title": "Role", + "default": "tool" + }, + "tool_call_id": { + "type": "string", + "title": "Tool Call Id" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + "type": "array" + } + ], + "title": "Content" + } + }, + "type": "object", + "required": [ + "tool_call_id", + "content" + ], + "title": "OpenAIToolMessageParam", + "description": "A message representing the result of a tool invocation in an OpenAI-compatible chat completion request." + }, + "OpenAITopLogProb": { + "properties": { + "token": { + "type": "string", + "title": "Token" + }, + "bytes": { + "title": "Bytes", + "items": { + "type": "integer" + }, + "type": "array" + }, + "logprob": { + "type": "number", + "title": "Logprob" + } + }, + "type": "object", + "required": [ + "token", + "logprob" + ], + "title": "OpenAITopLogProb", + "description": "The top log probability for a token from an OpenAI-compatible chat completion response." + }, + "OpenAIUserMessageParam-Input": { + "properties": { + "role": { + "type": "string", + "const": "user", + "title": "Role", + "default": "user" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartImageParam" + }, + { + "$ref": "#/components/schemas/OpenAIFile" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "file": "#/components/schemas/OpenAIFile", + "image_url": "#/components/schemas/OpenAIChatCompletionContentPartImageParam", + "text": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "name": { + "title": "Name", + "type": "string" + } + }, + "type": "object", + "required": [ + "content" + ], + "title": "OpenAIUserMessageParam", + "description": "A message from the user in an OpenAI-compatible chat completion request." 
+ }, + "OpenAIUserMessageParam-Output": { + "properties": { + "role": { + "type": "string", + "const": "user", + "title": "Role", + "default": "user" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartImageParam" + }, + { + "$ref": "#/components/schemas/OpenAIFile" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "file": "#/components/schemas/OpenAIFile", + "image_url": "#/components/schemas/OpenAIChatCompletionContentPartImageParam", + "text": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "name": { + "title": "Name", + "type": "string" + } + }, + "type": "object", + "required": [ + "content" + ], + "title": "OpenAIUserMessageParam", + "description": "A message from the user in an OpenAI-compatible chat completion request." + }, + "Order": { + "type": "string", + "enum": [ + "asc", + "desc" + ], + "title": "Order", + "description": "Sort order for paginated responses." + }, + "OutputTokensDetails": { + "properties": { + "reasoning_tokens": { + "type": "integer", + "title": "Reasoning Tokens" + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "reasoning_tokens" + ], + "title": "OutputTokensDetails" + }, + "Prompt": { + "properties": { + "prompt": { + "title": "Prompt", + "description": "The system prompt with variable placeholders", + "type": "string" + }, + "version": { + "type": "integer", + "minimum": 1.0, + "title": "Version", + "description": "Version (integer starting at 1, incremented on save)" + }, + "prompt_id": { + "type": "string", + "title": "Prompt Id", + "description": "Unique identifier in format 'pmpt_<48-digit-hash>'" + }, + "variables": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Variables", + "description": "List of variable names that can be used in the prompt template" + }, + "is_default": { + "type": "boolean", + "title": "Is Default", + "description": "Boolean indicating whether this version is the default version", + "default": false + } + }, + "type": "object", + "required": [ + "version", + "prompt_id" + ], + "title": "Prompt", + "description": "A prompt resource representing a stored OpenAI Compatible prompt template in Llama Stack." + }, + "ProviderInfo": { + "properties": { + "api": { + "type": "string", + "title": "Api" + }, + "provider_id": { + "type": "string", + "title": "Provider Id" + }, + "provider_type": { + "type": "string", + "title": "Provider Type" + }, + "config": { + "additionalProperties": true, + "type": "object", + "title": "Config" + }, + "health": { + "additionalProperties": true, + "type": "object", + "title": "Health" + } + }, + "type": "object", + "required": [ + "api", + "provider_id", + "provider_type", + "config", + "health" + ], + "title": "ProviderInfo", + "description": "Information about a registered provider including its configuration and health status." 
+ }, + "QueryChunksResponse": { + "properties": { + "chunks": { + "items": { + "$ref": "#/components/schemas/Chunk-Output" + }, + "type": "array", + "title": "Chunks" + }, + "scores": { + "items": { + "type": "number" + }, + "type": "array", + "title": "Scores" + } + }, + "type": "object", + "required": [ + "chunks", + "scores" + ], + "title": "QueryChunksResponse", + "description": "Response from querying chunks in a vector database." + }, + "RAGQueryConfig": { + "properties": { + "query_generator_config": { + "oneOf": [ + { + "$ref": "#/components/schemas/DefaultRAGQueryGeneratorConfig" + }, + { + "$ref": "#/components/schemas/LLMRAGQueryGeneratorConfig" + } + ], + "title": "Query Generator Config", + "default": { + "type": "default", + "separator": " " + }, + "discriminator": { + "propertyName": "type", + "mapping": { + "default": "#/components/schemas/DefaultRAGQueryGeneratorConfig", + "llm": "#/components/schemas/LLMRAGQueryGeneratorConfig" + } + } + }, + "max_tokens_in_context": { + "type": "integer", + "title": "Max Tokens In Context", + "default": 4096 + }, + "max_chunks": { + "type": "integer", + "title": "Max Chunks", + "default": 5 + }, + "chunk_template": { + "type": "string", + "title": "Chunk Template", + "default": "Result {index}\nContent: {chunk.content}\nMetadata: {metadata}\n" + }, + "mode": { + "default": "vector", + "$ref": "#/components/schemas/RAGSearchMode" + }, + "ranker": { + "title": "Ranker", + "oneOf": [ + { + "$ref": "#/components/schemas/RRFRanker" + }, + { + "$ref": "#/components/schemas/WeightedRanker" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "rrf": "#/components/schemas/RRFRanker", + "weighted": "#/components/schemas/WeightedRanker" + } + } + } + }, + "type": "object", + "title": "RAGQueryConfig", + "description": "Configuration for the RAG query generation." + }, + "RAGQueryResult": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata" + } + }, + "type": "object", + "title": "RAGQueryResult", + "description": "Result of a RAG query containing retrieved content and metadata." 
+ }, + "RAGSearchMode": { + "type": "string", + "enum": [ + "vector", + "keyword", + "hybrid" + ], + "title": "RAGSearchMode", + "description": "Search modes for RAG query retrieval:\n- VECTOR: Uses vector similarity search for semantic matching\n- KEYWORD: Uses keyword-based search for exact matching\n- HYBRID: Combines both vector and keyword search for better results" + }, + "RRFRanker": { + "properties": { + "type": { + "type": "string", + "const": "rrf", + "title": "Type", + "default": "rrf" + }, + "impact_factor": { + "type": "number", + "title": "Impact Factor", + "default": 60.0, + "minimum": 0.0 + } + }, + "type": "object", + "title": "RRFRanker", + "description": "Reciprocal Rank Fusion (RRF) ranker configuration." + }, + "RegexParserScoringFnParams": { + "properties": { + "type": { + "type": "string", + "const": "regex_parser", + "title": "Type", + "default": "regex_parser" + }, + "parsing_regexes": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Parsing Regexes", + "description": "Regex to extract the answer from generated response" + }, + "aggregation_functions": { + "items": { + "$ref": "#/components/schemas/AggregationFunctionType" + }, + "type": "array", + "title": "Aggregation Functions", + "description": "Aggregation functions to apply to the scores of each row" + } + }, + "type": "object", + "title": "RegexParserScoringFnParams", + "description": "Parameters for regex parser scoring function configuration." + }, + "RouteInfo": { + "properties": { + "route": { + "type": "string", + "title": "Route" + }, + "method": { + "type": "string", + "title": "Method" + }, + "provider_types": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Provider Types" + } + }, + "type": "object", + "required": [ + "route", + "method", + "provider_types" + ], + "title": "RouteInfo", + "description": "Information about an API route including its path, method, and implementing providers." + }, + "RunShieldResponse": { + "properties": { + "violation": { + "$ref": "#/components/schemas/SafetyViolation" + } + }, + "type": "object", + "title": "RunShieldResponse", + "description": "Response from running a safety shield." + }, + "SafetyViolation": { + "properties": { + "violation_level": { + "$ref": "#/components/schemas/ViolationLevel" + }, + "user_message": { + "title": "User Message", + "type": "string" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata" + } + }, + "type": "object", + "required": [ + "violation_level" + ], + "title": "SafetyViolation", + "description": "Details of a safety violation detected by content moderation." + }, + "ScoreBatchResponse": { + "properties": { + "dataset_id": { + "title": "Dataset Id", + "type": "string" + }, + "results": { + "additionalProperties": { + "$ref": "#/components/schemas/ScoringResult" + }, + "type": "object", + "title": "Results" + } + }, + "type": "object", + "required": [ + "results" + ], + "title": "ScoreBatchResponse", + "description": "Response from batch scoring operations on datasets." + }, + "ScoreResponse": { + "properties": { + "results": { + "additionalProperties": { + "$ref": "#/components/schemas/ScoringResult" + }, + "type": "object", + "title": "Results" + } + }, + "type": "object", + "required": [ + "results" + ], + "title": "ScoreResponse", + "description": "The response from scoring." 
+ }, + "ScoringFn-Output": { + "properties": { + "identifier": { + "type": "string", + "title": "Identifier", + "description": "Unique identifier for this resource in llama stack" + }, + "provider_resource_id": { + "title": "Provider Resource Id", + "description": "Unique identifier for this resource in the provider", + "type": "string" + }, + "provider_id": { + "type": "string", + "title": "Provider Id", + "description": "ID of the provider that owns this resource" + }, + "type": { + "type": "string", + "const": "scoring_function", + "title": "Type", + "default": "scoring_function" + }, + "description": { + "title": "Description", + "type": "string" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata", + "description": "Any additional metadata for this definition" + }, + "return_type": { + "oneOf": [ + { + "$ref": "#/components/schemas/StringType" + }, + { + "$ref": "#/components/schemas/NumberType" + }, + { + "$ref": "#/components/schemas/BooleanType" + }, + { + "$ref": "#/components/schemas/ArrayType" + }, + { + "$ref": "#/components/schemas/ObjectType" + }, + { + "$ref": "#/components/schemas/JsonType" + }, + { + "$ref": "#/components/schemas/UnionType" + }, + { + "$ref": "#/components/schemas/ChatCompletionInputType" + }, + { + "$ref": "#/components/schemas/CompletionInputType" + }, + { + "$ref": "#/components/schemas/AgentTurnInputType" + } + ], + "title": "Return Type", + "description": "The return type of the deterministic function", + "discriminator": { + "propertyName": "type", + "mapping": { + "agent_turn_input": "#/components/schemas/AgentTurnInputType", + "array": "#/components/schemas/ArrayType", + "boolean": "#/components/schemas/BooleanType", + "chat_completion_input": "#/components/schemas/ChatCompletionInputType", + "completion_input": "#/components/schemas/CompletionInputType", + "json": "#/components/schemas/JsonType", + "number": "#/components/schemas/NumberType", + "object": "#/components/schemas/ObjectType", + "string": "#/components/schemas/StringType", + "union": "#/components/schemas/UnionType" + } + } + }, + "params": { + "title": "Params", + "description": "The parameters for the scoring function for benchmark eval, these can be overridden for app eval", + "oneOf": [ + { + "$ref": "#/components/schemas/LLMAsJudgeScoringFnParams" + }, + { + "$ref": "#/components/schemas/RegexParserScoringFnParams" + }, + { + "$ref": "#/components/schemas/BasicScoringFnParams" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "basic": "#/components/schemas/BasicScoringFnParams", + "llm_as_judge": "#/components/schemas/LLMAsJudgeScoringFnParams", + "regex_parser": "#/components/schemas/RegexParserScoringFnParams" + } + } + } + }, + "type": "object", + "required": [ + "identifier", + "provider_id", + "return_type" + ], + "title": "ScoringFn", + "description": "A scoring function resource for evaluating model outputs." + }, + "ScoringResult": { + "properties": { + "score_rows": { + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array", + "title": "Score Rows" + }, + "aggregated_results": { + "additionalProperties": true, + "type": "object", + "title": "Aggregated Results" + } + }, + "type": "object", + "required": [ + "score_rows", + "aggregated_results" + ], + "title": "ScoringResult", + "description": "A scoring result for a single row." 
+ }, + "SearchRankingOptions": { + "properties": { + "ranker": { + "title": "Ranker", + "type": "string" + }, + "score_threshold": { + "title": "Score Threshold", + "default": 0.0, + "type": "number" + } + }, + "type": "object", + "title": "SearchRankingOptions", + "description": "Options for ranking and filtering search results." + }, + "Shield": { + "properties": { + "identifier": { + "type": "string", + "title": "Identifier", + "description": "Unique identifier for this resource in llama stack" + }, + "provider_resource_id": { + "title": "Provider Resource Id", + "description": "Unique identifier for this resource in the provider", + "type": "string" + }, + "provider_id": { + "type": "string", + "title": "Provider Id", + "description": "ID of the provider that owns this resource" + }, + "type": { + "type": "string", + "const": "shield", + "title": "Type", + "default": "shield" + }, + "params": { + "title": "Params", + "additionalProperties": true, + "type": "object" + } + }, + "type": "object", + "required": [ + "identifier", + "provider_id" + ], + "title": "Shield", + "description": "A safety shield resource that can be used to check content." + }, + "StringType": { + "properties": { + "type": { + "type": "string", + "const": "string", + "title": "Type", + "default": "string" + } + }, + "type": "object", + "title": "StringType", + "description": "Parameter type for string values." + }, + "TextContentItem": { + "properties": { + "type": { + "type": "string", + "const": "text", + "title": "Type", + "default": "text" + }, + "text": { + "type": "string", + "title": "Text" + } + }, + "type": "object", + "required": [ + "text" + ], + "title": "TextContentItem", + "description": "A text content item" + }, + "ToolDef": { + "properties": { + "toolgroup_id": { + "title": "Toolgroup Id", + "type": "string" + }, + "name": { + "type": "string", + "title": "Name" + }, + "description": { + "title": "Description", + "type": "string" + }, + "input_schema": { + "title": "Input Schema", + "additionalProperties": true, + "type": "object" + }, + "output_schema": { + "title": "Output Schema", + "additionalProperties": true, + "type": "object" + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object" + } + }, + "type": "object", + "required": [ + "name" + ], + "title": "ToolDef", + "description": "Tool definition used in runtime contexts." + }, + "ToolGroup": { + "properties": { + "identifier": { + "type": "string", + "title": "Identifier", + "description": "Unique identifier for this resource in llama stack" + }, + "provider_resource_id": { + "title": "Provider Resource Id", + "description": "Unique identifier for this resource in the provider", + "type": "string" + }, + "provider_id": { + "type": "string", + "title": "Provider Id", + "description": "ID of the provider that owns this resource" + }, + "type": { + "type": "string", + "const": "tool_group", + "title": "Type", + "default": "tool_group" + }, + "mcp_endpoint": { + "$ref": "#/components/schemas/URL" + }, + "args": { + "title": "Args", + "additionalProperties": true, + "type": "object" + } + }, + "type": "object", + "required": [ + "identifier", + "provider_id" + ], + "title": "ToolGroup", + "description": "A group of related tools managed together." 
+ }, + "ToolInvocationResult": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "error_message": { + "title": "Error Message", + "type": "string" + }, + "error_code": { + "title": "Error Code", + "type": "integer" + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object" + } + }, + "type": "object", + "title": "ToolInvocationResult", + "description": "Result of a tool invocation." + }, + "URL": { + "properties": { + "uri": { + "type": "string", + "title": "Uri" + } + }, + "type": "object", + "required": [ + "uri" + ], + "title": "URL", + "description": "A URL reference to external content." + }, + "UnionType": { + "properties": { + "type": { + "type": "string", + "const": "union", + "title": "Type", + "default": "union" + } + }, + "type": "object", + "title": "UnionType", + "description": "Parameter type for union values." + }, + "VectorStoreChunkingStrategyAuto": { + "properties": { + "type": { + "type": "string", + "const": "auto", + "title": "Type", + "default": "auto" + } + }, + "type": "object", + "title": "VectorStoreChunkingStrategyAuto", + "description": "Automatic chunking strategy for vector store files." + }, + "VectorStoreChunkingStrategyStatic": { + "properties": { + "type": { + "type": "string", + "const": "static", + "title": "Type", + "default": "static" + }, + "static": { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyStaticConfig" + } + }, + "type": "object", + "required": [ + "static" + ], + "title": "VectorStoreChunkingStrategyStatic", + "description": "Static chunking strategy with configurable parameters." + }, + "VectorStoreChunkingStrategyStaticConfig": { + "properties": { + "chunk_overlap_tokens": { + "type": "integer", + "title": "Chunk Overlap Tokens", + "default": 400 + }, + "max_chunk_size_tokens": { + "type": "integer", + "maximum": 4096.0, + "minimum": 100.0, + "title": "Max Chunk Size Tokens", + "default": 800 + } + }, + "type": "object", + "title": "VectorStoreChunkingStrategyStaticConfig", + "description": "Configuration for static chunking strategy." + }, + "VectorStoreContent": { + "properties": { + "type": { + "type": "string", + "const": "text", + "title": "Type" + }, + "text": { + "type": "string", + "title": "Text" + } + }, + "type": "object", + "required": [ + "type", + "text" + ], + "title": "VectorStoreContent", + "description": "Content item from a vector store file or search result." 
+ }, + "VectorStoreFileBatchObject": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "object": { + "type": "string", + "title": "Object", + "default": "vector_store.file_batch" + }, + "created_at": { + "type": "integer", + "title": "Created At" + }, + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + }, + "status": { + "anyOf": [ + { + "type": "string", + "const": "completed" + }, + { + "type": "string", + "const": "in_progress" + }, + { + "type": "string", + "const": "cancelled" + }, + { + "type": "string", + "const": "failed" + } + ], + "title": "Status" + }, + "file_counts": { + "$ref": "#/components/schemas/VectorStoreFileCounts" + } + }, + "type": "object", + "required": [ + "id", + "created_at", + "vector_store_id", + "status", + "file_counts" + ], + "title": "VectorStoreFileBatchObject", + "description": "OpenAI Vector Store File Batch object." + }, + "VectorStoreFileCounts": { + "properties": { + "completed": { + "type": "integer", + "title": "Completed" + }, + "cancelled": { + "type": "integer", + "title": "Cancelled" + }, + "failed": { + "type": "integer", + "title": "Failed" + }, + "in_progress": { + "type": "integer", + "title": "In Progress" + }, + "total": { + "type": "integer", + "title": "Total" + } + }, + "type": "object", + "required": [ + "completed", + "cancelled", + "failed", + "in_progress", + "total" + ], + "title": "VectorStoreFileCounts", + "description": "File processing status counts for a vector store." + }, + "VectorStoreFileLastError": { + "properties": { + "code": { + "anyOf": [ + { + "type": "string", + "const": "server_error" + }, + { + "type": "string", + "const": "rate_limit_exceeded" + } + ], + "title": "Code" + }, + "message": { + "type": "string", + "title": "Message" + } + }, + "type": "object", + "required": [ + "code", + "message" + ], + "title": "VectorStoreFileLastError", + "description": "Error information for failed vector store file processing." + }, + "VectorStoreFileObject": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "object": { + "type": "string", + "title": "Object", + "default": "vector_store.file" + }, + "attributes": { + "additionalProperties": true, + "type": "object", + "title": "Attributes" + }, + "chunking_strategy": { + "oneOf": [ + { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyAuto" + }, + { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyStatic" + } + ], + "title": "Chunking Strategy", + "discriminator": { + "propertyName": "type", + "mapping": { + "auto": "#/components/schemas/VectorStoreChunkingStrategyAuto", + "static": "#/components/schemas/VectorStoreChunkingStrategyStatic" + } + } + }, + "created_at": { + "type": "integer", + "title": "Created At" + }, + "last_error": { + "$ref": "#/components/schemas/VectorStoreFileLastError" + }, + "status": { + "anyOf": [ + { + "type": "string", + "const": "completed" + }, + { + "type": "string", + "const": "in_progress" + }, + { + "type": "string", + "const": "cancelled" + }, + { + "type": "string", + "const": "failed" + } + ], + "title": "Status" + }, + "usage_bytes": { + "type": "integer", + "title": "Usage Bytes", + "default": 0 + }, + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + } + }, + "type": "object", + "required": [ + "id", + "chunking_strategy", + "created_at", + "status", + "vector_store_id" + ], + "title": "VectorStoreFileObject", + "description": "OpenAI Vector Store File object." 
+ }, + "VectorStoreObject": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "object": { + "type": "string", + "title": "Object", + "default": "vector_store" + }, + "created_at": { + "type": "integer", + "title": "Created At" + }, + "name": { + "title": "Name", + "type": "string" + }, + "usage_bytes": { + "type": "integer", + "title": "Usage Bytes", + "default": 0 + }, + "file_counts": { + "$ref": "#/components/schemas/VectorStoreFileCounts" + }, + "status": { + "type": "string", + "title": "Status", + "default": "completed" + }, + "expires_after": { + "title": "Expires After", + "additionalProperties": true, + "type": "object" + }, + "expires_at": { + "title": "Expires At", + "type": "integer" + }, + "last_active_at": { + "title": "Last Active At", + "type": "integer" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata" + } + }, + "type": "object", + "required": [ + "id", + "created_at", + "file_counts" + ], + "title": "VectorStoreObject", + "description": "OpenAI Vector Store object." + }, + "VectorStoreSearchResponse": { + "properties": { + "file_id": { + "type": "string", + "title": "File Id" + }, + "filename": { + "type": "string", + "title": "Filename" + }, + "score": { + "type": "number", + "title": "Score" + }, + "attributes": { + "title": "Attributes", + "additionalProperties": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "boolean" + } + ] + }, + "type": "object" + }, + "content": { + "items": { + "$ref": "#/components/schemas/VectorStoreContent" + }, + "type": "array", + "title": "Content" + } + }, + "type": "object", + "required": [ + "file_id", + "filename", + "score", + "content" + ], + "title": "VectorStoreSearchResponse", + "description": "Response from searching a vector store." + }, + "VectorStoreSearchResponsePage": { + "properties": { + "object": { + "type": "string", + "title": "Object", + "default": "vector_store.search_results.page" + }, + "search_query": { + "type": "string", + "title": "Search Query" + }, + "data": { + "items": { + "$ref": "#/components/schemas/VectorStoreSearchResponse" + }, + "type": "array", + "title": "Data" + }, + "has_more": { + "type": "boolean", + "title": "Has More", + "default": false + }, + "next_page": { + "title": "Next Page", + "type": "string" + } + }, + "type": "object", + "required": [ + "search_query", + "data" + ], + "title": "VectorStoreSearchResponsePage", + "description": "Paginated response from searching a vector store." + }, + "VersionInfo": { + "properties": { + "version": { + "type": "string", + "title": "Version" + } + }, + "type": "object", + "required": [ + "version" + ], + "title": "VersionInfo", + "description": "Version information for the service." + }, + "ViolationLevel": { + "type": "string", + "enum": [ + "info", + "warn", + "error" + ], + "title": "ViolationLevel", + "description": "Severity level of a safety violation." + }, + "WeightedRanker": { + "properties": { + "type": { + "type": "string", + "const": "weighted", + "title": "Type", + "default": "weighted" + }, + "alpha": { + "type": "number", + "maximum": 1.0, + "minimum": 0.0, + "title": "Alpha", + "description": "Weight factor between 0 and 1. 0 means only keyword scores, 1 means only vector scores.", + "default": 0.5 + } + }, + "type": "object", + "title": "WeightedRanker", + "description": "Weighted ranker configuration that combines vector and keyword scores." 
+ }, + "_URLOrData": { + "properties": { + "url": { + "$ref": "#/components/schemas/URL" + }, + "data": { + "contentEncoding": "base64", + "title": "Data", + "type": "string" + } + }, + "type": "object", + "title": "_URLOrData", + "description": "A URL or a base64 encoded string" + }, + "_batches_Request": { + "properties": { + "input_file_id": { + "type": "string", + "title": "Input File Id" + }, + "endpoint": { + "type": "string", + "title": "Endpoint" + }, + "completion_window": { + "type": "string", + "title": "Completion Window" + }, + "metadata": { + "type": "string", + "title": "Metadata" + }, + "idempotency_key": { + "type": "string", + "title": "Idempotency Key" + } + }, + "type": "object", + "required": [ + "input_file_id", + "endpoint", + "completion_window", + "metadata", + "idempotency_key" + ], + "title": "_batches_Request" + }, + "_batches_batch_id_cancel_Request": { + "properties": { + "batch_id": { + "type": "string", + "title": "Batch Id" + } + }, + "type": "object", + "required": [ + "batch_id" + ], + "title": "_batches_batch_id_cancel_Request" + }, + "_conversations_Request": { + "properties": { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage-Input" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalResponse" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + } + ], + "title": "Items", + "discriminator": { + "propertyName": "type", + "mapping": { + "file_search_call": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall", + "function_call": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall", + "function_call_output": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput", + "mcp_approval_request": "#/components/schemas/OpenAIResponseMCPApprovalRequest", + "mcp_approval_response": "#/components/schemas/OpenAIResponseMCPApprovalResponse", + "mcp_call": "#/components/schemas/OpenAIResponseOutputMessageMCPCall", + "mcp_list_tools": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools", + "message": "#/components/schemas/OpenAIResponseMessage-Input", + "web_search_call": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + } + } + }, + "metadata": { + "type": "string", + "title": "Metadata" + } + }, + "type": "object", + "required": [ + "items", + "metadata" + ], + "title": "_conversations_Request" + }, + "_conversations_conversation_id_Request": { + "properties": { + "conversation_id": { + "type": "string", + "title": "Conversation Id" + }, + "metadata": { + "type": "string", + "title": "Metadata" + } + }, + "type": "object", + "required": [ + "conversation_id", + "metadata" + ], + "title": "_conversations_conversation_id_Request" + }, + "_conversations_conversation_id_items_Request": { + "properties": { + "conversation_id": { + "type": "string", + "title": "Conversation Id" + }, + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage-Input" + }, + { + "$ref": 
"#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalResponse" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + } + ], + "title": "Items" + } + }, + "type": "object", + "required": [ + "conversation_id", + "items" + ], + "title": "_conversations_conversation_id_items_Request" + }, + "_models_Request": { + "properties": { + "model_id": { + "type": "string", + "title": "Model Id" + }, + "provider_model_id": { + "type": "string", + "title": "Provider Model Id" + }, + "provider_id": { + "type": "string", + "title": "Provider Id" + }, + "metadata": { + "type": "string", + "title": "Metadata" + }, + "model_type": { + "$ref": "#/components/schemas/ModelType" + } + }, + "type": "object", + "required": [ + "model_id", + "provider_model_id", + "provider_id", + "metadata", + "model_type" + ], + "title": "_models_Request" + }, + "_moderations_Request": { + "properties": { + "input": { + "type": "string", + "title": "Input" + }, + "model": { + "type": "string", + "title": "Model" + } + }, + "type": "object", + "required": [ + "input", + "model" + ], + "title": "_moderations_Request" + }, + "_prompts_Request": { + "properties": { + "prompt": { + "type": "string", + "title": "Prompt" + }, + "variables": { + "type": "string", + "title": "Variables" + } + }, + "type": "object", + "required": [ + "prompt", + "variables" + ], + "title": "_prompts_Request" + }, + "_prompts_prompt_id_Request": { + "properties": { + "prompt_id": { + "type": "string", + "title": "Prompt Id" + }, + "prompt": { + "type": "string", + "title": "Prompt" + }, + "version": { + "type": "integer", + "title": "Version" + }, + "variables": { + "type": "string", + "title": "Variables" + }, + "set_as_default": { + "type": "boolean", + "title": "Set As Default", + "default": true + } + }, + "type": "object", + "required": [ + "prompt_id", + "prompt", + "version", + "variables" + ], + "title": "_prompts_prompt_id_Request" + }, + "_prompts_prompt_id_set_default_version_Request": { + "properties": { + "prompt_id": { + "type": "string", + "title": "Prompt Id" + }, + "version": { + "type": "integer", + "title": "Version" + } + }, + "type": "object", + "required": [ + "prompt_id", + "version" + ], + "title": "_prompts_prompt_id_set_default_version_Request" + }, + "_responses_Request": { + "properties": { + "input": { + "type": "string", + "title": "Input" + }, + "model": { + "type": "string", + "title": "Model" + }, + "prompt": { + "$ref": "#/components/schemas/OpenAIResponsePrompt" + }, + "instructions": { + "type": "string", + "title": "Instructions" + }, + "previous_response_id": { + "type": "string", + "title": "Previous Response Id" + }, + "conversation": { + "type": "string", + "title": "Conversation" + }, + "store": { + "type": "boolean", + "title": "Store", + "default": true + }, + "stream": { + "type": "boolean", + "title": "Stream", + "default": false + }, + "temperature": { + "type": "number", + "title": "Temperature" + }, + "text": { + "$ref": "#/components/schemas/OpenAIResponseText" + }, + "tools": { + "oneOf": [ + { + 
"$ref": "#/components/schemas/OpenAIResponseInputToolWebSearch" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolFileSearch" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolFunction" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolMCP" + } + ], + "title": "Tools", + "discriminator": { + "propertyName": "type", + "mapping": { + "file_search": "#/components/schemas/OpenAIResponseInputToolFileSearch", + "function": "#/components/schemas/OpenAIResponseInputToolFunction", + "mcp": "#/components/schemas/OpenAIResponseInputToolMCP", + "web_search": "#/components/schemas/OpenAIResponseInputToolWebSearch", + "web_search_preview": "#/components/schemas/OpenAIResponseInputToolWebSearch", + "web_search_preview_2025_03_11": "#/components/schemas/OpenAIResponseInputToolWebSearch" + } + } + }, + "include": { + "type": "string", + "title": "Include" + }, + "max_infer_iters": { + "type": "integer", + "title": "Max Infer Iters", + "default": 10 + } + }, + "type": "object", + "required": [ + "input", + "model", + "prompt", + "instructions", + "previous_response_id", + "conversation", + "temperature", + "text", + "tools", + "include" + ], + "title": "_responses_Request" + }, + "_scoring_score_Request": { + "properties": { + "input_rows": { + "type": "string", + "title": "Input Rows" + }, + "scoring_functions": { + "type": "string", + "title": "Scoring Functions" + } + }, + "type": "object", + "required": [ + "input_rows", + "scoring_functions" + ], + "title": "_scoring_score_Request" + }, + "_scoring_score_batch_Request": { + "properties": { + "dataset_id": { + "type": "string", + "title": "Dataset Id" + }, + "scoring_functions": { + "type": "string", + "title": "Scoring Functions" + }, + "save_results_dataset": { + "type": "boolean", + "title": "Save Results Dataset", + "default": false + } + }, + "type": "object", + "required": [ + "dataset_id", + "scoring_functions" + ], + "title": "_scoring_score_batch_Request" + }, + "_shields_Request": { + "properties": { + "shield_id": { + "type": "string", + "title": "Shield Id" + }, + "provider_shield_id": { + "type": "string", + "title": "Provider Shield Id" + }, + "provider_id": { + "type": "string", + "title": "Provider Id" + }, + "params": { + "type": "string", + "title": "Params" + } + }, + "type": "object", + "required": [ + "shield_id", + "provider_shield_id", + "provider_id", + "params" + ], + "title": "_shields_Request" + }, + "_tool_runtime_invoke_Request": { + "properties": { + "tool_name": { + "type": "string", + "title": "Tool Name" + }, + "kwargs": { + "type": "string", + "title": "Kwargs" + } + }, + "type": "object", + "required": [ + "tool_name", + "kwargs" + ], + "title": "_tool_runtime_invoke_Request" + }, + "_tool_runtime_rag_tool_query_Request": { + "properties": { + "content": { + "type": "string", + "title": "Content" + }, + "vector_store_ids": { + "type": "string", + "title": "Vector Store Ids" + }, + "query_config": { + "$ref": "#/components/schemas/RAGQueryConfig" + } + }, + "type": "object", + "required": [ + "content", + "vector_store_ids", + "query_config" + ], + "title": "_tool_runtime_rag_tool_query_Request" + }, + "_vector_io_query_Request": { + "properties": { + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + }, + "query": { + "type": "string", + "title": "Query" + }, + "params": { + "type": "string", + "title": "Params" + } + }, + "type": "object", + "required": [ + "vector_store_id", + "query", + "params" + ], + "title": "_vector_io_query_Request" + }, + 
"_vector_stores_vector_store_id_Request": { + "properties": { + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + }, + "name": { + "type": "string", + "title": "Name" + }, + "expires_after": { + "type": "string", + "title": "Expires After" + }, + "metadata": { + "type": "string", + "title": "Metadata" + } + }, + "type": "object", + "required": [ + "vector_store_id", + "name", + "expires_after", + "metadata" + ], + "title": "_vector_stores_vector_store_id_Request" + }, + "_vector_stores_vector_store_id_file_batches_batch_id_cancel_Request": { + "properties": { + "batch_id": { + "type": "string", + "title": "Batch Id" + }, + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + } + }, + "type": "object", + "required": [ + "batch_id", + "vector_store_id" + ], + "title": "_vector_stores_vector_store_id_file_batches_batch_id_cancel_Request" + }, + "_vector_stores_vector_store_id_files_Request": { + "properties": { + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + }, + "file_id": { + "type": "string", + "title": "File Id" + }, + "attributes": { + "type": "string", + "title": "Attributes" + }, + "chunking_strategy": { + "anyOf": [ + { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyAuto" + }, + { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyStatic" + } + ], + "title": "Chunking Strategy" + } + }, + "type": "object", + "required": [ + "vector_store_id", + "file_id", + "attributes", + "chunking_strategy" + ], + "title": "_vector_stores_vector_store_id_files_Request" + }, + "_vector_stores_vector_store_id_files_file_id_Request": { + "properties": { + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + }, + "file_id": { + "type": "string", + "title": "File Id" + }, + "attributes": { + "type": "string", + "title": "Attributes" + } + }, + "type": "object", + "required": [ + "vector_store_id", + "file_id", + "attributes" + ], + "title": "_vector_stores_vector_store_id_files_file_id_Request" + }, + "_vector_stores_vector_store_id_search_Request": { + "properties": { + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + }, + "query": { + "type": "string", + "title": "Query" + }, + "filters": { + "type": "string", + "title": "Filters" + }, + "max_num_results": { + "type": "integer", + "title": "Max Num Results", + "default": 10 + }, + "ranking_options": { + "$ref": "#/components/schemas/SearchRankingOptions" + }, + "rewrite_query": { + "type": "boolean", + "title": "Rewrite Query", + "default": false + }, + "search_mode": { + "type": "string", + "title": "Search Mode", + "default": "vector" + } + }, + "type": "object", + "required": [ + "vector_store_id", + "query", + "filters", + "ranking_options" + ], + "title": "_vector_stores_vector_store_id_search_Request" + }, + "Error": { + "description": "Error response from the API. 
Roughly follows RFC 7807.", + "properties": { + "status": { + "title": "Status", + "type": "integer" + }, + "title": { + "title": "Title", + "type": "string" + }, + "detail": { + "title": "Detail", + "type": "string" + }, + "instance": { + "title": "Instance", + "type": "string", + "nullable": true + } + }, + "required": [ + "status", + "title", + "detail" + ], + "title": "Error", + "type": "object" + }, + "ListOpenAIResponseInputItem": { + "description": "List container for OpenAI response input items.", + "properties": { + "data": { + "items": { + "anyOf": [ + { + "discriminator": { + "mapping": { + "file_search_call": "#/$defs/OpenAIResponseOutputMessageFileSearchToolCall", + "function_call": "#/$defs/OpenAIResponseOutputMessageFunctionToolCall", + "mcp_approval_request": "#/$defs/OpenAIResponseMCPApprovalRequest", + "mcp_call": "#/$defs/OpenAIResponseOutputMessageMCPCall", + "mcp_list_tools": "#/$defs/OpenAIResponseOutputMessageMCPListTools", + "message": "#/$defs/OpenAIResponseMessage", + "web_search_call": "#/$defs/OpenAIResponseOutputMessageWebSearchToolCall" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest" + } + ] + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalResponse" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMessage" + } + ] + }, + "title": "Data", + "type": "array" + }, + "object": { + "const": "list", + "default": "list", + "title": "Object", + "type": "string" + } + }, + "required": [ + "data" + ], + "title": "ListOpenAIResponseInputItem", + "type": "object" + }, + "ListOpenAIResponseObject": { + "description": "Paginated list of OpenAI response objects with navigation metadata.", + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/OpenAIResponseObjectWithInput" + }, + "title": "Data", + "type": "array" + }, + "has_more": { + "title": "Has More", + "type": "boolean" + }, + "first_id": { + "title": "First Id", + "type": "string" + }, + "last_id": { + "title": "Last Id", + "type": "string" + }, + "object": { + "const": "list", + "default": "list", + "title": "Object", + "type": "string" + } + }, + "required": [ + "data", + "has_more", + "first_id", + "last_id" + ], + "title": "ListOpenAIResponseObject", + "type": "object" + }, + "OpenAIDeleteResponseObject": { + "description": "Response object confirming deletion of an OpenAI response.", + "properties": { + "id": { + "title": "Id", + "type": "string" + }, + "object": { + "const": "response", + "default": "response", + "title": "Object", + "type": "string" + }, + "deleted": { + "default": true, + "title": "Deleted", + "type": "boolean" + } + }, + "required": [ + "id" + ], + "title": "OpenAIDeleteResponseObject", + "type": "object" + }, + "ListBatchesResponse": { + "description": "Response containing a list of batch objects.", + "properties": { + "object": { + "const": "list", + "default": "list", + "title": "Object", + "type": "string" + }, + "data": 
{ + "description": "List of batch objects", + "items": { + "$ref": "#/components/schemas/Batch" + }, + "title": "Data", + "type": "array" + }, + "first_id": { + "description": "ID of the first batch in the list", + "title": "First Id", + "type": "string", + "nullable": true + }, + "last_id": { + "description": "ID of the last batch in the list", + "title": "Last Id", + "type": "string", + "nullable": true + }, + "has_more": { + "default": false, + "description": "Whether there are more batches available", + "title": "Has More", + "type": "boolean" + } + }, + "required": [ + "data" + ], + "title": "ListBatchesResponse", + "type": "object" + }, + "ConversationDeletedResource": { + "description": "Response for deleted conversation.", + "properties": { + "id": { + "description": "The deleted conversation identifier", + "title": "Id", + "type": "string" + }, + "object": { + "default": "conversation.deleted", + "description": "Object type", + "title": "Object", + "type": "string" + }, + "deleted": { + "default": true, + "description": "Whether the object was deleted", + "title": "Deleted", + "type": "boolean" + } + }, + "required": [ + "id" + ], + "title": "ConversationDeletedResource", + "type": "object" + }, + "ConversationItemDeletedResource": { + "description": "Response for deleted conversation item.", + "properties": { + "id": { + "description": "The deleted item identifier", + "title": "Id", + "type": "string" + }, + "object": { + "default": "conversation.item.deleted", + "description": "Object type", + "title": "Object", + "type": "string" + }, + "deleted": { + "default": true, + "description": "Whether the object was deleted", + "title": "Deleted", + "type": "boolean" + } + }, + "required": [ + "id" + ], + "title": "ConversationItemDeletedResource", + "type": "object" + }, + "ListOpenAIFileResponse": { + "description": "Response for listing files in OpenAI Files API.", + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/OpenAIFileObject" + }, + "title": "Data", + "type": "array" + }, + "has_more": { + "title": "Has More", + "type": "boolean" + }, + "first_id": { + "title": "First Id", + "type": "string" + }, + "last_id": { + "title": "Last Id", + "type": "string" + }, + "object": { + "const": "list", + "default": "list", + "title": "Object", + "type": "string" + } + }, + "required": [ + "data", + "has_more", + "first_id", + "last_id" + ], + "title": "ListOpenAIFileResponse", + "type": "object" + }, + "OpenAIFileDeleteResponse": { + "description": "Response for deleting a file in OpenAI Files API.", + "properties": { + "id": { + "title": "Id", + "type": "string" + }, + "object": { + "const": "file", + "default": "file", + "title": "Object", + "type": "string" + }, + "deleted": { + "title": "Deleted", + "type": "boolean" + } + }, + "required": [ + "id", + "deleted" + ], + "title": "OpenAIFileDeleteResponse", + "type": "object" + }, + "ListOpenAIChatCompletionResponse": { + "description": "Response from listing OpenAI-compatible chat completions.", + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/OpenAICompletionWithInputMessages" + }, + "title": "Data", + "type": "array" + }, + "has_more": { + "title": "Has More", + "type": "boolean" + }, + "first_id": { + "title": "First Id", + "type": "string" + }, + "last_id": { + "title": "Last Id", + "type": "string" + }, + "object": { + "const": "list", + "default": "list", + "title": "Object", + "type": "string" + } + }, + "required": [ + "data", + "has_more", + "first_id", + "last_id" + ], + 
"title": "ListOpenAIChatCompletionResponse", + "type": "object" + }, + "OpenAIAssistantMessageParam": { + "description": "A message containing the model's (assistant) response in an OpenAI-compatible chat completion request.", + "properties": { + "role": { + "const": "assistant", + "default": "assistant", + "title": "Role", + "type": "string" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + "type": "array" + } + ], + "title": "Content", + "nullable": true + }, + "name": { + "title": "Name", + "type": "string", + "nullable": true + }, + "tool_calls": { + "title": "Tool Calls", + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionToolCall" + }, + "type": "array", + "nullable": true + } + }, + "title": "OpenAIAssistantMessageParam", + "type": "object" + }, + "OpenAIChoice": { + "description": "A choice from an OpenAI-compatible chat completion response.", + "properties": { + "message": { + "discriminator": { + "mapping": { + "assistant": "#/$defs/OpenAIAssistantMessageParam", + "developer": "#/$defs/OpenAIDeveloperMessageParam", + "system": "#/$defs/OpenAISystemMessageParam", + "tool": "#/$defs/OpenAIToolMessageParam", + "user": "#/$defs/OpenAIUserMessageParam" + }, + "propertyName": "role" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIUserMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAISystemMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIAssistantMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIToolMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIDeveloperMessageParam" + } + ], + "title": "Message" + }, + "finish_reason": { + "title": "Finish Reason", + "type": "string" + }, + "index": { + "title": "Index", + "type": "integer" + }, + "logprobs": { + "$ref": "#/components/schemas/OpenAIChoiceLogprobs", + "nullable": true + } + }, + "required": [ + "message", + "finish_reason", + "index" + ], + "title": "OpenAIChoice", + "type": "object" + }, + "OpenAIChoiceLogprobs": { + "description": "The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response.", + "properties": { + "content": { + "title": "Content", + "items": { + "$ref": "#/components/schemas/OpenAITokenLogProb" + }, + "type": "array", + "nullable": true + }, + "refusal": { + "title": "Refusal", + "items": { + "$ref": "#/components/schemas/OpenAITokenLogProb" + }, + "type": "array", + "nullable": true + } + }, + "title": "OpenAIChoiceLogprobs", + "type": "object" + }, + "OpenAICompletionWithInputMessages": { + "properties": { + "id": { + "title": "Id", + "type": "string" + }, + "choices": { + "items": { + "$ref": "#/components/schemas/OpenAIChoice" + }, + "title": "Choices", + "type": "array" + }, + "object": { + "const": "chat.completion", + "default": "chat.completion", + "title": "Object", + "type": "string" + }, + "created": { + "title": "Created", + "type": "integer" + }, + "model": { + "title": "Model", + "type": "string" + }, + "usage": { + "$ref": "#/components/schemas/OpenAIChatCompletionUsage", + "nullable": true + }, + "input_messages": { + "items": { + "discriminator": { + "mapping": { + "assistant": "#/$defs/OpenAIAssistantMessageParam", + "developer": "#/$defs/OpenAIDeveloperMessageParam", + "system": "#/$defs/OpenAISystemMessageParam", + "tool": "#/$defs/OpenAIToolMessageParam", + "user": "#/$defs/OpenAIUserMessageParam" + }, + "propertyName": "role" + }, + "oneOf": [ + { + "$ref": 
"#/components/schemas/OpenAIUserMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAISystemMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIAssistantMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIToolMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIDeveloperMessageParam" + } + ] + }, + "title": "Input Messages", + "type": "array" + } + }, + "required": [ + "id", + "choices", + "created", + "model", + "input_messages" + ], + "title": "OpenAICompletionWithInputMessages", + "type": "object" + }, + "OpenAIUserMessageParam": { + "description": "A message from the user in an OpenAI-compatible chat completion request.", + "properties": { + "role": { + "const": "user", + "default": "user", + "title": "Role", + "type": "string" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "discriminator": { + "mapping": { + "file": "#/$defs/OpenAIFile", + "image_url": "#/$defs/OpenAIChatCompletionContentPartImageParam", + "text": "#/$defs/OpenAIChatCompletionContentPartTextParam" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartImageParam" + }, + { + "$ref": "#/components/schemas/OpenAIFile" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "name": { + "title": "Name", + "type": "string", + "nullable": true + } + }, + "required": [ + "content" + ], + "title": "OpenAIUserMessageParam", + "type": "object" + }, + "ScoringFn": { + "description": "A scoring function resource for evaluating model outputs.", + "properties": { + "identifier": { + "description": "Unique identifier for this resource in llama stack", + "title": "Identifier", + "type": "string" + }, + "provider_resource_id": { + "description": "Unique identifier for this resource in the provider", + "title": "Provider Resource Id", + "type": "string", + "nullable": true + }, + "provider_id": { + "description": "ID of the provider that owns this resource", + "title": "Provider Id", + "type": "string" + }, + "type": { + "const": "scoring_function", + "default": "scoring_function", + "title": "Type", + "type": "string" + }, + "description": { + "title": "Description", + "type": "string", + "nullable": true + }, + "metadata": { + "additionalProperties": true, + "description": "Any additional metadata for this definition", + "title": "Metadata", + "type": "object" + }, + "return_type": { + "description": "The return type of the deterministic function", + "discriminator": { + "mapping": { + "agent_turn_input": "#/$defs/AgentTurnInputType", + "array": "#/$defs/ArrayType", + "boolean": "#/$defs/BooleanType", + "chat_completion_input": "#/$defs/ChatCompletionInputType", + "completion_input": "#/$defs/CompletionInputType", + "json": "#/$defs/JsonType", + "number": "#/$defs/NumberType", + "object": "#/$defs/ObjectType", + "string": "#/$defs/StringType", + "union": "#/$defs/UnionType" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/StringType" + }, + { + "$ref": "#/components/schemas/NumberType" + }, + { + "$ref": "#/components/schemas/BooleanType" + }, + { + "$ref": "#/components/schemas/ArrayType" + }, + { + "$ref": "#/components/schemas/ObjectType" + }, + { + "$ref": "#/components/schemas/JsonType" + }, + { + "$ref": "#/components/schemas/UnionType" + }, + { + "$ref": "#/components/schemas/ChatCompletionInputType" + }, + { + "$ref": "#/components/schemas/CompletionInputType" + }, + { + 
"$ref": "#/components/schemas/AgentTurnInputType" + } + ], + "title": "Return Type" + }, + "params": { + "description": "The parameters for the scoring function for benchmark eval, these can be overridden for app eval", + "title": "Params", + "discriminator": { + "mapping": { + "basic": "#/$defs/BasicScoringFnParams", + "llm_as_judge": "#/$defs/LLMAsJudgeScoringFnParams", + "regex_parser": "#/$defs/RegexParserScoringFnParams" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/LLMAsJudgeScoringFnParams" + }, + { + "$ref": "#/components/schemas/RegexParserScoringFnParams" + }, + { + "$ref": "#/components/schemas/BasicScoringFnParams" + } + ], + "nullable": true + } + }, + "required": [ + "identifier", + "provider_id", + "return_type" + ], + "title": "ScoringFn", + "type": "object" + }, + "ListToolDefsResponse": { + "description": "Response containing a list of tool definitions.", + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/ToolDef" + }, + "title": "Data", + "type": "array" + } + }, + "required": [ + "data" + ], + "title": "ListToolDefsResponse", + "type": "object" + }, + "VectorStoreDeleteResponse": { + "description": "Response from deleting a vector store.", + "properties": { + "id": { + "title": "Id", + "type": "string" + }, + "object": { + "default": "vector_store.deleted", + "title": "Object", + "type": "string" + }, + "deleted": { + "default": true, + "title": "Deleted", + "type": "boolean" + } + }, + "required": [ + "id" + ], + "title": "VectorStoreDeleteResponse", + "type": "object" + }, + "VectorStoreFileContentsResponse": { + "description": "Response from retrieving the contents of a vector store file.", + "properties": { + "file_id": { + "title": "File Id", + "type": "string" + }, + "filename": { + "title": "Filename", + "type": "string" + }, + "attributes": { + "additionalProperties": true, + "title": "Attributes", + "type": "object" + }, + "content": { + "items": { + "$ref": "#/components/schemas/VectorStoreContent" + }, + "title": "Content", + "type": "array" + } + }, + "required": [ + "file_id", + "filename", + "attributes", + "content" + ], + "title": "VectorStoreFileContentsResponse", + "type": "object" + }, + "VectorStoreFileDeleteResponse": { + "description": "Response from deleting a vector store file.", + "properties": { + "id": { + "title": "Id", + "type": "string" + }, + "object": { + "default": "vector_store.file.deleted", + "title": "Object", + "type": "string" + }, + "deleted": { + "default": true, + "title": "Deleted", + "type": "boolean" + } + }, + "required": [ + "id" + ], + "title": "VectorStoreFileDeleteResponse", + "type": "object" + }, + "VectorStoreFilesListInBatchResponse": { + "description": "Response from listing files in a vector store file batch.", + "properties": { + "object": { + "default": "list", + "title": "Object", + "type": "string" + }, + "data": { + "items": { + "$ref": "#/components/schemas/VectorStoreFileObject" + }, + "title": "Data", + "type": "array" + }, + "first_id": { + "title": "First Id", + "type": "string", + "nullable": true + }, + "last_id": { + "title": "Last Id", + "type": "string", + "nullable": true + }, + "has_more": { + "default": false, + "title": "Has More", + "type": "boolean" + } + }, + "required": [ + "data" + ], + "title": "VectorStoreFilesListInBatchResponse", + "type": "object" + }, + "VectorStoreListFilesResponse": { + "description": "Response from listing files in a vector store.", + "properties": { + "object": { + "default": "list", + "title": "Object", 
+ "type": "string" + }, + "data": { + "items": { + "$ref": "#/components/schemas/VectorStoreFileObject" + }, + "title": "Data", + "type": "array" + }, + "first_id": { + "title": "First Id", + "type": "string", + "nullable": true + }, + "last_id": { + "title": "Last Id", + "type": "string", + "nullable": true + }, + "has_more": { + "default": false, + "title": "Has More", + "type": "boolean" + } + }, + "required": [ + "data" + ], + "title": "VectorStoreListFilesResponse", + "type": "object" + }, + "VectorStoreListResponse": { + "description": "Response from listing vector stores.", + "properties": { + "object": { + "default": "list", + "title": "Object", + "type": "string" + }, + "data": { + "items": { + "$ref": "#/components/schemas/VectorStoreObject" + }, + "title": "Data", + "type": "array" + }, + "first_id": { + "title": "First Id", + "type": "string", + "nullable": true + }, + "last_id": { + "title": "Last Id", + "type": "string", + "nullable": true + }, + "has_more": { + "default": false, + "title": "Has More", + "type": "boolean" + } + }, + "required": [ + "data" + ], + "title": "VectorStoreListResponse", + "type": "object" + }, + "OpenAIResponseMessage": { + "description": "Corresponds to the various Message types in the Responses API.\nThey are all under one type because the Responses API gives them all\nthe same \"type\" value, and there is no way to tell them apart in certain\nscenarios.", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "discriminator": { + "mapping": { + "input_file": "#/$defs/OpenAIResponseInputMessageContentFile", + "input_image": "#/$defs/OpenAIResponseInputMessageContentImage", + "input_text": "#/$defs/OpenAIResponseInputMessageContentText" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentImage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentFile" + } + ] + }, + "type": "array" + }, + { + "items": { + "discriminator": { + "mapping": { + "output_text": "#/$defs/OpenAIResponseOutputMessageContentOutputText", + "refusal": "#/$defs/OpenAIResponseContentPartRefusal" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageContentOutputText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseContentPartRefusal" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "role": { + "anyOf": [ + { + "const": "system", + "type": "string" + }, + { + "const": "developer", + "type": "string" + }, + { + "const": "user", + "type": "string" + }, + { + "const": "assistant", + "type": "string" + } + ], + "title": "Role" + }, + "type": { + "const": "message", + "default": "message", + "title": "Type", + "type": "string" + }, + "id": { + "title": "Id", + "type": "string", + "nullable": true + }, + "status": { + "title": "Status", + "type": "string", + "nullable": true + } + }, + "required": [ + "content", + "role" + ], + "title": "OpenAIResponseMessage", + "type": "object" + }, + "OpenAIResponseObjectWithInput": { + "description": "OpenAI response object extended with input context information.", + "properties": { + "created_at": { + "title": "Created At", + "type": "integer" + }, + "error": { + "$ref": "#/components/schemas/OpenAIResponseError", + "nullable": true + }, + "id": { + "title": "Id", + "type": "string" + }, + "model": { + "title": "Model", + "type": "string" + }, + "object": { + 
"const": "response", + "default": "response", + "title": "Object", + "type": "string" + }, + "output": { + "items": { + "discriminator": { + "mapping": { + "file_search_call": "#/$defs/OpenAIResponseOutputMessageFileSearchToolCall", + "function_call": "#/$defs/OpenAIResponseOutputMessageFunctionToolCall", + "mcp_approval_request": "#/$defs/OpenAIResponseMCPApprovalRequest", + "mcp_call": "#/$defs/OpenAIResponseOutputMessageMCPCall", + "mcp_list_tools": "#/$defs/OpenAIResponseOutputMessageMCPListTools", + "message": "#/$defs/OpenAIResponseMessage", + "web_search_call": "#/$defs/OpenAIResponseOutputMessageWebSearchToolCall" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest" + } + ] + }, + "title": "Output", + "type": "array" + }, + "parallel_tool_calls": { + "default": false, + "title": "Parallel Tool Calls", + "type": "boolean" + }, + "previous_response_id": { + "title": "Previous Response Id", + "type": "string", + "nullable": true + }, + "prompt": { + "$ref": "#/components/schemas/OpenAIResponsePrompt", + "nullable": true + }, + "status": { + "title": "Status", + "type": "string" + }, + "temperature": { + "title": "Temperature", + "type": "number", + "nullable": true + }, + "text": { + "$ref": "#/components/schemas/OpenAIResponseText", + "default": { + "format": { + "type": "text" + } + } + }, + "top_p": { + "title": "Top P", + "type": "number", + "nullable": true + }, + "tools": { + "title": "Tools", + "items": { + "discriminator": { + "mapping": { + "file_search": "#/$defs/OpenAIResponseInputToolFileSearch", + "function": "#/$defs/OpenAIResponseInputToolFunction", + "mcp": "#/$defs/OpenAIResponseToolMCP", + "web_search": "#/$defs/OpenAIResponseInputToolWebSearch", + "web_search_preview": "#/$defs/OpenAIResponseInputToolWebSearch", + "web_search_preview_2025_03_11": "#/$defs/OpenAIResponseInputToolWebSearch" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseInputToolWebSearch" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolFileSearch" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolFunction" + }, + { + "$ref": "#/components/schemas/OpenAIResponseToolMCP" + } + ] + }, + "type": "array", + "nullable": true + }, + "truncation": { + "title": "Truncation", + "type": "string", + "nullable": true + }, + "usage": { + "$ref": "#/components/schemas/OpenAIResponseUsage", + "nullable": true + }, + "instructions": { + "title": "Instructions", + "type": "string", + "nullable": true + }, + "input": { + "items": { + "anyOf": [ + { + "discriminator": { + "mapping": { + "file_search_call": "#/$defs/OpenAIResponseOutputMessageFileSearchToolCall", + "function_call": "#/$defs/OpenAIResponseOutputMessageFunctionToolCall", + "mcp_approval_request": "#/$defs/OpenAIResponseMCPApprovalRequest", + "mcp_call": "#/$defs/OpenAIResponseOutputMessageMCPCall", + "mcp_list_tools": "#/$defs/OpenAIResponseOutputMessageMCPListTools", + "message": "#/$defs/OpenAIResponseMessage", + "web_search_call": 
"#/$defs/OpenAIResponseOutputMessageWebSearchToolCall" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest" + } + ] + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalResponse" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMessage" + } + ] + }, + "title": "Input", + "type": "array" + } + }, + "required": [ + "created_at", + "id", + "model", + "output", + "status", + "input" + ], + "title": "OpenAIResponseObjectWithInput", + "type": "object" + }, + "_safety_run_shield_Request": { + "properties": { + "shield_id": { + "title": "Shield Id", + "type": "string" + }, + "messages": { + "anyOf": [ + { + "$ref": "#/components/schemas/OpenAIUserMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAISystemMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIAssistantMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIToolMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIDeveloperMessageParam" + } + ], + "title": "Messages" + }, + "params": { + "title": "Params", + "type": "string" + } + }, + "required": [ + "shield_id", + "messages", + "params" + ], + "title": "_safety_run_shield_Request", + "type": "object" + } + }, + "responses": { + "BadRequest400": { + "description": "The request was invalid or malformed", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Error" + }, + "example": { + "status": 400, + "title": "Bad Request", + "detail": "The request was invalid or malformed" + } + } + } + }, + "TooManyRequests429": { + "description": "The client has sent too many requests in a given amount of time", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Error" + }, + "example": { + "status": 429, + "title": "Too Many Requests", + "detail": "You have exceeded the rate limit. Please try again later." + } + } + } + }, + "InternalServerError500": { + "description": "The server encountered an unexpected error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Error" + }, + "example": { + "status": 500, + "title": "Internal Server Error", + "detail": "An unexpected error occurred. Our team has been notified." + } + } + } + }, + "DefaultError": { + "description": "An unexpected error occurred", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Error" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/docs/static/llama-stack-spec.yaml b/docs/static/llama-stack-spec.yaml index dd2fe12cf..b1b3ca8cd 100644 --- a/docs/static/llama-stack-spec.yaml +++ b/docs/static/llama-stack-spec.yaml @@ -13,7 +13,7 @@ paths: get: tags: - V1 - summary: List Batches + summary: List all batches for the current user. description: Query endpoint for proper schema generation. 
operationId: list_batches_v1_batches_get parameters: @@ -52,7 +52,7 @@ paths: post: tags: - V1 - summary: Create Batch + summary: Create a new batch for processing multiple API requests. description: Typed endpoint for proper schema generation. operationId: create_batch_v1_batches_post requestBody: @@ -84,7 +84,7 @@ paths: get: tags: - V1 - summary: Retrieve Batch + summary: Retrieve information about a specific batch. description: Query endpoint for proper schema generation. operationId: retrieve_batch_v1_batches__batch_id__get parameters: @@ -94,6 +94,7 @@ paths: schema: type: string title: Batch Id + description: The ID of the batch to retrieve. responses: '200': description: The batch object. @@ -117,7 +118,7 @@ paths: post: tags: - V1 - summary: Cancel Batch + summary: Cancel a batch that is in progress. description: Typed endpoint for proper schema generation. operationId: cancel_batch_v1_batches__batch_id__cancel_post requestBody: @@ -151,12 +152,12 @@ paths: required: true schema: type: string - description: 'Path parameter: batch_id' + description: The ID of the batch to cancel. /v1/chat/completions: get: tags: - V1 - summary: List Chat Completions + summary: List chat completions. description: Query endpoint for proper schema generation. operationId: list_chat_completions_v1_chat_completions_get parameters: @@ -207,7 +208,7 @@ paths: post: tags: - V1 - summary: Openai Chat Completion + summary: Create chat completions. description: Typed endpoint for proper schema generation. operationId: openai_chat_completion_v1_chat_completions_post requestBody: @@ -239,7 +240,7 @@ paths: get: tags: - V1 - summary: Get Chat Completion + summary: Get chat completion. description: Query endpoint for proper schema generation. operationId: get_chat_completion_v1_chat_completions__completion_id__get parameters: @@ -249,6 +250,7 @@ paths: schema: type: string title: Completion Id + description: ID of the chat completion. responses: '200': description: A OpenAICompletionWithInputMessages. @@ -272,7 +274,7 @@ paths: post: tags: - V1 - summary: Openai Completion + summary: Create completion. description: Typed endpoint for proper schema generation. operationId: openai_completion_v1_completions_post requestBody: @@ -304,7 +306,7 @@ paths: post: tags: - V1 - summary: Create Conversation + summary: Create a conversation. description: Typed endpoint for proper schema generation. operationId: create_conversation_v1_conversations_post requestBody: @@ -336,7 +338,7 @@ paths: delete: tags: - V1 - summary: Openai Delete Conversation + summary: Delete a conversation. description: Query endpoint for proper schema generation. operationId: openai_delete_conversation_v1_conversations__conversation_id__delete parameters: @@ -346,6 +348,7 @@ paths: schema: type: string title: Conversation Id + description: The conversation identifier. responses: '200': description: The deleted conversation resource. @@ -368,7 +371,7 @@ paths: get: tags: - V1 - summary: Get Conversation + summary: Retrieve a conversation. description: Query endpoint for proper schema generation. operationId: get_conversation_v1_conversations__conversation_id__get parameters: @@ -378,6 +381,7 @@ paths: schema: type: string title: Conversation Id + description: The conversation identifier. responses: '200': description: The conversation object. @@ -400,7 +404,7 @@ paths: post: tags: - V1 - summary: Update Conversation + summary: Update a conversation. description: Typed endpoint for proper schema generation. 
operationId: update_conversation_v1_conversations__conversation_id__post requestBody: @@ -434,21 +438,15 @@ paths: required: true schema: type: string - description: 'Path parameter: conversation_id' + description: The conversation identifier. /v1/conversations/{conversation_id}/items: get: tags: - V1 - summary: List Items + summary: List items. description: Query endpoint for proper schema generation. operationId: list_items_v1_conversations__conversation_id__items_get parameters: - - name: conversation_id - in: path - required: true - schema: - type: string - title: Conversation Id - name: after in: query required: true @@ -472,6 +470,13 @@ paths: schema: type: string title: Order + - name: conversation_id + in: path + required: true + schema: + type: string + title: Conversation Id + description: The conversation identifier. responses: '200': description: List of conversation items. @@ -494,7 +499,7 @@ paths: post: tags: - V1 - summary: Add Items + summary: Create items. description: Typed endpoint for proper schema generation. operationId: add_items_v1_conversations__conversation_id__items_post requestBody: @@ -528,12 +533,12 @@ paths: required: true schema: type: string - description: 'Path parameter: conversation_id' + description: The conversation identifier. /v1/conversations/{conversation_id}/items/{item_id}: delete: tags: - V1 - summary: Openai Delete Conversation Item + summary: Delete an item. description: Query endpoint for proper schema generation. operationId: openai_delete_conversation_item_v1_conversations__conversation_id__items__item_id__delete parameters: @@ -543,12 +548,14 @@ paths: schema: type: string title: Conversation Id + description: The conversation identifier. - name: item_id in: path required: true schema: type: string title: Item Id + description: The item identifier. responses: '200': description: The deleted item resource. @@ -571,7 +578,7 @@ paths: get: tags: - V1 - summary: Retrieve + summary: Retrieve an item. description: Query endpoint for proper schema generation. operationId: retrieve_v1_conversations__conversation_id__items__item_id__get parameters: @@ -581,12 +588,14 @@ paths: schema: type: string title: Conversation Id + description: The conversation identifier. - name: item_id in: path required: true schema: type: string title: Item Id + description: The item identifier. responses: '200': description: The conversation item. @@ -610,7 +619,7 @@ paths: post: tags: - V1 - summary: Openai Embeddings + summary: Create embeddings. description: Typed endpoint for proper schema generation. operationId: openai_embeddings_v1_embeddings_post requestBody: @@ -642,7 +651,7 @@ paths: get: tags: - V1 - summary: Openai List Files + summary: List files. description: Query endpoint for proper schema generation. operationId: openai_list_files_v1_files_get parameters: @@ -692,7 +701,7 @@ paths: post: tags: - V1 - summary: Openai Upload File + summary: Upload file. description: Response-only endpoint for proper schema generation. operationId: openai_upload_file_v1_files_post responses: @@ -718,7 +727,7 @@ paths: delete: tags: - V1 - summary: Openai Delete File + summary: Delete file. description: Query endpoint for proper schema generation. operationId: openai_delete_file_v1_files__file_id__delete parameters: @@ -728,6 +737,7 @@ paths: schema: type: string title: File Id + description: The ID of the file to use for this request. responses: '200': description: An OpenAIFileDeleteResponse indicating successful deletion. 
@@ -750,7 +760,7 @@ paths: get: tags: - V1 - summary: Openai Retrieve File + summary: Retrieve file. description: Query endpoint for proper schema generation. operationId: openai_retrieve_file_v1_files__file_id__get parameters: @@ -760,6 +770,7 @@ paths: schema: type: string title: File Id + description: The ID of the file to use for this request. responses: '200': description: An OpenAIFileObject containing file information. @@ -783,7 +794,7 @@ paths: get: tags: - V1 - summary: Openai Retrieve File Content + summary: Retrieve file content. description: Generic endpoint - this would be replaced with actual implementation. operationId: openai_retrieve_file_content_v1_files__file_id__content_get parameters: @@ -802,7 +813,7 @@ paths: required: true schema: type: string - description: 'Path parameter: file_id' + description: The ID of the file to use for this request. responses: '200': description: The raw file content as a binary response. @@ -825,7 +836,7 @@ paths: get: tags: - V1 - summary: Health + summary: Get health status. description: Response-only endpoint for proper schema generation. operationId: health_v1_health_get responses: @@ -851,7 +862,7 @@ paths: get: tags: - V1 - summary: List Routes + summary: List routes. description: Response-only endpoint for proper schema generation. operationId: list_routes_v1_inspect_routes_get responses: @@ -901,7 +912,7 @@ paths: get: tags: - V1 - summary: List Models + summary: List all models. description: Response-only endpoint for proper schema generation. operationId: list_models_v1_models_get responses: @@ -931,7 +942,7 @@ paths: post: tags: - V1 - summary: Register Model + summary: Register model. description: Typed endpoint for proper schema generation. operationId: register_model_v1_models_post requestBody: @@ -963,7 +974,7 @@ paths: delete: tags: - V1 - summary: Unregister Model + summary: Unregister model. description: Generic endpoint - this would be replaced with actual implementation. operationId: unregister_model_v1_models__model_id__delete parameters: @@ -1004,7 +1015,7 @@ paths: get: tags: - V1 - summary: Get Model + summary: Get model. description: Query endpoint for proper schema generation. operationId: get_model_v1_models__model_id__get parameters: @@ -1037,7 +1048,7 @@ paths: post: tags: - V1 - summary: Run Moderation + summary: Create moderation. description: Typed endpoint for proper schema generation. operationId: run_moderation_v1_moderations_post requestBody: @@ -1069,7 +1080,7 @@ paths: get: tags: - V1 - summary: List Prompts + summary: List all prompts. description: Response-only endpoint for proper schema generation. operationId: list_prompts_v1_prompts_get responses: @@ -1094,7 +1105,7 @@ paths: post: tags: - V1 - summary: Create Prompt + summary: Create prompt. description: Typed endpoint for proper schema generation. operationId: create_prompt_v1_prompts_post requestBody: @@ -1126,7 +1137,7 @@ paths: delete: tags: - V1 - summary: Delete Prompt + summary: Delete prompt. description: Generic endpoint - this would be replaced with actual implementation. operationId: delete_prompt_v1_prompts__prompt_id__delete parameters: @@ -1140,13 +1151,12 @@ paths: required: true schema: title: Kwargs - - &id001 - name: prompt_id + - name: prompt_id in: path required: true schema: type: string - description: 'Path parameter: prompt_id' + description: The identifier of the prompt to delete. responses: '200': description: Successful Response @@ -1168,22 +1178,23 @@ paths: get: tags: - V1 - summary: Get Prompt + summary: Get prompt. 
description: Query endpoint for proper schema generation. operationId: get_prompt_v1_prompts__prompt_id__get parameters: - - name: prompt_id - in: path - required: true - schema: - type: string - title: Prompt Id - name: version in: query required: true schema: type: integer title: Version + - name: prompt_id + in: path + required: true + schema: + type: string + title: Prompt Id + description: The identifier of the prompt to get. responses: '200': description: A Prompt resource. @@ -1206,7 +1217,7 @@ paths: post: tags: - V1 - summary: Update Prompt + summary: Update prompt. description: Typed endpoint for proper schema generation. operationId: update_prompt_v1_prompts__prompt_id__post requestBody: @@ -1235,12 +1246,17 @@ paths: $ref: '#/components/responses/DefaultError' description: Default Response parameters: - - *id001 + - name: prompt_id + in: path + required: true + schema: + type: string + description: The identifier of the prompt to update. /v1/prompts/{prompt_id}/set-default-version: post: tags: - V1 - summary: Set Default Version + summary: Set the default prompt version. description: Typed endpoint for proper schema generation. operationId: set_default_version_v1_prompts__prompt_id__set_default_version_post requestBody: @@ -1274,12 +1290,12 @@ paths: required: true schema: type: string - description: 'Path parameter: prompt_id' + description: The identifier of the prompt. /v1/prompts/{prompt_id}/versions: get: tags: - V1 - summary: List Prompt Versions + summary: List prompt versions. description: Query endpoint for proper schema generation. operationId: list_prompt_versions_v1_prompts__prompt_id__versions_get parameters: @@ -1289,6 +1305,7 @@ paths: schema: type: string title: Prompt Id + description: The identifier of the prompt to list versions for. responses: '200': description: A ListPromptsResponse containing all versions of the prompt. @@ -1312,7 +1329,7 @@ paths: get: tags: - V1 - summary: List Providers + summary: List providers. description: Response-only endpoint for proper schema generation. operationId: list_providers_v1_providers_get responses: @@ -1338,7 +1355,7 @@ paths: get: tags: - V1 - summary: Inspect Provider + summary: Get provider. description: Query endpoint for proper schema generation. operationId: inspect_provider_v1_providers__provider_id__get parameters: @@ -1348,6 +1365,7 @@ paths: schema: type: string title: Provider Id + description: The ID of the provider to inspect. responses: '200': description: A ProviderInfo object containing the provider's details. @@ -1371,7 +1389,7 @@ paths: get: tags: - V1 - summary: List Openai Responses + summary: List all responses. description: Query endpoint for proper schema generation. operationId: list_openai_responses_v1_responses_get parameters: @@ -1422,7 +1440,7 @@ paths: post: tags: - V1 - summary: Create Openai Response + summary: Create a model response. description: Typed endpoint for proper schema generation. operationId: create_openai_response_v1_responses_post requestBody: @@ -1454,7 +1472,7 @@ paths: delete: tags: - V1 - summary: Delete Openai Response + summary: Delete a response. description: Query endpoint for proper schema generation. operationId: delete_openai_response_v1_responses__response_id__delete parameters: @@ -1464,6 +1482,7 @@ paths: schema: type: string title: Response Id + description: The ID of the OpenAI response to delete. responses: '200': description: An OpenAIDeleteResponseObject @@ -1486,7 +1505,7 @@ paths: get: tags: - V1 - summary: Get Openai Response + summary: Get a model response.
description: Query endpoint for proper schema generation. operationId: get_openai_response_v1_responses__response_id__get parameters: @@ -1496,6 +1515,7 @@ paths: schema: type: string title: Response Id + description: The ID of the OpenAI response to retrieve. responses: '200': description: An OpenAIResponseObject. @@ -1519,16 +1539,10 @@ paths: get: tags: - V1 - summary: List Openai Response Input Items + summary: List input items. description: Query endpoint for proper schema generation. operationId: list_openai_response_input_items_v1_responses__response_id__input_items_get parameters: - - name: response_id - in: path - required: true - schema: - type: string - title: Response Id - name: after in: query required: true @@ -1560,6 +1574,13 @@ paths: schema: $ref: '#/components/schemas/Order' default: desc + - name: response_id + in: path + required: true + schema: + type: string + title: Response Id + description: The ID of the response to retrieve input items for. responses: '200': description: An ListOpenAIResponseInputItem. @@ -1583,7 +1604,7 @@ paths: post: tags: - V1 - summary: Run Shield + summary: Run shield. description: Typed endpoint for proper schema generation. operationId: run_shield_v1_safety_run_shield_post requestBody: @@ -1615,7 +1636,7 @@ paths: get: tags: - V1 - summary: List Scoring Functions + summary: List all scoring functions. description: Response-only endpoint for proper schema generation. operationId: list_scoring_functions_v1_scoring_functions_get responses: @@ -1640,7 +1661,7 @@ paths: post: tags: - V1 - summary: Register Scoring Function + summary: Register a scoring function. description: Generic endpoint - this would be replaced with actual implementation. operationId: register_scoring_function_v1_scoring_functions_post parameters: @@ -1676,7 +1697,7 @@ paths: delete: tags: - V1 - summary: Unregister Scoring Function + summary: Unregister a scoring function. description: Generic endpoint - this would be replaced with actual implementation. operationId: unregister_scoring_function_v1_scoring_functions__scoring_fn_id__delete parameters: @@ -1717,7 +1738,7 @@ paths: get: tags: - V1 - summary: Get Scoring Function + summary: Get a scoring function by its ID. description: Query endpoint for proper schema generation. operationId: get_scoring_function_v1_scoring_functions__scoring_fn_id__get parameters: @@ -1750,7 +1771,7 @@ paths: post: tags: - V1 - summary: Score + summary: Score a list of rows. description: Typed endpoint for proper schema generation. operationId: score_v1_scoring_score_post requestBody: @@ -1782,7 +1803,7 @@ paths: post: tags: - V1 - summary: Score Batch + summary: Score a batch of rows. description: Typed endpoint for proper schema generation. operationId: score_batch_v1_scoring_score_batch_post requestBody: @@ -1814,7 +1835,7 @@ paths: get: tags: - V1 - summary: List Shields + summary: List all shields. description: Response-only endpoint for proper schema generation. operationId: list_shields_v1_shields_get responses: @@ -1839,7 +1860,7 @@ paths: post: tags: - V1 - summary: Register Shield + summary: Register a shield. description: Typed endpoint for proper schema generation. operationId: register_shield_v1_shields_post requestBody: @@ -1871,7 +1892,7 @@ paths: delete: tags: - V1 - summary: Unregister Shield + summary: Unregister a shield. description: Generic endpoint - this would be replaced with actual implementation. 
operationId: unregister_shield_v1_shields__identifier__delete parameters: @@ -1912,7 +1933,7 @@ paths: get: tags: - V1 - summary: Get Shield + summary: Get a shield by its identifier. description: Query endpoint for proper schema generation. operationId: get_shield_v1_shields__identifier__get parameters: @@ -1983,7 +2004,7 @@ paths: post: tags: - V1 - summary: Invoke Tool + summary: Run a tool with the given arguments. description: Typed endpoint for proper schema generation. operationId: invoke_tool_v1_tool_runtime_invoke_post requestBody: @@ -2015,7 +2036,7 @@ paths: get: tags: - V1 - summary: List Runtime Tools + summary: List all tools in the runtime. description: Query endpoint for proper schema generation. operationId: list_runtime_tools_v1_tool_runtime_list_tools_get parameters: @@ -2054,7 +2075,7 @@ paths: post: tags: - V1 - summary: Rag Tool.Insert + summary: Index documents so they can be used by the RAG system. description: Generic endpoint - this would be replaced with actual implementation. operationId: rag_tool_insert_v1_tool_runtime_rag_tool_insert_post parameters: @@ -2090,7 +2111,7 @@ paths: post: tags: - V1 - summary: Rag Tool.Query + summary: Query the RAG system for context; typically invoked by the agent. description: Typed endpoint for proper schema generation. operationId: rag_tool_query_v1_tool_runtime_rag_tool_query_post requestBody: @@ -2122,7 +2143,7 @@ paths: get: tags: - V1 - summary: List Tool Groups + summary: List tool groups with optional provider. description: Response-only endpoint for proper schema generation. operationId: list_tool_groups_v1_toolgroups_get responses: @@ -2147,7 +2168,7 @@ paths: post: tags: - V1 - summary: Register Tool Group + summary: Register a tool group. description: Generic endpoint - this would be replaced with actual implementation. operationId: register_tool_group_v1_toolgroups_post parameters: @@ -2183,7 +2204,7 @@ paths: delete: tags: - V1 - summary: Unregister Toolgroup + summary: Unregister a tool group. description: Generic endpoint - this would be replaced with actual implementation. operationId: unregister_toolgroup_v1_toolgroups__toolgroup_id__delete parameters: @@ -2224,7 +2245,7 @@ paths: get: tags: - V1 - summary: Get Tool Group + summary: Get a tool group by its ID. description: Query endpoint for proper schema generation. operationId: get_tool_group_v1_toolgroups__toolgroup_id__get parameters: @@ -2257,7 +2278,7 @@ paths: get: tags: - V1 - summary: List Tools + summary: List tools with optional tool group. description: Query endpoint for proper schema generation. operationId: list_tools_v1_tools_get parameters: @@ -2290,7 +2311,7 @@ paths: get: tags: - V1 - summary: Get Tool + summary: Get a tool by its name. description: Query endpoint for proper schema generation. operationId: get_tool_v1_tools__tool_name__get parameters: @@ -2323,7 +2344,7 @@ paths: post: tags: - V1 - summary: Insert Chunks + summary: Insert chunks into a vector database. description: Generic endpoint - this would be replaced with actual implementation. operationId: insert_chunks_v1_vector_io_insert_post parameters: @@ -2359,7 +2380,7 @@ paths: post: tags: - V1 - summary: Query Chunks + summary: Query chunks from a vector database. description: Typed endpoint for proper schema generation. operationId: query_chunks_v1_vector_io_query_post requestBody: @@ -2391,7 +2412,7 @@ paths: get: tags: - V1 - summary: Openai List Vector Stores + summary: Returns a list of vector stores. description: Query endpoint for proper schema generation. 
operationId: openai_list_vector_stores_v1_vector_stores_get parameters: @@ -2443,7 +2464,7 @@ paths: post: tags: - V1 - summary: Openai Create Vector Store + summary: Creates a vector store. description: Typed endpoint for proper schema generation. operationId: openai_create_vector_store_v1_vector_stores_post requestBody: @@ -2475,7 +2496,7 @@ paths: delete: tags: - V1 - summary: Openai Delete Vector Store + summary: Delete a vector store. description: Query endpoint for proper schema generation. operationId: openai_delete_vector_store_v1_vector_stores__vector_store_id__delete parameters: @@ -2485,6 +2506,7 @@ paths: schema: type: string title: Vector Store Id + description: The ID of the vector store to delete. responses: '200': description: A VectorStoreDeleteResponse indicating the deletion status. @@ -2507,7 +2529,7 @@ paths: get: tags: - V1 - summary: Openai Retrieve Vector Store + summary: Retrieves a vector store. description: Query endpoint for proper schema generation. operationId: openai_retrieve_vector_store_v1_vector_stores__vector_store_id__get parameters: @@ -2517,6 +2539,7 @@ paths: schema: type: string title: Vector Store Id + description: The ID of the vector store to retrieve. responses: '200': description: A VectorStoreObject representing the vector store. @@ -2539,7 +2562,7 @@ paths: post: tags: - V1 - summary: Openai Update Vector Store + summary: Updates a vector store. description: Typed endpoint for proper schema generation. operationId: openai_update_vector_store_v1_vector_stores__vector_store_id__post requestBody: @@ -2573,12 +2596,12 @@ paths: required: true schema: type: string - description: 'Path parameter: vector_store_id' + description: The ID of the vector store to update. /v1/vector_stores/{vector_store_id}/file_batches: post: tags: - V1 - summary: Openai Create Vector Store File Batch + summary: Create a vector store file batch. description: Typed endpoint for proper schema generation. operationId: openai_create_vector_store_file_batch_v1_vector_stores__vector_store_id__file_batches_post requestBody: @@ -2589,8 +2612,7 @@ paths: required: true responses: '200': - description: A VectorStoreFileBatchObject representing the created file - batch. + description: A VectorStoreFileBatchObject representing the created file batch. content: application/json: schema: @@ -2613,12 +2635,12 @@ paths: required: true schema: type: string - description: 'Path parameter: vector_store_id' + description: The ID of the vector store to create the file batch for. /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}: get: tags: - V1 - summary: Openai Retrieve Vector Store File Batch + summary: Retrieve a vector store file batch. description: Query endpoint for proper schema generation. operationId: openai_retrieve_vector_store_file_batch_v1_vector_stores__vector_store_id__file_batches__batch_id__get parameters: @@ -2628,12 +2650,14 @@ paths: schema: type: string title: Batch Id + description: The ID of the file batch to retrieve. - name: vector_store_id in: path required: true schema: type: string title: Vector Store Id + description: The ID of the vector store containing the file batch. responses: '200': description: A VectorStoreFileBatchObject representing the file batch. @@ -2657,7 +2681,7 @@ paths: post: tags: - V1 - summary: Openai Cancel Vector Store File Batch + summary: Cancels a vector store file batch. description: Typed endpoint for proper schema generation. 
operationId: openai_cancel_vector_store_file_batch_v1_vector_stores__vector_store_id__file_batches__batch_id__cancel_post requestBody: @@ -2668,8 +2692,7 @@ paths: required: true responses: '200': - description: A VectorStoreFileBatchObject representing the cancelled file - batch. + description: A VectorStoreFileBatchObject representing the cancelled file batch. content: application/json: schema: @@ -2687,38 +2710,26 @@ paths: description: Default Response $ref: '#/components/responses/DefaultError' parameters: - - name: vector_store_id - in: path - required: true - schema: - type: string - description: 'Path parameter: vector_store_id' - name: batch_id in: path required: true schema: type: string - description: 'Path parameter: batch_id' + description: The ID of the file batch to cancel. + - name: vector_store_id + in: path + required: true + schema: + type: string + description: The ID of the vector store containing the file batch. /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/files: get: tags: - V1 - summary: Openai List Files In Vector Store File Batch + summary: Returns a list of vector store files in a batch. description: Query endpoint for proper schema generation. operationId: openai_list_files_in_vector_store_file_batch_v1_vector_stores__vector_store_id__file_batches__batch_id__files_get parameters: - - name: batch_id - in: path - required: true - schema: - type: string - title: Batch Id - - name: vector_store_id - in: path - required: true - schema: - type: string - title: Vector Store Id - name: after in: query required: true @@ -2751,10 +2762,23 @@ paths: type: string default: desc title: Order + - name: batch_id + in: path + required: true + schema: + type: string + title: Batch Id + description: The ID of the file batch to list files from. + - name: vector_store_id + in: path + required: true + schema: + type: string + title: Vector Store Id + description: The ID of the vector store containing the file batch. responses: '200': - description: A VectorStoreFilesListInBatchResponse containing the list of - files in the batch. + description: A VectorStoreFilesListInBatchResponse containing the list of files in the batch. content: application/json: schema: @@ -2775,16 +2799,10 @@ paths: get: tags: - V1 - summary: Openai List Files In Vector Store + summary: List files in a vector store. description: Query endpoint for proper schema generation. operationId: openai_list_files_in_vector_store_v1_vector_stores__vector_store_id__files_get parameters: - - name: vector_store_id - in: path - required: true - schema: - type: string - title: Vector Store Id - name: after in: query required: true @@ -2817,6 +2835,13 @@ paths: type: string default: desc title: Order + - name: vector_store_id + in: path + required: true + schema: + type: string + title: Vector Store Id + description: The ID of the vector store to list files from. responses: '200': description: A VectorStoreListFilesResponse containing the list of files. @@ -2839,7 +2864,7 @@ paths: post: tags: - V1 - summary: Openai Attach File To Vector Store + summary: Attach a file to a vector store. description: Typed endpoint for proper schema generation. operationId: openai_attach_file_to_vector_store_v1_vector_stores__vector_store_id__files_post requestBody: @@ -2873,27 +2898,29 @@ paths: required: true schema: type: string - description: 'Path parameter: vector_store_id' + description: The ID of the vector store to attach the file to. 
/v1/vector_stores/{vector_store_id}/files/{file_id}: delete: tags: - V1 - summary: Openai Delete Vector Store File + summary: Delete a vector store file. description: Query endpoint for proper schema generation. operationId: openai_delete_vector_store_file_v1_vector_stores__vector_store_id__files__file_id__delete parameters: - - name: file_id - in: path - required: true - schema: - type: string - title: File Id - name: vector_store_id in: path required: true schema: type: string title: Vector Store Id + description: The ID of the vector store containing the file to delete. + - name: file_id + in: path + required: true + schema: + type: string + title: File Id + description: The ID of the file to delete. responses: '200': description: A VectorStoreFileDeleteResponse indicating the deletion status. @@ -2916,22 +2943,24 @@ paths: get: tags: - V1 - summary: Openai Retrieve Vector Store File + summary: Retrieves a vector store file. description: Query endpoint for proper schema generation. operationId: openai_retrieve_vector_store_file_v1_vector_stores__vector_store_id__files__file_id__get parameters: - - name: file_id - in: path - required: true - schema: - type: string - title: File Id - name: vector_store_id in: path required: true schema: type: string title: Vector Store Id + description: The ID of the vector store containing the file to retrieve. + - name: file_id + in: path + required: true + schema: + type: string + title: File Id + description: The ID of the file to retrieve. responses: '200': description: A VectorStoreFileObject representing the file. @@ -2954,7 +2983,7 @@ paths: post: tags: - V1 - summary: Openai Update Vector Store File + summary: Updates a vector store file. description: Typed endpoint for proper schema generation. operationId: openai_update_vector_store_file_v1_vector_stores__vector_store_id__files__file_id__post requestBody: @@ -2988,33 +3017,35 @@ paths: required: true schema: type: string - description: 'Path parameter: vector_store_id' + description: The ID of the vector store containing the file to update. - name: file_id in: path required: true schema: type: string - description: 'Path parameter: file_id' + description: The ID of the file to update. /v1/vector_stores/{vector_store_id}/files/{file_id}/content: get: tags: - V1 - summary: Openai Retrieve Vector Store File Contents + summary: Retrieves the contents of a vector store file. description: Query endpoint for proper schema generation. operationId: openai_retrieve_vector_store_file_contents_v1_vector_stores__vector_store_id__files__file_id__content_get parameters: - - name: file_id - in: path - required: true - schema: - type: string - title: File Id - name: vector_store_id in: path required: true schema: type: string title: Vector Store Id + description: The ID of the vector store containing the file to retrieve. + - name: file_id + in: path + required: true + schema: + type: string + title: File Id + description: The ID of the file to retrieve. responses: '200': description: A list of InterleavedContent representing the file contents. @@ -3038,7 +3069,7 @@ paths: post: tags: - V1 - summary: Openai Search Vector Store + summary: Search for chunks in a vector store. description: Typed endpoint for proper schema generation. operationId: openai_search_vector_store_v1_vector_stores__vector_store_id__search_post requestBody: @@ -3072,12 +3103,12 @@ paths: required: true schema: type: string - description: 'Path parameter: vector_store_id' + description: The ID of the vector store to search. 
/v1/version: get: tags: - V1 - summary: Version + summary: Get version. description: Response-only endpoint for proper schema generation. operationId: version_v1_version_get responses: @@ -8160,10 +8191,7 @@ components: default: agent_turn_input type: object title: AgentTurnInputType - description: 'Parameter type for agent turn input. - - - :param type: Discriminator type. Always "agent_turn_input"' + description: Parameter type for agent turn input. AggregationFunctionType: type: string enum: @@ -8173,17 +8201,7 @@ components: - categorical_count - accuracy title: AggregationFunctionType - description: 'Types of aggregation functions for scoring results. - - :cvar average: Calculate the arithmetic mean of scores - - :cvar weighted_average: Calculate a weighted average of scores - - :cvar median: Calculate the median value of scores - - :cvar categorical_count: Count occurrences of categorical values - - :cvar accuracy: Calculate accuracy as the proportion of correct answers' + description: Types of aggregation functions for scoring results. AllowedToolsFilter: properties: tool_names: @@ -8193,10 +8211,7 @@ components: type: array type: object title: AllowedToolsFilter - description: 'Filter configuration for restricting which MCP tools can be used. - - - :param tool_names: (Optional) List of specific tool names that are allowed' + description: Filter configuration for restricting which MCP tools can be used. ApprovalFilter: properties: always: @@ -8211,12 +8226,7 @@ components: type: array type: object title: ApprovalFilter - description: 'Filter configuration for MCP tool approval requirements. - - - :param always: (Optional) List of tool names that always require approval - - :param never: (Optional) List of tool names that never require approval' + description: Filter configuration for MCP tool approval requirements. ArrayType: properties: type: @@ -8226,10 +8236,7 @@ components: default: array type: object title: ArrayType - description: 'Parameter type for array values. - - - :param type: Discriminator type. Always "array"' + description: Parameter type for array values. BasicScoringFnParams: properties: type: @@ -8245,12 +8252,7 @@ components: description: Aggregation functions to apply to the scores of each row type: object title: BasicScoringFnParams - description: 'Parameters for basic scoring function configuration. - - :param type: The type of scoring function parameters, always basic - - :param aggregation_functions: Aggregation functions to apply to the scores - of each row' + description: Parameters for basic scoring function configuration. Batch: properties: id: @@ -8407,10 +8409,7 @@ components: default: boolean type: object title: BooleanType - description: 'Parameter type for boolean values. - - - :param type: Discriminator type. Always "boolean"' + description: Parameter type for boolean values. ChatCompletionInputType: properties: type: @@ -8420,10 +8419,7 @@ components: default: chat_completion_input type: object title: ChatCompletionInputType - description: 'Parameter type for chat completion input. - - - :param type: Discriminator type. Always "chat_completion_input"' + description: Parameter type for chat completion input. Chunk-Output: properties: content: @@ -8467,15 +8463,7 @@ components: - content - chunk_id title: Chunk - description: "A chunk of content that can be inserted into a vector database.\n\ - :param content: The content of the chunk, which can be interleaved text, images,\ - \ or other types.\n:param chunk_id: Unique identifier for the chunk. 
Must\ - \ be provided explicitly.\n:param metadata: Metadata associated with the chunk\ - \ that will be used in the model context during inference.\n:param embedding:\ - \ Optional embedding for the chunk. If not provided, it will be computed later.\n\ - :param chunk_metadata: Metadata for the chunk that will NOT be used in the\ - \ context during inference.\n The `chunk_metadata` is required backend\ - \ functionality." + description: A chunk of content that can be inserted into a vector database. ChunkMetadata: properties: chunk_id: @@ -8513,25 +8501,7 @@ components: type: integer type: object title: ChunkMetadata - description: "`ChunkMetadata` is backend metadata for a `Chunk` that is used\ - \ to store additional information about the chunk that\n will not be used\ - \ in the context during inference, but is required for backend functionality.\ - \ The `ChunkMetadata`\n is set during chunk creation in `MemoryToolRuntimeImpl().insert()`and\ - \ is not expected to change after.\n Use `Chunk.metadata` for metadata\ - \ that will be used in the context during inference.\n:param chunk_id: The\ - \ ID of the chunk. If not set, it will be generated based on the document\ - \ ID and content.\n:param document_id: The ID of the document this chunk belongs\ - \ to.\n:param source: The source of the content, such as a URL, file path,\ - \ or other identifier.\n:param created_timestamp: An optional timestamp indicating\ - \ when the chunk was created.\n:param updated_timestamp: An optional timestamp\ - \ indicating when the chunk was last updated.\n:param chunk_window: The window\ - \ of the chunk, which can be used to group related chunks together.\n:param\ - \ chunk_tokenizer: The tokenizer used to create the chunk. Default is Tiktoken.\n\ - :param chunk_embedding_model: The embedding model used to create the chunk's\ - \ embedding.\n:param chunk_embedding_dimension: The dimension of the embedding\ - \ vector for the chunk.\n:param content_token_count: The number of tokens\ - \ in the content of the chunk.\n:param metadata_token_count: The number of\ - \ tokens in the metadata of the chunk." + description: "`ChunkMetadata` is backend metadata for a `Chunk` that is used to store additional information about the chunk that\n will not be used in the context during inference, but is required for backend functionality. The `ChunkMetadata`\n is set during chunk creation in `MemoryToolRuntimeImpl().insert()`and is not expected to change after.\n Use `Chunk.metadata` for metadata that will be used in the context during inference." CompletionInputType: properties: type: @@ -8541,10 +8511,7 @@ components: default: completion_input type: object title: CompletionInputType - description: 'Parameter type for completion input. - - - :param type: Discriminator type. Always "completion_input"' + description: Parameter type for completion input. Conversation: properties: id: @@ -8560,20 +8527,16 @@ components: created_at: type: integer title: Created At - description: The time at which the conversation was created, measured in - seconds since the Unix epoch. + description: The time at which the conversation was created, measured in seconds since the Unix epoch. metadata: title: Metadata - description: Set of 16 key-value pairs that can be attached to an object. - This can be useful for storing additional information about the object - in a structured format, and querying for objects via API or the dashboard. + description: Set of 16 key-value pairs that can be attached to an object. 
This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. additionalProperties: type: string type: object items: title: Items - description: Initial items to include in the conversation context. You may - add up to 20 items at a time. + description: Initial items to include in the conversation context. You may add up to 20 items at a time. items: additionalProperties: true type: object @@ -9162,12 +9125,7 @@ components: default: ' ' type: object title: DefaultRAGQueryGeneratorConfig - description: 'Configuration for the default RAG query generator. - - - :param type: Type of query generator, always ''default'' - - :param separator: String separator used to join query terms' + description: Configuration for the default RAG query generator. Errors: properties: data: @@ -9189,10 +9147,7 @@ components: required: - status title: HealthInfo - description: 'Health status information for the service. - - - :param status: Current health status of the service' + description: Health status information for the service. HealthStatus: type: string enum: @@ -9213,12 +9168,7 @@ components: required: - image title: ImageContentItem - description: 'A image content item - - - :param type: Discriminator type of the content item. Always "image" - - :param image: Image as a base64 encoded string or an URL' + description: An image content item. InputTokensDetails: properties: cached_tokens: @@ -9238,10 +9188,7 @@ components: default: json type: object title: JsonType - description: 'Parameter type for JSON values. - - - :param type: Discriminator type. Always "json"' + description: Parameter type for JSON values. LLMAsJudgeScoringFnParams: properties: type: @@ -9271,18 +9218,7 @@ components: required: - judge_model title: LLMAsJudgeScoringFnParams - description: 'Parameters for LLM-as-judge scoring function configuration. - - :param type: The type of scoring function parameters, always llm_as_judge - - :param judge_model: Identifier of the LLM model to use as a judge for scoring - - :param prompt_template: (Optional) Custom prompt template for the judge model - - :param judge_score_regexes: Regexes to extract the answer from generated response - - :param aggregation_functions: Aggregation functions to apply to the scores - of each row' + description: Parameters for LLM-as-judge scoring function configuration. LLMRAGQueryGeneratorConfig: properties: type: @@ -9301,14 +9237,7 @@ components: - model - template title: LLMRAGQueryGeneratorConfig - description: 'Configuration for the LLM-based RAG query generator. - - - :param type: Type of query generator, always ''llm'' - - :param model: Name of the language model to use for query generation - - :param template: Template string for formatting the query generation prompt' + description: Configuration for the LLM-based RAG query generator. ListModelsResponse: properties: data: @@ -9343,10 +9272,7 @@ components: required: - data title: ListProvidersResponse - description: 'Response containing a list of all available providers. - - - :param data: List of provider information objects' + description: Response containing a list of all available providers. ListRoutesResponse: properties: data: @@ -9358,10 +9284,7 @@ components: required: - data title: ListRoutesResponse - description: 'Response containing a list of all available API routes. - - - :param data: List of available route information objects' + description: Response containing a list of all available API routes.
ListScoringFunctionsResponse: properties: data: @@ -9395,10 +9318,7 @@ components: required: - data title: ListToolGroupsResponse - description: 'Response containing a list of tool groups. - - - :param data: List of tool groups' + description: Response containing a list of tool groups. MCPListToolsTool: properties: input_schema: @@ -9416,14 +9336,7 @@ components: - input_schema - name title: MCPListToolsTool - description: 'Tool definition returned by MCP list tools operation. - - - :param input_schema: JSON schema defining the tool''s input parameters - - :param name: Name of the tool - - :param description: (Optional) Description of what the tool does' + description: Tool definition returned by MCP list tools operation. Model: properties: identifier: @@ -9456,21 +9369,7 @@ components: - identifier - provider_id title: Model - description: 'A model resource representing an AI model registered in Llama - Stack. - - - :param type: The resource type, always ''model'' for model resources - - :param model_type: The type of model (LLM or embedding model) - - :param metadata: Any additional metadata for this model - - :param identifier: Unique identifier for this resource in llama stack - - :param provider_resource_id: Unique identifier for this resource in the provider - - :param provider_id: ID of the provider that owns this resource' + description: A model resource representing an AI model registered in Llama Stack. ModelType: type: string enum: @@ -9478,14 +9377,7 @@ components: - embedding - rerank title: ModelType - description: 'Enumeration of supported model types in Llama Stack. - - :cvar llm: Large language model for text generation and completion - - :cvar embedding: Embedding model for converting text to vector representations - - :cvar rerank: Reranking model for reordering documents based on their relevance - to a query' + description: Enumeration of supported model types in Llama Stack. ModerationObject: properties: id: @@ -9505,13 +9397,7 @@ components: - model - results title: ModerationObject - description: 'A moderation object. - - :param id: The unique identifier for the moderation request. - - :param model: The model used to generate the moderation results. - - :param results: A list of moderation objects' + description: A moderation object. ModerationObjectResults: properties: flagged: @@ -9545,18 +9431,7 @@ components: required: - flagged title: ModerationObjectResults - description: 'A moderation object. - - :param flagged: Whether any of the below categories are flagged. - - :param categories: A list of the categories, and whether they are flagged - or not. - - :param category_applied_input_types: A list of the categories along with the - input type(s) that the score applies to. - - :param category_scores: A list of the categories along with their scores as - predicted by model.' + description: A moderation object. NumberType: properties: type: @@ -9566,10 +9441,7 @@ components: default: number type: object title: NumberType - description: 'Parameter type for numeric values. - - - :param type: Discriminator type. Always "number"' + description: Parameter type for numeric values. ObjectType: properties: type: @@ -9579,10 +9451,7 @@ components: default: object type: object title: ObjectType - description: 'Parameter type for object values. - - - :param type: Discriminator type. Always "object"' + description: Parameter type for object values. 
OpenAIAssistantMessageParam-Input: properties: role: @@ -9607,18 +9476,7 @@ components: type: array type: object title: OpenAIAssistantMessageParam - description: 'A message containing the model''s (assistant) response in an OpenAI-compatible - chat completion request. - - - :param role: Must be "assistant" to identify this as the model''s response - - :param content: The content of the model''s response - - :param name: (Optional) The name of the assistant message participant. - - :param tool_calls: List of tool calls. Each tool call is an OpenAIChatCompletionToolCall - object.' + description: A message containing the model's (assistant) response in an OpenAI-compatible chat completion request. OpenAIAssistantMessageParam-Output: properties: role: @@ -9643,18 +9501,7 @@ components: type: array type: object title: OpenAIAssistantMessageParam - description: 'A message containing the model''s (assistant) response in an OpenAI-compatible - chat completion request. - - - :param role: Must be "assistant" to identify this as the model''s response - - :param content: The content of the model''s response - - :param name: (Optional) The name of the assistant message participant. - - :param tool_calls: List of tool calls. Each tool call is an OpenAIChatCompletionToolCall - object.' + description: A message containing the model's (assistant) response in an OpenAI-compatible chat completion request. OpenAIChatCompletion: properties: id: @@ -9685,21 +9532,7 @@ components: - created - model title: OpenAIChatCompletion - description: 'Response from an OpenAI-compatible chat completion request. - - - :param id: The ID of the chat completion - - :param choices: List of choices - - :param object: The object type, which will be "chat.completion" - - :param created: The Unix timestamp in seconds when the chat completion was - created - - :param model: The model that was used to generate the chat completion - - :param usage: Token usage information for the completion' + description: Response from an OpenAI-compatible chat completion request. OpenAIChatCompletionContentPartImageParam: properties: type: @@ -9713,12 +9546,7 @@ components: required: - image_url title: OpenAIChatCompletionContentPartImageParam - description: 'Image content part for OpenAI-compatible chat completion messages. - - - :param type: Must be "image_url" to identify this as image content - - :param image_url: Image URL specification and processing details' + description: Image content part for OpenAI-compatible chat completion messages. OpenAIChatCompletionContentPartTextParam: properties: type: @@ -9733,12 +9561,7 @@ components: required: - text title: OpenAIChatCompletionContentPartTextParam - description: 'Text content part for OpenAI-compatible chat completion messages. - - - :param type: Must be "text" to identify this as text content - - :param text: The text content of the message' + description: Text content part for OpenAI-compatible chat completion messages. OpenAIChatCompletionRequestWithExtraBody: properties: model: @@ -9860,55 +9683,7 @@ components: - model - messages title: OpenAIChatCompletionRequestWithExtraBody - description: 'Request parameters for OpenAI-compatible chat completion endpoint. - - - :param model: The identifier of the model to use. The model must be registered - with Llama Stack and available via the /models endpoint. - - :param messages: List of messages in the conversation. - - :param frequency_penalty: (Optional) The penalty for repeated tokens. 
- - :param function_call: (Optional) The function call to use. - - :param functions: (Optional) List of functions to use. - - :param logit_bias: (Optional) The logit bias to use. - - :param logprobs: (Optional) The log probabilities to use. - - :param max_completion_tokens: (Optional) The maximum number of tokens to generate. - - :param max_tokens: (Optional) The maximum number of tokens to generate. - - :param n: (Optional) The number of completions to generate. - - :param parallel_tool_calls: (Optional) Whether to parallelize tool calls. - - :param presence_penalty: (Optional) The penalty for repeated tokens. - - :param response_format: (Optional) The response format to use. - - :param seed: (Optional) The seed to use. - - :param stop: (Optional) The stop tokens to use. - - :param stream: (Optional) Whether to stream the response. - - :param stream_options: (Optional) The stream options to use. - - :param temperature: (Optional) The temperature to use. - - :param tool_choice: (Optional) The tool choice to use. - - :param tools: (Optional) The tools to use. - - :param top_logprobs: (Optional) The top log probabilities to use. - - :param top_p: (Optional) The top p to use. - - :param user: (Optional) The user to use.' + description: Request parameters for OpenAI-compatible chat completion endpoint. OpenAIChatCompletionToolCall: properties: index: @@ -9926,17 +9701,7 @@ components: $ref: '#/components/schemas/OpenAIChatCompletionToolCallFunction' type: object title: OpenAIChatCompletionToolCall - description: 'Tool call specification for OpenAI-compatible chat completion - responses. - - - :param index: (Optional) Index of the tool call in the list - - :param id: (Optional) Unique identifier for the tool call - - :param type: Must be "function" to identify this as a function call - - :param function: (Optional) Function call details' + description: Tool call specification for OpenAI-compatible chat completion responses. OpenAIChatCompletionToolCallFunction: properties: name: @@ -9947,12 +9712,7 @@ components: type: string type: object title: OpenAIChatCompletionToolCallFunction - description: 'Function call details for OpenAI-compatible tool calls. - - - :param name: (Optional) Name of the function to call - - :param arguments: (Optional) Arguments to pass to the function as a JSON string' + description: Function call details for OpenAI-compatible tool calls. OpenAIChatCompletionUsage: properties: prompt_tokens: @@ -9974,18 +9734,7 @@ components: - completion_tokens - total_tokens title: OpenAIChatCompletionUsage - description: 'Usage information for OpenAI chat completion. - - - :param prompt_tokens: Number of tokens in the prompt - - :param completion_tokens: Number of tokens in the completion - - :param total_tokens: Total tokens used (prompt + completion) - - :param input_tokens_details: Detailed breakdown of input token usage - - :param output_tokens_details: Detailed breakdown of output token usage' + description: Usage information for OpenAI chat completion. OpenAIChatCompletionUsageCompletionTokensDetails: properties: reasoning_tokens: @@ -9993,10 +9742,7 @@ components: type: integer type: object title: OpenAIChatCompletionUsageCompletionTokensDetails - description: 'Token details for output tokens in OpenAI chat completion usage. - - - :param reasoning_tokens: Number of tokens used for reasoning (o1/o3 models)' + description: Token details for output tokens in OpenAI chat completion usage. 
OpenAIChatCompletionUsagePromptTokensDetails: properties: cached_tokens: @@ -10004,10 +9750,7 @@ components: type: integer type: object title: OpenAIChatCompletionUsagePromptTokensDetails - description: 'Token details for prompt tokens in OpenAI chat completion usage. - - - :param cached_tokens: Number of tokens retrieved from cache' + description: Token details for prompt tokens in OpenAI chat completion usage. OpenAIChoice-Output: properties: message: @@ -10040,16 +9783,7 @@ components: - finish_reason - index title: OpenAIChoice - description: 'A choice from an OpenAI-compatible chat completion response. - - - :param message: The message from the model - - :param finish_reason: The reason the model stopped generating - - :param index: The index of the choice - - :param logprobs: (Optional) The log probabilities for the tokens in the message' + description: A choice from an OpenAI-compatible chat completion response. OpenAIChoiceLogprobs-Output: properties: content: @@ -10064,13 +9798,7 @@ components: type: array type: object title: OpenAIChoiceLogprobs - description: 'The log probabilities for the tokens in the message from an OpenAI-compatible - chat completion response. - - - :param content: (Optional) The log probabilities for the tokens in the message - - :param refusal: (Optional) The log probabilities for the tokens in the message' + description: The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response. OpenAICompletion: properties: id: @@ -10099,18 +9827,7 @@ components: - created - model title: OpenAICompletion - description: 'Response from an OpenAI-compatible completion request. - - - :id: The ID of the completion - - :choices: List of choices - - :created: The Unix timestamp in seconds when the completion was created - - :model: The model that was used to generate the completion - - :object: The object type, which will be "text_completion"' + description: Response from an OpenAI-compatible completion request. OpenAICompletionChoice-Output: properties: finish_reason: @@ -10130,16 +9847,7 @@ components: - text - index title: OpenAICompletionChoice - description: 'A choice from an OpenAI-compatible completion response. - - - :finish_reason: The reason the model stopped generating - - :text: The text of the choice - - :index: The index of the choice - - :logprobs: (Optional) The log probabilities for the tokens in the choice' + description: A choice from an OpenAI-compatible completion response. OpenAICompletionRequestWithExtraBody: properties: model: @@ -10221,45 +9929,7 @@ components: - model - prompt title: OpenAICompletionRequestWithExtraBody - description: 'Request parameters for OpenAI-compatible completion endpoint. - - - :param model: The identifier of the model to use. The model must be registered - with Llama Stack and available via the /models endpoint. - - :param prompt: The prompt to generate a completion for. - - :param best_of: (Optional) The number of completions to generate. - - :param echo: (Optional) Whether to echo the prompt. - - :param frequency_penalty: (Optional) The penalty for repeated tokens. - - :param logit_bias: (Optional) The logit bias to use. - - :param logprobs: (Optional) The log probabilities to use. - - :param max_tokens: (Optional) The maximum number of tokens to generate. - - :param n: (Optional) The number of completions to generate. - - :param presence_penalty: (Optional) The penalty for repeated tokens. - - :param seed: (Optional) The seed to use. 
- - :param stop: (Optional) The stop tokens to use. - - :param stream: (Optional) Whether to stream the response. - - :param stream_options: (Optional) The stream options to use. - - :param temperature: (Optional) The temperature to use. - - :param top_p: (Optional) The top p to use. - - :param user: (Optional) The user to use. - - :param suffix: (Optional) The suffix that should be appended to the completion.' + description: Request parameters for OpenAI-compatible completion endpoint. OpenAICreateVectorStoreFileBatchRequestWithExtraBody: properties: file_ids: @@ -10286,15 +9956,7 @@ components: required: - file_ids title: OpenAICreateVectorStoreFileBatchRequestWithExtraBody - description: 'Request to create a vector store file batch with extra_body support. - - - :param file_ids: A list of File IDs that the vector store should use - - :param attributes: (Optional) Key-value attributes to store with the files - - :param chunking_strategy: (Optional) The chunking strategy used to chunk the - file(s). Defaults to auto' + description: Request to create a vector store file batch with extra_body support. OpenAICreateVectorStoreRequestWithExtraBody: properties: name: @@ -10320,19 +9982,7 @@ components: additionalProperties: true type: object title: OpenAICreateVectorStoreRequestWithExtraBody - description: 'Request to create a vector store with extra_body support. - - - :param name: (Optional) A name for the vector store - - :param file_ids: List of file IDs to include in the vector store - - :param expires_after: (Optional) Expiration policy for the vector store - - :param chunking_strategy: (Optional) Strategy for splitting files into chunks - - :param metadata: Set of key-value pairs that can be attached to the vector - store' + description: Request to create a vector store with extra_body support. OpenAIDeveloperMessageParam: properties: role: @@ -10354,15 +10004,7 @@ components: required: - content title: OpenAIDeveloperMessageParam - description: 'A message from the developer in an OpenAI-compatible chat completion - request. - - - :param role: Must be "developer" to identify this as a developer message - - :param content: The content of the developer message - - :param name: (Optional) The name of the developer message participant.' + description: A message from the developer in an OpenAI-compatible chat completion request. OpenAIEmbeddingData: properties: object: @@ -10385,16 +10027,7 @@ components: - embedding - index title: OpenAIEmbeddingData - description: 'A single embedding data object from an OpenAI-compatible embeddings - response. - - - :param object: The object type, which will be "embedding" - - :param embedding: The embedding vector as a list of floats (when encoding_format="float") - or as a base64-encoded string (when encoding_format="base64") - - :param index: The index of the embedding in the input list' + description: A single embedding data object from an OpenAI-compatible embeddings response. OpenAIEmbeddingUsage: properties: prompt_tokens: @@ -10408,12 +10041,7 @@ components: - prompt_tokens - total_tokens title: OpenAIEmbeddingUsage - description: 'Usage information for an OpenAI-compatible embeddings response. - - - :param prompt_tokens: The number of tokens in the input - - :param total_tokens: The total number of tokens used' + description: Usage information for an OpenAI-compatible embeddings response. 
OpenAIEmbeddingsRequestWithExtraBody: properties: model: @@ -10442,23 +10070,7 @@ components: - model - input title: OpenAIEmbeddingsRequestWithExtraBody - description: 'Request parameters for OpenAI-compatible embeddings endpoint. - - - :param model: The identifier of the model to use. The model must be an embedding - model registered with Llama Stack and available via the /models endpoint. - - :param input: Input text to embed, encoded as a string or array of strings. - To embed multiple inputs in a single request, pass an array of strings. - - :param encoding_format: (Optional) The format to return the embeddings in. - Can be either "float" or "base64". Defaults to "float". - - :param dimensions: (Optional) The number of dimensions the resulting output - embeddings should have. Only supported in text-embedding-3 and later models. - - :param user: (Optional) A unique identifier representing your end-user, which - can help OpenAI to monitor and detect abuse.' + description: Request parameters for OpenAI-compatible embeddings endpoint. OpenAIEmbeddingsResponse: properties: object: @@ -10482,16 +10094,7 @@ components: - model - usage title: OpenAIEmbeddingsResponse - description: 'Response from an OpenAI-compatible embeddings request. - - - :param object: The object type, which will be "list" - - :param data: List of embedding data objects - - :param model: The model that was used to generate the embeddings - - :param usage: Usage information' + description: Response from an OpenAI-compatible embeddings request. OpenAIFile: properties: type: @@ -10551,22 +10154,7 @@ components: - filename - purpose title: OpenAIFileObject - description: 'OpenAI File object as defined in the OpenAI Files API. - - - :param object: The object type, which is always "file" - - :param id: The file identifier, which can be referenced in the API endpoints - - :param bytes: The size of the file, in bytes - - :param created_at: The Unix timestamp (in seconds) for when the file was created - - :param expires_at: The Unix timestamp (in seconds) for when the file expires - - :param filename: The name of the file - - :param purpose: The intended purpose of the file' + description: OpenAI File object as defined in the OpenAI Files API. OpenAIFilePurpose: type: string enum: @@ -10586,14 +10174,7 @@ components: required: - url title: OpenAIImageURL - description: 'Image URL specification for OpenAI-compatible chat completion - messages. - - - :param url: URL of the image to include in the message - - :param detail: (Optional) Level of detail for image processing. Can be "low", - "high", or "auto"' + description: Image URL specification for OpenAI-compatible chat completion messages. OpenAIJSONSchema: properties: name: @@ -10611,17 +10192,7 @@ components: type: object type: object title: OpenAIJSONSchema - description: 'JSON schema specification for OpenAI-compatible structured response - format. - - - :param name: Name of the schema - - :param description: (Optional) Description of the schema - - :param strict: (Optional) Whether to enforce strict adherence to the schema - - :param schema: (Optional) The JSON schema definition' + description: JSON schema specification for OpenAI-compatible structured response format. OpenAIResponseAnnotationCitation: properties: type: @@ -10648,18 +10219,7 @@ components: - title - url title: OpenAIResponseAnnotationCitation - description: 'URL citation annotation for referencing external web resources. 
- - - :param type: Annotation type identifier, always "url_citation" - - :param end_index: End position of the citation span in the content - - :param start_index: Start position of the citation span in the content - - :param title: Title of the referenced web resource - - :param url: URL of the referenced web resource' + description: URL citation annotation for referencing external web resources. OpenAIResponseAnnotationContainerFileCitation: properties: type: @@ -10712,17 +10272,7 @@ components: - filename - index title: OpenAIResponseAnnotationFileCitation - description: 'File citation annotation for referencing specific files in response - content. - - - :param type: Annotation type identifier, always "file_citation" - - :param file_id: Unique identifier of the referenced file - - :param filename: Name of the referenced file - - :param index: Position index of the citation within the content' + description: File citation annotation for referencing specific files in response content. OpenAIResponseAnnotationFilePath: properties: type: @@ -10755,12 +10305,7 @@ components: required: - refusal title: OpenAIResponseContentPartRefusal - description: 'Refusal content within a streamed response part. - - - :param type: Content part type identifier, always "refusal" - - :param refusal: Refusal text supplied by the model' + description: Refusal content within a streamed response part. OpenAIResponseError: properties: code: @@ -10774,12 +10319,7 @@ components: - code - message title: OpenAIResponseError - description: 'Error details for failed OpenAI response requests. - - - :param code: Error code identifying the type of failure - - :param message: Human-readable error message describing the failure' + description: Error details for failed OpenAI response requests. OpenAIResponseFormatJSONObject: properties: type: @@ -10789,12 +10329,7 @@ components: default: json_object type: object title: OpenAIResponseFormatJSONObject - description: 'JSON object response format for OpenAI-compatible chat completion - requests. - - - :param type: Must be "json_object" to indicate generic JSON object response - format' + description: JSON object response format for OpenAI-compatible chat completion requests. OpenAIResponseFormatJSONSchema: properties: type: @@ -10808,13 +10343,7 @@ components: required: - json_schema title: OpenAIResponseFormatJSONSchema - description: 'JSON schema response format for OpenAI-compatible chat completion - requests. - - - :param type: Must be "json_schema" to indicate structured JSON response format - - :param json_schema: The JSON schema specification for the response' + description: JSON schema response format for OpenAI-compatible chat completion requests. OpenAIResponseFormatText: properties: type: @@ -10824,10 +10353,7 @@ components: default: text type: object title: OpenAIResponseFormatText - description: 'Text response format for OpenAI-compatible chat completion requests. - - - :param type: Must be "text" to indicate plain text response format' + description: Text response format for OpenAI-compatible chat completion requests. OpenAIResponseInputFunctionToolCallOutput: properties: call_id: @@ -10852,8 +10378,7 @@ components: - call_id - output title: OpenAIResponseInputFunctionToolCallOutput - description: This represents the output of a function call that gets passed - back to the model. + description: This represents the output of a function call that gets passed back to the model. 
OpenAIResponseInputMessageContentFile: properties: type: @@ -10875,18 +10400,7 @@ components: type: string type: object title: OpenAIResponseInputMessageContentFile - description: 'File content for input messages in OpenAI response format. - - - :param type: The type of the input item. Always `input_file`. - - :param file_data: The data of the file to be sent to the model. - - :param file_id: (Optional) The ID of the file to be sent to the model. - - :param file_url: The URL of the file to be sent to the model. - - :param filename: The name of the file to be sent to the model.' + description: File content for input messages in OpenAI response format. OpenAIResponseInputMessageContentImage: properties: detail: @@ -10912,17 +10426,7 @@ components: type: string type: object title: OpenAIResponseInputMessageContentImage - description: 'Image content for input messages in OpenAI response format. - - - :param detail: Level of detail for image processing, can be "low", "high", - or "auto" - - :param type: Content type identifier, always "input_image" - - :param file_id: (Optional) The ID of the file to be sent to the model. - - :param image_url: (Optional) URL of the image content' + description: Image content for input messages in OpenAI response format. OpenAIResponseInputMessageContentText: properties: text: @@ -10937,12 +10441,7 @@ components: required: - text title: OpenAIResponseInputMessageContentText - description: 'Text content for input messages in OpenAI response format. - - - :param text: The text content of the input message - - :param type: Content type identifier, always "input_text"' + description: Text content for input messages in OpenAI response format. OpenAIResponseInputToolFileSearch: properties: type: @@ -10971,20 +10470,7 @@ components: required: - vector_store_ids title: OpenAIResponseInputToolFileSearch - description: 'File search tool configuration for OpenAI response inputs. - - - :param type: Tool type identifier, always "file_search" - - :param vector_store_ids: List of vector store identifiers to search within - - :param filters: (Optional) Additional filters to apply to the search - - :param max_num_results: (Optional) Maximum number of search results to return - (1-50) - - :param ranking_options: (Optional) Options for ranking and scoring search - results' + description: File search tool configuration for OpenAI response inputs. OpenAIResponseInputToolFunction: properties: type: @@ -11010,18 +10496,7 @@ components: - name - parameters title: OpenAIResponseInputToolFunction - description: 'Function tool configuration for OpenAI response inputs. - - - :param type: Tool type identifier, always "function" - - :param name: Name of the function that can be called - - :param description: (Optional) Description of what the function does - - :param parameters: (Optional) JSON schema defining the function''s parameters - - :param strict: (Optional) Whether to enforce strict parameter validation' + description: Function tool configuration for OpenAI response inputs. OpenAIResponseInputToolMCP: properties: type: @@ -11060,24 +10535,7 @@ components: - server_label - server_url title: OpenAIResponseInputToolMCP - description: 'Model Context Protocol (MCP) tool configuration for OpenAI response - inputs. 
- - - :param type: Tool type identifier, always "mcp" - - :param server_label: Label to identify this MCP server - - :param server_url: URL endpoint of the MCP server - - :param headers: (Optional) HTTP headers to include when connecting to the - server - - :param require_approval: Approval requirement for tool calls ("always", "never", - or filter) - - :param allowed_tools: (Optional) Restriction on which tools can be used from - this server' + description: Model Context Protocol (MCP) tool configuration for OpenAI response inputs. OpenAIResponseInputToolWebSearch: properties: type: @@ -11097,13 +10555,7 @@ components: pattern: ^low|medium|high$ type: object title: OpenAIResponseInputToolWebSearch - description: 'Web search tool configuration for OpenAI response inputs. - - - :param type: Web search tool type variant to use - - :param search_context_size: (Optional) Size of search context, must be "low", - "medium", or "high"' + description: Web search tool configuration for OpenAI response inputs. OpenAIResponseMCPApprovalRequest: properties: arguments: @@ -11211,13 +10663,7 @@ components: - content - role title: OpenAIResponseMessage - description: 'Corresponds to the various Message types in the Responses API. - - They are all under one type because the Responses API gives them all - - the same "type" value, and there is no way to tell them apart in certain - - scenarios.' + description: "Corresponds to the various Message types in the Responses API.\nThey are all under one type because the Responses API gives them all\nthe same \"type\" value, and there is no way to tell them apart in certain\nscenarios." OpenAIResponseMessage-Output: properties: content: @@ -11273,13 +10719,7 @@ components: - content - role title: OpenAIResponseMessage - description: 'Corresponds to the various Message types in the Responses API. - - They are all under one type because the Responses API gives them all - - the same "type" value, and there is no way to tell them apart in certain - - scenarios.' + description: "Corresponds to the various Message types in the Responses API.\nThey are all under one type because the Responses API gives them all\nthe same \"type\" value, and there is no way to tell them apart in certain\nscenarios." OpenAIResponseObject: properties: created_at: @@ -11377,45 +10817,7 @@ components: - output - status title: OpenAIResponseObject - description: 'Complete OpenAI response object containing generation results - and metadata. - - - :param created_at: Unix timestamp when the response was created - - :param error: (Optional) Error details if the response generation failed - - :param id: Unique identifier for this response - - :param model: Model identifier used for generation - - :param object: Object type identifier, always "response" - - :param output: List of generated output items (messages, tool calls, etc.) - - :param parallel_tool_calls: Whether tool calls can be executed in parallel - - :param previous_response_id: (Optional) ID of the previous response in a conversation - - :param prompt: (Optional) Reference to a prompt template and its variables. - - :param status: Current status of the response generation - - :param temperature: (Optional) Sampling temperature used for generation - - :param text: Text formatting configuration for the response - - :param top_p: (Optional) Nucleus sampling parameter used for generation - - :param tools: (Optional) An array of tools the model may call while generating - a response. 
- - :param truncation: (Optional) Truncation strategy applied to the response - - :param usage: (Optional) Token usage information for the response - - :param instructions: (Optional) System message inserted into the model''s - context' + description: Complete OpenAI response object containing generation results and metadata. OpenAIResponseOutputMessageContentOutputText: properties: text: @@ -11475,18 +10877,7 @@ components: - queries - status title: OpenAIResponseOutputMessageFileSearchToolCall - description: 'File search tool call output message for OpenAI responses. - - - :param id: Unique identifier for this tool call - - :param queries: List of search queries executed - - :param status: Current status of the file search operation - - :param type: Tool call type identifier, always "file_search_call" - - :param results: (Optional) Search results returned by the file search operation' + description: File search tool call output message for OpenAI responses. OpenAIResponseOutputMessageFileSearchToolCallResults: properties: attributes: @@ -11513,18 +10904,7 @@ components: - score - text title: OpenAIResponseOutputMessageFileSearchToolCallResults - description: 'Search results returned by the file search operation. - - - :param attributes: (Optional) Key-value attributes associated with the file - - :param file_id: Unique identifier of the file containing the result - - :param filename: Name of the file containing the result - - :param score: Relevance score for this search result (between 0 and 1) - - :param text: Text content of the search result' + description: Search results returned by the file search operation. OpenAIResponseOutputMessageFunctionToolCall: properties: call_id: @@ -11553,20 +10933,7 @@ components: - name - arguments title: OpenAIResponseOutputMessageFunctionToolCall - description: 'Function tool call output message for OpenAI responses. - - - :param call_id: Unique identifier for the function call - - :param name: Name of the function being called - - :param arguments: JSON string containing the function arguments - - :param type: Tool call type identifier, always "function_call" - - :param id: (Optional) Additional identifier for the tool call - - :param status: (Optional) Current status of the function call execution' + description: Function tool call output message for OpenAI responses. OpenAIResponseOutputMessageMCPCall: properties: id: @@ -11599,22 +10966,7 @@ components: - name - server_label title: OpenAIResponseOutputMessageMCPCall - description: 'Model Context Protocol (MCP) call output message for OpenAI responses. - - - :param id: Unique identifier for this MCP call - - :param type: Tool call type identifier, always "mcp_call" - - :param arguments: JSON string containing the MCP call arguments - - :param name: Name of the MCP method being called - - :param server_label: Label identifying the MCP server handling the call - - :param error: (Optional) Error message if the MCP call failed - - :param output: (Optional) Output result from the successful MCP call' + description: Model Context Protocol (MCP) call output message for OpenAI responses. OpenAIResponseOutputMessageMCPListTools: properties: id: @@ -11639,17 +10991,7 @@ components: - server_label - tools title: OpenAIResponseOutputMessageMCPListTools - description: 'MCP list tools output message containing available tools from - an MCP server. 
- - - :param id: Unique identifier for this MCP list tools operation - - :param type: Tool call type identifier, always "mcp_list_tools" - - :param server_label: Label identifying the MCP server providing the tools - - :param tools: List of available tools provided by the MCP server' + description: MCP list tools output message containing available tools from an MCP server. OpenAIResponseOutputMessageWebSearchToolCall: properties: id: @@ -11668,14 +11010,7 @@ components: - id - status title: OpenAIResponseOutputMessageWebSearchToolCall - description: 'Web search tool call output message for OpenAI responses. - - - :param id: Unique identifier for this tool call - - :param status: Current status of the web search operation - - :param type: Tool call type identifier, always "web_search_call"' + description: Web search tool call output message for OpenAI responses. OpenAIResponsePrompt: properties: id: @@ -11702,30 +11037,14 @@ components: required: - id title: OpenAIResponsePrompt - description: 'OpenAI compatible Prompt object that is used in OpenAI responses. - - - :param id: Unique identifier of the prompt template - - :param variables: Dictionary of variable names to OpenAIResponseInputMessageContent - structure for template substitution. The substitution values can either be - strings, or other Response input types - - like images or files. - - :param version: Version number of the prompt to use (defaults to latest if - not specified)' + description: OpenAI compatible Prompt object that is used in OpenAI responses. OpenAIResponseText: properties: format: $ref: '#/components/schemas/OpenAIResponseTextFormat' type: object title: OpenAIResponseText - description: 'Text response configuration for OpenAI responses. - - - :param format: (Optional) Text format configuration specifying output format - requirements' + description: Text response configuration for OpenAI responses. OpenAIResponseTextFormat: properties: type: @@ -11752,22 +11071,7 @@ components: type: boolean type: object title: OpenAIResponseTextFormat - description: 'Configuration for Responses API text format. - - - :param type: Must be "text", "json_schema", or "json_object" to identify the - format type - - :param name: The name of the response format. Only used for json_schema. - - :param schema: The JSON schema the response should conform to. In a Python - SDK, this is often a `pydantic` model. Only used for json_schema. - - :param description: (Optional) A description of the response format. Only - used for json_schema. - - :param strict: (Optional) Whether to strictly enforce the JSON schema. If - true, the response must match the schema exactly. Only used for json_schema.' + description: Configuration for Responses API text format. OpenAIResponseToolMCP: properties: type: @@ -11789,16 +11093,7 @@ components: required: - server_label title: OpenAIResponseToolMCP - description: 'Model Context Protocol (MCP) tool configuration for OpenAI response - object. - - - :param type: Tool type identifier, always "mcp" - - :param server_label: Label to identify this MCP server - - :param allowed_tools: (Optional) Restriction on which tools can be used from - this server' + description: Model Context Protocol (MCP) tool configuration for OpenAI response object. OpenAIResponseUsage: properties: input_tokens: @@ -11820,18 +11115,7 @@ components: - output_tokens - total_tokens title: OpenAIResponseUsage - description: 'Usage information for OpenAI response. 
- - - :param input_tokens: Number of tokens in the input - - :param output_tokens: Number of tokens in the output - - :param total_tokens: Total tokens used (input + output) - - :param input_tokens_details: Detailed breakdown of input token usage - - :param output_tokens_details: Detailed breakdown of output token usage' + description: Usage information for OpenAI response. OpenAIResponseUsageInputTokensDetails: properties: cached_tokens: @@ -11839,10 +11123,7 @@ components: type: integer type: object title: OpenAIResponseUsageInputTokensDetails - description: 'Token details for input tokens in OpenAI response usage. - - - :param cached_tokens: Number of tokens retrieved from cache' + description: Token details for input tokens in OpenAI response usage. OpenAIResponseUsageOutputTokensDetails: properties: reasoning_tokens: @@ -11850,10 +11131,7 @@ components: type: integer type: object title: OpenAIResponseUsageOutputTokensDetails - description: 'Token details for output tokens in OpenAI response usage. - - - :param reasoning_tokens: Number of tokens used for reasoning (o1/o3 models)' + description: Token details for output tokens in OpenAI response usage. OpenAISystemMessageParam: properties: role: @@ -11875,16 +11153,7 @@ components: required: - content title: OpenAISystemMessageParam - description: 'A system message providing instructions or context to the model. - - - :param role: Must be "system" to identify this as a system message - - :param content: The content of the "system prompt". If multiple system messages - are provided, they are concatenated. The underlying Llama Stack code may also - add other system messages (for example, for formatting tool definitions). - - :param name: (Optional) The name of the system message participant.' + description: A system message providing instructions or context to the model. OpenAITokenLogProb: properties: token: @@ -11909,17 +11178,7 @@ components: - logprob - top_logprobs title: OpenAITokenLogProb - description: 'The log probability for a token from an OpenAI-compatible chat - completion response. - - - :token: The token - - :bytes: (Optional) The bytes for the token - - :logprob: The log probability of the token - - :top_logprobs: The top log probabilities for the token' + description: The log probability for a token from an OpenAI-compatible chat completion response. OpenAIToolMessageParam: properties: role: @@ -11942,16 +11201,7 @@ components: - tool_call_id - content title: OpenAIToolMessageParam - description: 'A message representing the result of a tool invocation in an OpenAI-compatible - chat completion request. - - - :param role: Must be "tool" to identify this as a tool response - - :param tool_call_id: Unique identifier for the tool call this response is - for - - :param content: The response content from the tool' + description: A message representing the result of a tool invocation in an OpenAI-compatible chat completion request. OpenAITopLogProb: properties: token: @@ -11970,15 +11220,7 @@ components: - token - logprob title: OpenAITopLogProb - description: 'The top log probability for a token from an OpenAI-compatible - chat completion response. - - - :token: The token - - :bytes: (Optional) The bytes for the token - - :logprob: The log probability of the token' + description: The top log probability for a token from an OpenAI-compatible chat completion response. 
OpenAIUserMessageParam-Input: properties: role: @@ -12009,16 +11251,7 @@ components: required: - content title: OpenAIUserMessageParam - description: 'A message from the user in an OpenAI-compatible chat completion - request. - - - :param role: Must be "user" to identify this as a user message - - :param content: The content of the message, which can include text and other - media - - :param name: (Optional) The name of the user message participant.' + description: A message from the user in an OpenAI-compatible chat completion request. OpenAIUserMessageParam-Output: properties: role: @@ -12049,27 +11282,14 @@ components: required: - content title: OpenAIUserMessageParam - description: 'A message from the user in an OpenAI-compatible chat completion - request. - - - :param role: Must be "user" to identify this as a user message - - :param content: The content of the message, which can include text and other - media - - :param name: (Optional) The name of the user message participant.' + description: A message from the user in an OpenAI-compatible chat completion request. Order: type: string enum: - asc - desc title: Order - description: 'Sort order for paginated responses. - - :cvar asc: Ascending order - - :cvar desc: Descending order' + description: Sort order for paginated responses. OutputTokensDetails: properties: reasoning_tokens: @@ -12111,22 +11331,7 @@ components: - version - prompt_id title: Prompt - description: 'A prompt resource representing a stored OpenAI Compatible prompt - template in Llama Stack. - - - :param prompt: The system prompt text with variable placeholders. Variables - are only supported when using the Responses API. - - :param version: Version (integer starting at 1, incremented on save) - - :param prompt_id: Unique identifier formatted as ''pmpt_<48-digit-hash>'' - - :param variables: List of prompt variable names that can be used in the prompt - template - - :param is_default: Boolean indicating whether this version is the default - version for this prompt' + description: A prompt resource representing a stored OpenAI Compatible prompt template in Llama Stack. ProviderInfo: properties: api: @@ -12154,19 +11359,7 @@ components: - config - health title: ProviderInfo - description: 'Information about a registered provider including its configuration - and health status. - - - :param api: The API name this provider implements - - :param provider_id: Unique identifier for the provider - - :param provider_type: The type of provider implementation - - :param config: Configuration parameters for the provider - - :param health: Current health status of the provider' + description: Information about a registered provider including its configuration and health status. QueryChunksResponse: properties: chunks: @@ -12184,12 +11377,7 @@ components: - chunks - scores title: QueryChunksResponse - description: 'Response from querying chunks in a vector database. - - - :param chunks: List of content chunks returned from the query - - :param scores: Relevance scores corresponding to each returned chunk' + description: Response from querying chunks in a vector database. 
RAGQueryConfig: properties: query_generator_config: @@ -12216,13 +11404,7 @@ components: chunk_template: type: string title: Chunk Template - default: 'Result {index} - - Content: {chunk.content} - - Metadata: {metadata} - - ' + default: "Result {index}\nContent: {chunk.content}\nMetadata: {metadata}\n" mode: default: vector $ref: '#/components/schemas/RAGSearchMode' @@ -12238,16 +11420,7 @@ components: weighted: '#/components/schemas/WeightedRanker' type: object title: RAGQueryConfig - description: "Configuration for the RAG query generation.\n\n:param query_generator_config:\ - \ Configuration for the query generator.\n:param max_tokens_in_context: Maximum\ - \ number of tokens in the context.\n:param max_chunks: Maximum number of chunks\ - \ to retrieve.\n:param chunk_template: Template for formatting each retrieved\ - \ chunk in the context.\n Available placeholders: {index} (1-based chunk\ - \ ordinal), {chunk.content} (chunk content string), {metadata} (chunk metadata\ - \ dict).\n Default: \"Result {index}\\nContent: {chunk.content}\\nMetadata:\ - \ {metadata}\\n\"\n:param mode: Search mode for retrieval\u2014either \"vector\"\ - , \"keyword\", or \"hybrid\". Default \"vector\".\n:param ranker: Configuration\ - \ for the ranker to use in hybrid search. Defaults to RRF ranker." + description: Configuration for the RAG query generation. RAGQueryResult: properties: content: @@ -12278,12 +11451,7 @@ components: title: Metadata type: object title: RAGQueryResult - description: 'Result of a RAG query containing retrieved content and metadata. - - - :param content: (Optional) The retrieved content from the query - - :param metadata: Additional metadata about the query result' + description: Result of a RAG query containing retrieved content and metadata. RAGSearchMode: type: string enum: @@ -12291,13 +11459,7 @@ components: - keyword - hybrid title: RAGSearchMode - description: 'Search modes for RAG query retrieval: - - - VECTOR: Uses vector similarity search for semantic matching - - - KEYWORD: Uses keyword-based search for exact matching - - - HYBRID: Combines both vector and keyword search for better results' + description: "Search modes for RAG query retrieval:\n- VECTOR: Uses vector similarity search for semantic matching\n- KEYWORD: Uses keyword-based search for exact matching\n- HYBRID: Combines both vector and keyword search for better results" RRFRanker: properties: type: @@ -12312,10 +11474,7 @@ components: minimum: 0.0 type: object title: RRFRanker - description: "Reciprocal Rank Fusion (RRF) ranker configuration.\n\n:param type:\ - \ The type of ranker, always \"rrf\"\n:param impact_factor: The impact factor\ - \ for RRF scoring. Higher values give more weight to higher-ranked results.\n\ - \ Must be greater than 0" + description: Reciprocal Rank Fusion (RRF) ranker configuration. RegexParserScoringFnParams: properties: type: @@ -12337,14 +11496,7 @@ components: description: Aggregation functions to apply to the scores of each row type: object title: RegexParserScoringFnParams - description: 'Parameters for regex parser scoring function configuration. - - :param type: The type of scoring function parameters, always regex_parser - - :param parsing_regexes: Regex to extract the answer from generated response - - :param aggregation_functions: Aggregation functions to apply to the scores - of each row' + description: Parameters for regex parser scoring function configuration. 
RouteInfo: properties: route: @@ -12364,25 +11516,14 @@ components: - method - provider_types title: RouteInfo - description: 'Information about an API route including its path, method, and - implementing providers. - - - :param route: The API endpoint path - - :param method: HTTP method for the route - - :param provider_types: List of provider types that implement this route' + description: Information about an API route including its path, method, and implementing providers. RunShieldResponse: properties: violation: $ref: '#/components/schemas/SafetyViolation' type: object title: RunShieldResponse - description: 'Response from running a safety shield. - - - :param violation: (Optional) Safety violation detected by the shield, if any' + description: Response from running a safety shield. SafetyViolation: properties: violation_level: @@ -12398,15 +11539,7 @@ components: required: - violation_level title: SafetyViolation - description: 'Details of a safety violation detected by content moderation. - - - :param violation_level: Severity level of the violation - - :param user_message: (Optional) Message to convey to the user about the violation - - :param metadata: Additional metadata including specific violation codes for - debugging and telemetry' + description: Details of a safety violation detected by content moderation. ScoreBatchResponse: properties: dataset_id: @@ -12421,12 +11554,7 @@ components: required: - results title: ScoreBatchResponse - description: 'Response from batch scoring operations on datasets. - - - :param dataset_id: (Optional) The identifier of the dataset that was scored - - :param results: A map of scoring function name to ScoringResult' + description: Response from batch scoring operations on datasets. ScoreResponse: properties: results: @@ -12438,10 +11566,7 @@ components: required: - results title: ScoreResponse - description: 'The response from scoring. - - - :param results: A map of scoring function name to ScoringResult.' + description: The response from scoring. ScoringFn-Output: properties: identifier: @@ -12498,8 +11623,7 @@ components: union: '#/components/schemas/UnionType' params: title: Params - description: The parameters for the scoring function for benchmark eval, - these can be overridden for app eval + description: The parameters for the scoring function for benchmark eval, these can be overridden for app eval oneOf: - $ref: '#/components/schemas/LLMAsJudgeScoringFnParams' - $ref: '#/components/schemas/RegexParserScoringFnParams' @@ -12516,9 +11640,7 @@ components: - provider_id - return_type title: ScoringFn - description: 'A scoring function resource for evaluating model outputs. - - :param type: The resource type, always scoring_function' + description: A scoring function resource for evaluating model outputs. ScoringResult: properties: score_rows: @@ -12536,13 +11658,7 @@ components: - score_rows - aggregated_results title: ScoringResult - description: 'A scoring result for a single row. - - - :param score_rows: The scoring result for each row. Each row is a map of column - name to value. - - :param aggregated_results: Map of metric name to aggregated value' + description: A scoring result for a single row. SearchRankingOptions: properties: ranker: @@ -12554,12 +11670,7 @@ components: type: number type: object title: SearchRankingOptions - description: 'Options for ranking and filtering search results. 
- - - :param ranker: (Optional) Name of the ranking algorithm to use - - :param score_threshold: (Optional) Minimum relevance score threshold for results' + description: Options for ranking and filtering search results. Shield: properties: identifier: @@ -12588,12 +11699,7 @@ components: - identifier - provider_id title: Shield - description: 'A safety shield resource that can be used to check content. - - - :param params: (Optional) Configuration parameters for the shield - - :param type: The resource type, always shield' + description: A safety shield resource that can be used to check content. StringType: properties: type: @@ -12603,10 +11709,7 @@ components: default: string type: object title: StringType - description: 'Parameter type for string values. - - - :param type: Discriminator type. Always "string"' + description: Parameter type for string values. TextContentItem: properties: type: @@ -12621,12 +11724,7 @@ components: required: - text title: TextContentItem - description: 'A text content item - - - :param type: Discriminator type of the content item. Always "text" - - :param text: Text content' + description: A text content item ToolDef: properties: toolgroup_id: @@ -12654,21 +11752,7 @@ components: required: - name title: ToolDef - description: 'Tool definition used in runtime contexts. - - - :param name: Name of the tool - - :param description: (Optional) Human-readable description of what the tool - does - - :param input_schema: (Optional) JSON Schema for tool inputs (MCP inputSchema) - - :param output_schema: (Optional) JSON Schema for tool outputs (MCP outputSchema) - - :param metadata: (Optional) Additional metadata about the tool - - :param toolgroup_id: (Optional) ID of the tool group this tool belongs to' + description: Tool definition used in runtime contexts. ToolGroup: properties: identifier: @@ -12699,15 +11783,7 @@ components: - identifier - provider_id title: ToolGroup - description: 'A group of related tools managed together. - - - :param type: Type of resource, always ''tool_group'' - - :param mcp_endpoint: (Optional) Model Context Protocol endpoint for remote - tools - - :param args: (Optional) Additional arguments for the tool group' + description: A group of related tools managed together. ToolInvocationResult: properties: content: @@ -12744,16 +11820,7 @@ components: type: object type: object title: ToolInvocationResult - description: 'Result of a tool invocation. - - - :param content: (Optional) The output content from the tool execution - - :param error_message: (Optional) Error message if the tool execution failed - - :param error_code: (Optional) Numeric error code if the tool execution failed - - :param metadata: (Optional) Additional metadata about the tool execution' + description: Result of a tool invocation. URL: properties: uri: @@ -12763,10 +11830,7 @@ components: required: - uri title: URL - description: 'A URL reference to external content. - - - :param uri: The URL string pointing to the resource' + description: A URL reference to external content. UnionType: properties: type: @@ -12776,10 +11840,7 @@ components: default: union type: object title: UnionType - description: 'Parameter type for union values. - - - :param type: Discriminator type. Always "union"' + description: Parameter type for union values. VectorStoreChunkingStrategyAuto: properties: type: @@ -12789,10 +11850,7 @@ components: default: auto type: object title: VectorStoreChunkingStrategyAuto - description: 'Automatic chunking strategy for vector store files. 
- - - :param type: Strategy type, always "auto" for automatic chunking' + description: Automatic chunking strategy for vector store files. VectorStoreChunkingStrategyStatic: properties: type: @@ -12806,12 +11864,7 @@ components: required: - static title: VectorStoreChunkingStrategyStatic - description: 'Static chunking strategy with configurable parameters. - - - :param type: Strategy type, always "static" for static chunking - - :param static: Configuration parameters for the static chunking strategy' + description: Static chunking strategy with configurable parameters. VectorStoreChunkingStrategyStaticConfig: properties: chunk_overlap_tokens: @@ -12826,14 +11879,7 @@ components: default: 800 type: object title: VectorStoreChunkingStrategyStaticConfig - description: 'Configuration for static chunking strategy. - - - :param chunk_overlap_tokens: Number of tokens to overlap between adjacent - chunks - - :param max_chunk_size_tokens: Maximum number of tokens per chunk, must be - between 100 and 4096' + description: Configuration for static chunking strategy. VectorStoreContent: properties: type: @@ -12848,12 +11894,7 @@ components: - type - text title: VectorStoreContent - description: 'Content item from a vector store file or search result. - - - :param type: Content type, currently only "text" is supported - - :param text: The actual text content' + description: Content item from a vector store file or search result. VectorStoreFileBatchObject: properties: id: @@ -12890,20 +11931,7 @@ components: - status - file_counts title: VectorStoreFileBatchObject - description: 'OpenAI Vector Store File Batch object. - - - :param id: Unique identifier for the file batch - - :param object: Object type identifier, always "vector_store.file_batch" - - :param created_at: Timestamp when the file batch was created - - :param vector_store_id: ID of the vector store containing the file batch - - :param status: Current processing status of the file batch - - :param file_counts: File processing status counts for the batch' + description: OpenAI Vector Store File Batch object. VectorStoreFileCounts: properties: completed: @@ -12929,18 +11957,7 @@ components: - in_progress - total title: VectorStoreFileCounts - description: 'File processing status counts for a vector store. - - - :param completed: Number of files that have been successfully processed - - :param cancelled: Number of files that had their processing cancelled - - :param failed: Number of files that failed to process - - :param in_progress: Number of files currently being processed - - :param total: Total number of files in the vector store' + description: File processing status counts for a vector store. VectorStoreFileLastError: properties: code: @@ -12958,12 +11975,7 @@ components: - code - message title: VectorStoreFileLastError - description: 'Error information for failed vector store file processing. - - - :param code: Error code indicating the type of failure - - :param message: Human-readable error message describing the failure' + description: Error information for failed vector store file processing. VectorStoreFileObject: properties: id: @@ -13018,26 +12030,7 @@ components: - status - vector_store_id title: VectorStoreFileObject - description: 'OpenAI Vector Store File object. 
- - - :param id: Unique identifier for the file - - :param object: Object type identifier, always "vector_store.file" - - :param attributes: Key-value attributes associated with the file - - :param chunking_strategy: Strategy used for splitting the file into chunks - - :param created_at: Timestamp when the file was added to the vector store - - :param last_error: (Optional) Error information if file processing failed - - :param status: Current processing status of the file - - :param usage_bytes: Storage space used by this file in bytes - - :param vector_store_id: ID of the vector store containing this file' + description: OpenAI Vector Store File object. VectorStoreObject: properties: id: @@ -13083,32 +12076,7 @@ components: - created_at - file_counts title: VectorStoreObject - description: 'OpenAI Vector Store object. - - - :param id: Unique identifier for the vector store - - :param object: Object type identifier, always "vector_store" - - :param created_at: Timestamp when the vector store was created - - :param name: (Optional) Name of the vector store - - :param usage_bytes: Storage space used by the vector store in bytes - - :param file_counts: File processing status counts for the vector store - - :param status: Current status of the vector store - - :param expires_after: (Optional) Expiration policy for the vector store - - :param expires_at: (Optional) Timestamp when the vector store will expire - - :param last_active_at: (Optional) Timestamp of last activity on the vector - store - - :param metadata: Set of key-value pairs that can be attached to the vector - store' + description: OpenAI Vector Store object. VectorStoreSearchResponse: properties: file_id: @@ -13140,18 +12108,7 @@ components: - score - content title: VectorStoreSearchResponse - description: 'Response from searching a vector store. - - - :param file_id: Unique identifier of the file containing the result - - :param filename: Name of the file containing the result - - :param score: Relevance score for this search result - - :param attributes: (Optional) Key-value attributes associated with the file - - :param content: List of content items matching the search query' + description: Response from searching a vector store. VectorStoreSearchResponsePage: properties: object: @@ -13178,18 +12135,7 @@ components: - search_query - data title: VectorStoreSearchResponsePage - description: 'Paginated response from searching a vector store. - - - :param object: Object type identifier for the search results page - - :param search_query: The original search query that was executed - - :param data: List of search result objects - - :param has_more: Whether there are more results available beyond this page - - :param next_page: (Optional) Token for retrieving the next page of results' + description: Paginated response from searching a vector store. VersionInfo: properties: version: @@ -13199,10 +12145,7 @@ components: required: - version title: VersionInfo - description: 'Version information for the service. - - - :param version: Version number of the service' + description: Version information for the service. ViolationLevel: type: string enum: @@ -13210,14 +12153,7 @@ components: - warn - error title: ViolationLevel - description: 'Severity level of a safety violation. 
- - - :cvar INFO: Informational level violation that does not require action - - :cvar WARN: Warning level violation that suggests caution but allows continuation - - :cvar ERROR: Error level violation that requires blocking or intervention' + description: Severity level of a safety violation. WeightedRanker: properties: type: @@ -13230,16 +12166,11 @@ components: maximum: 1.0 minimum: 0.0 title: Alpha - description: Weight factor between 0 and 1. 0 means only keyword scores, - 1 means only vector scores. + description: Weight factor between 0 and 1. 0 means only keyword scores, 1 means only vector scores. default: 0.5 type: object title: WeightedRanker - description: "Weighted ranker configuration that combines vector and keyword\ - \ scores.\n\n:param type: The type of ranker, always \"weighted\"\n:param\ - \ alpha: Weight factor between 0 and 1.\n 0 means only use keyword\ - \ scores,\n 1 means only use vector scores,\n values\ - \ in between blend both scores." + description: Weighted ranker configuration that combines vector and keyword scores. _URLOrData: properties: url: @@ -13250,13 +12181,7 @@ components: type: string type: object title: _URLOrData - description: 'A URL or a base64 encoded string - - - :param url: A URL of the image or data URL in the format of data:image/{type};base64,{data}. - Note that URL could have length limits. - - :param data: base64 encoded image data as string' + description: A URL or a base64 encoded string _batches_Request: properties: input_file_id: @@ -13720,18 +12645,7 @@ components: - ranking_options title: _vector_stores_vector_store_id_search_Request Error: - description: 'Error response from the API. Roughly follows RFC 7807. - - - :param status: HTTP status code - - :param title: Error title, a short summary of the error which is invariant - for an error type - - :param detail: Error detail, a longer human-readable description of the error - - :param instance: (Optional) A URL which can be used to retrieve more information - about the specific occurrence of the error' + description: Error response from the API. Roughly follows RFC 7807. properties: status: title: Status @@ -13753,25 +12667,20 @@ components: title: Error type: object ListOpenAIResponseInputItem: - description: 'List container for OpenAI response input items. - - - :param data: List of input items - - :param object: Object type identifier, always "list"' + description: List container for OpenAI response input items. 
properties: data: items: anyOf: - discriminator: mapping: - file_search_call: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall' - function_call: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall' - mcp_approval_request: '#/components/schemas/OpenAIResponseMCPApprovalRequest' - mcp_call: '#/components/schemas/OpenAIResponseOutputMessageMCPCall' - mcp_list_tools: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools' - message: '#/components/schemas/OpenAIResponseMessage' - web_search_call: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall' + file_search_call: '#/$defs/OpenAIResponseOutputMessageFileSearchToolCall' + function_call: '#/$defs/OpenAIResponseOutputMessageFunctionToolCall' + mcp_approval_request: '#/$defs/OpenAIResponseMCPApprovalRequest' + mcp_call: '#/$defs/OpenAIResponseOutputMessageMCPCall' + mcp_list_tools: '#/$defs/OpenAIResponseOutputMessageMCPListTools' + message: '#/$defs/OpenAIResponseMessage' + web_search_call: '#/$defs/OpenAIResponseOutputMessageWebSearchToolCall' propertyName: type oneOf: - $ref: '#/components/schemas/OpenAIResponseMessage' @@ -13796,18 +12705,7 @@ components: title: ListOpenAIResponseInputItem type: object ListOpenAIResponseObject: - description: 'Paginated list of OpenAI response objects with navigation metadata. - - - :param data: List of response objects with their input context - - :param has_more: Whether there are more results available beyond this page - - :param first_id: Identifier of the first item in this page - - :param last_id: Identifier of the last item in this page - - :param object: Object type identifier, always "list"' + description: Paginated list of OpenAI response objects with navigation metadata. properties: data: items: @@ -13836,14 +12734,7 @@ components: title: ListOpenAIResponseObject type: object OpenAIDeleteResponseObject: - description: 'Response object confirming deletion of an OpenAI response. - - - :param id: Unique identifier of the deleted response - - :param object: Object type identifier, always "response" - - :param deleted: Deletion confirmation flag, always True' + description: Response object confirming deletion of an OpenAI response. properties: id: title: Id @@ -13937,18 +12828,7 @@ components: title: ConversationItemDeletedResource type: object ListOpenAIFileResponse: - description: 'Response for listing files in OpenAI Files API. - - - :param data: List of file objects - - :param has_more: Whether there are more files available beyond this page - - :param first_id: ID of the first file in the list for pagination - - :param last_id: ID of the last file in the list for pagination - - :param object: The object type, which is always "list"' + description: Response for listing files in OpenAI Files API. properties: data: items: @@ -13977,14 +12857,7 @@ components: title: ListOpenAIFileResponse type: object OpenAIFileDeleteResponse: - description: 'Response for deleting a file in OpenAI Files API. - - - :param id: The file identifier that was deleted - - :param object: The object type, which is always "file" - - :param deleted: Whether the file was successfully deleted' + description: Response for deleting a file in OpenAI Files API. properties: id: title: Id @@ -14003,19 +12876,7 @@ components: title: OpenAIFileDeleteResponse type: object ListOpenAIChatCompletionResponse: - description: 'Response from listing OpenAI-compatible chat completions. 
- - - :param data: List of chat completion objects with their input messages - - :param has_more: Whether there are more completions available beyond this - list - - :param first_id: ID of the first completion in this list - - :param last_id: ID of the last completion in this list - - :param object: Must be "list" to identify this as a list response' + description: Response from listing OpenAI-compatible chat completions. properties: data: items: @@ -14044,18 +12905,7 @@ components: title: ListOpenAIChatCompletionResponse type: object OpenAIAssistantMessageParam: - description: 'A message containing the model''s (assistant) response in an OpenAI-compatible - chat completion request. - - - :param role: Must be "assistant" to identify this as the model''s response - - :param content: The content of the model''s response - - :param name: (Optional) The name of the assistant message participant. - - :param tool_calls: List of tool calls. Each tool call is an OpenAIChatCompletionToolCall - object.' + description: A message containing the model's (assistant) response in an OpenAI-compatible chat completion request. properties: role: const: assistant @@ -14083,25 +12933,16 @@ components: title: OpenAIAssistantMessageParam type: object OpenAIChoice: - description: 'A choice from an OpenAI-compatible chat completion response. - - - :param message: The message from the model - - :param finish_reason: The reason the model stopped generating - - :param index: The index of the choice - - :param logprobs: (Optional) The log probabilities for the tokens in the message' + description: A choice from an OpenAI-compatible chat completion response. properties: message: discriminator: mapping: - assistant: '#/components/schemas/OpenAIAssistantMessageParam' - developer: '#/components/schemas/OpenAIDeveloperMessageParam' - system: '#/components/schemas/OpenAISystemMessageParam' - tool: '#/components/schemas/OpenAIToolMessageParam' - user: '#/components/schemas/OpenAIUserMessageParam' + assistant: '#/$defs/OpenAIAssistantMessageParam' + developer: '#/$defs/OpenAIDeveloperMessageParam' + system: '#/$defs/OpenAISystemMessageParam' + tool: '#/$defs/OpenAIToolMessageParam' + user: '#/$defs/OpenAIUserMessageParam' propertyName: role oneOf: - $ref: '#/components/schemas/OpenAIUserMessageParam' @@ -14126,13 +12967,7 @@ components: title: OpenAIChoice type: object OpenAIChoiceLogprobs: - description: 'The log probabilities for the tokens in the message from an OpenAI-compatible - chat completion response. - - - :param content: (Optional) The log probabilities for the tokens in the message - - :param refusal: (Optional) The log probabilities for the tokens in the message' + description: The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response. 
properties: content: title: Content @@ -14176,11 +13011,11 @@ components: items: discriminator: mapping: - assistant: '#/components/schemas/OpenAIAssistantMessageParam' - developer: '#/components/schemas/OpenAIDeveloperMessageParam' - system: '#/components/schemas/OpenAISystemMessageParam' - tool: '#/components/schemas/OpenAIToolMessageParam' - user: '#/components/schemas/OpenAIUserMessageParam' + assistant: '#/$defs/OpenAIAssistantMessageParam' + developer: '#/$defs/OpenAIDeveloperMessageParam' + system: '#/$defs/OpenAISystemMessageParam' + tool: '#/$defs/OpenAIToolMessageParam' + user: '#/$defs/OpenAIUserMessageParam' propertyName: role oneOf: - $ref: '#/components/schemas/OpenAIUserMessageParam' @@ -14199,16 +13034,7 @@ components: title: OpenAICompletionWithInputMessages type: object OpenAIUserMessageParam: - description: 'A message from the user in an OpenAI-compatible chat completion - request. - - - :param role: Must be "user" to identify this as a user message - - :param content: The content of the message, which can include text and other - media - - :param name: (Optional) The name of the user message participant.' + description: A message from the user in an OpenAI-compatible chat completion request. properties: role: const: user @@ -14221,9 +13047,9 @@ components: - items: discriminator: mapping: - file: '#/components/schemas/OpenAIFile' - image_url: '#/components/schemas/OpenAIChatCompletionContentPartImageParam' - text: '#/components/schemas/OpenAIChatCompletionContentPartTextParam' + file: '#/$defs/OpenAIFile' + image_url: '#/$defs/OpenAIChatCompletionContentPartImageParam' + text: '#/$defs/OpenAIChatCompletionContentPartTextParam' propertyName: type oneOf: - $ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam' @@ -14240,9 +13066,7 @@ components: title: OpenAIUserMessageParam type: object ScoringFn: - description: 'A scoring function resource for evaluating model outputs. - - :param type: The resource type, always scoring_function' + description: A scoring function resource for evaluating model outputs. 
properties: identifier: description: Unique identifier for this resource in llama stack @@ -14275,16 +13099,16 @@ components: description: The return type of the deterministic function discriminator: mapping: - agent_turn_input: '#/components/schemas/AgentTurnInputType' - array: '#/components/schemas/ArrayType' - boolean: '#/components/schemas/BooleanType' - chat_completion_input: '#/components/schemas/ChatCompletionInputType' - completion_input: '#/components/schemas/CompletionInputType' - json: '#/components/schemas/JsonType' - number: '#/components/schemas/NumberType' - object: '#/components/schemas/ObjectType' - string: '#/components/schemas/StringType' - union: '#/components/schemas/UnionType' + agent_turn_input: '#/$defs/AgentTurnInputType' + array: '#/$defs/ArrayType' + boolean: '#/$defs/BooleanType' + chat_completion_input: '#/$defs/ChatCompletionInputType' + completion_input: '#/$defs/CompletionInputType' + json: '#/$defs/JsonType' + number: '#/$defs/NumberType' + object: '#/$defs/ObjectType' + string: '#/$defs/StringType' + union: '#/$defs/UnionType' propertyName: type oneOf: - $ref: '#/components/schemas/StringType' @@ -14299,14 +13123,13 @@ components: - $ref: '#/components/schemas/AgentTurnInputType' title: Return Type params: - description: The parameters for the scoring function for benchmark eval, - these can be overridden for app eval + description: The parameters for the scoring function for benchmark eval, these can be overridden for app eval title: Params discriminator: mapping: - basic: '#/components/schemas/BasicScoringFnParams' - llm_as_judge: '#/components/schemas/LLMAsJudgeScoringFnParams' - regex_parser: '#/components/schemas/RegexParserScoringFnParams' + basic: '#/$defs/BasicScoringFnParams' + llm_as_judge: '#/$defs/LLMAsJudgeScoringFnParams' + regex_parser: '#/$defs/RegexParserScoringFnParams' propertyName: type oneOf: - $ref: '#/components/schemas/LLMAsJudgeScoringFnParams' @@ -14320,10 +13143,7 @@ components: title: ScoringFn type: object ListToolDefsResponse: - description: 'Response containing a list of tool definitions. - - - :param data: List of tool definitions' + description: Response containing a list of tool definitions. properties: data: items: @@ -14335,14 +13155,7 @@ components: title: ListToolDefsResponse type: object VectorStoreDeleteResponse: - description: 'Response from deleting a vector store. - - - :param id: Unique identifier of the deleted vector store - - :param object: Object type identifier for the deletion response - - :param deleted: Whether the deletion operation was successful' + description: Response from deleting a vector store. properties: id: title: Id @@ -14360,16 +13173,7 @@ components: title: VectorStoreDeleteResponse type: object VectorStoreFileContentsResponse: - description: 'Response from retrieving the contents of a vector store file. - - - :param file_id: Unique identifier for the file - - :param filename: Name of the file - - :param attributes: Key-value attributes associated with the file - - :param content: List of content items from the file' + description: Response from retrieving the contents of a vector store file. properties: file_id: title: File Id @@ -14394,14 +13198,7 @@ components: title: VectorStoreFileContentsResponse type: object VectorStoreFileDeleteResponse: - description: 'Response from deleting a vector store file. 
- - - :param id: Unique identifier of the deleted file - - :param object: Object type identifier for the deletion response - - :param deleted: Whether the deletion operation was successful' + description: Response from deleting a vector store file. properties: id: title: Id @@ -14419,18 +13216,7 @@ components: title: VectorStoreFileDeleteResponse type: object VectorStoreFilesListInBatchResponse: - description: 'Response from listing files in a vector store file batch. - - - :param object: Object type identifier, always "list" - - :param data: List of vector store file objects in the batch - - :param first_id: (Optional) ID of the first file in the list for pagination - - :param last_id: (Optional) ID of the last file in the list for pagination - - :param has_more: Whether there are more files available beyond this page' + description: Response from listing files in a vector store file batch. properties: object: default: list @@ -14458,18 +13244,7 @@ components: title: VectorStoreFilesListInBatchResponse type: object VectorStoreListFilesResponse: - description: 'Response from listing files in a vector store. - - - :param object: Object type identifier, always "list" - - :param data: List of vector store file objects - - :param first_id: (Optional) ID of the first file in the list for pagination - - :param last_id: (Optional) ID of the last file in the list for pagination - - :param has_more: Whether there are more files available beyond this page' + description: Response from listing files in a vector store. properties: object: default: list @@ -14497,19 +13272,7 @@ components: title: VectorStoreListFilesResponse type: object VectorStoreListResponse: - description: 'Response from listing vector stores. - - - :param object: Object type identifier, always "list" - - :param data: List of vector store objects - - :param first_id: (Optional) ID of the first vector store in the list for pagination - - :param last_id: (Optional) ID of the last vector store in the list for pagination - - :param has_more: Whether there are more vector stores available beyond this - page' + description: Response from listing vector stores. properties: object: default: list @@ -14537,13 +13300,7 @@ components: title: VectorStoreListResponse type: object OpenAIResponseMessage: - description: 'Corresponds to the various Message types in the Responses API. - - They are all under one type because the Responses API gives them all - - the same "type" value, and there is no way to tell them apart in certain - - scenarios.' + description: "Corresponds to the various Message types in the Responses API.\nThey are all under one type because the Responses API gives them all\nthe same \"type\" value, and there is no way to tell them apart in certain\nscenarios." 
properties: content: anyOf: @@ -14551,9 +13308,9 @@ components: - items: discriminator: mapping: - input_file: '#/components/schemas/OpenAIResponseInputMessageContentFile' - input_image: '#/components/schemas/OpenAIResponseInputMessageContentImage' - input_text: '#/components/schemas/OpenAIResponseInputMessageContentText' + input_file: '#/$defs/OpenAIResponseInputMessageContentFile' + input_image: '#/$defs/OpenAIResponseInputMessageContentImage' + input_text: '#/$defs/OpenAIResponseInputMessageContentText' propertyName: type oneOf: - $ref: '#/components/schemas/OpenAIResponseInputMessageContentText' @@ -14563,8 +13320,8 @@ components: - items: discriminator: mapping: - output_text: '#/components/schemas/OpenAIResponseOutputMessageContentOutputText' - refusal: '#/components/schemas/OpenAIResponseContentPartRefusal' + output_text: '#/$defs/OpenAIResponseOutputMessageContentOutputText' + refusal: '#/$defs/OpenAIResponseContentPartRefusal' propertyName: type oneOf: - $ref: '#/components/schemas/OpenAIResponseOutputMessageContentOutputText' @@ -14601,10 +13358,7 @@ components: title: OpenAIResponseMessage type: object OpenAIResponseObjectWithInput: - description: 'OpenAI response object extended with input context information. - - - :param input: List of input items that led to this response' + description: OpenAI response object extended with input context information. properties: created_at: title: Created At @@ -14627,13 +13381,13 @@ components: items: discriminator: mapping: - file_search_call: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall' - function_call: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall' - mcp_approval_request: '#/components/schemas/OpenAIResponseMCPApprovalRequest' - mcp_call: '#/components/schemas/OpenAIResponseOutputMessageMCPCall' - mcp_list_tools: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools' - message: '#/components/schemas/OpenAIResponseMessage' - web_search_call: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall' + file_search_call: '#/$defs/OpenAIResponseOutputMessageFileSearchToolCall' + function_call: '#/$defs/OpenAIResponseOutputMessageFunctionToolCall' + mcp_approval_request: '#/$defs/OpenAIResponseMCPApprovalRequest' + mcp_call: '#/$defs/OpenAIResponseOutputMessageMCPCall' + mcp_list_tools: '#/$defs/OpenAIResponseOutputMessageMCPListTools' + message: '#/$defs/OpenAIResponseMessage' + web_search_call: '#/$defs/OpenAIResponseOutputMessageWebSearchToolCall' propertyName: type oneOf: - $ref: '#/components/schemas/OpenAIResponseMessage' @@ -14677,12 +13431,12 @@ components: items: discriminator: mapping: - file_search: '#/components/schemas/OpenAIResponseInputToolFileSearch' - function: '#/components/schemas/OpenAIResponseInputToolFunction' - mcp: '#/components/schemas/OpenAIResponseToolMCP' - web_search: '#/components/schemas/OpenAIResponseInputToolWebSearch' - web_search_preview: '#/components/schemas/OpenAIResponseInputToolWebSearch' - web_search_preview_2025_03_11: '#/components/schemas/OpenAIResponseInputToolWebSearch' + file_search: '#/$defs/OpenAIResponseInputToolFileSearch' + function: '#/$defs/OpenAIResponseInputToolFunction' + mcp: '#/$defs/OpenAIResponseToolMCP' + web_search: '#/$defs/OpenAIResponseInputToolWebSearch' + web_search_preview: '#/$defs/OpenAIResponseInputToolWebSearch' + web_search_preview_2025_03_11: '#/$defs/OpenAIResponseInputToolWebSearch' propertyName: type oneOf: - $ref: '#/components/schemas/OpenAIResponseInputToolWebSearch' @@ -14707,13 +13461,13 @@ 
components: anyOf: - discriminator: mapping: - file_search_call: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall' - function_call: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall' - mcp_approval_request: '#/components/schemas/OpenAIResponseMCPApprovalRequest' - mcp_call: '#/components/schemas/OpenAIResponseOutputMessageMCPCall' - mcp_list_tools: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools' - message: '#/components/schemas/OpenAIResponseMessage' - web_search_call: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall' + file_search_call: '#/$defs/OpenAIResponseOutputMessageFileSearchToolCall' + function_call: '#/$defs/OpenAIResponseOutputMessageFunctionToolCall' + mcp_approval_request: '#/$defs/OpenAIResponseMCPApprovalRequest' + mcp_call: '#/$defs/OpenAIResponseOutputMessageMCPCall' + mcp_list_tools: '#/$defs/OpenAIResponseOutputMessageMCPListTools' + message: '#/$defs/OpenAIResponseMessage' + web_search_call: '#/$defs/OpenAIResponseOutputMessageWebSearchToolCall' propertyName: type oneOf: - $ref: '#/components/schemas/OpenAIResponseMessage' @@ -14789,9 +13543,9 @@ components: example: status: 500 title: Internal Server Error - detail: An unexpected error occurred + detail: An unexpected error occurred. Our team has been notified. DefaultError: - description: An error occurred + description: An unexpected error occurred content: application/json: schema: diff --git a/docs/static/stainless-llama-stack-spec.json b/docs/static/stainless-llama-stack-spec.json new file mode 100644 index 000000000..07e051f53 --- /dev/null +++ b/docs/static/stainless-llama-stack-spec.json @@ -0,0 +1,16303 @@ +{ + "openapi": "3.1.0", + "info": { + "title": "Llama Stack API - Stable & Experimental APIs", + "description": "A comprehensive API for building and deploying AI applications\n\n**\ud83d\udd17 COMBINED**: This specification includes both stable production-ready APIs and experimental pre-release APIs. 
Use stable APIs for production deployments and experimental APIs for testing new features.", + "version": "1.0.0" + }, + "servers": [ + { + "url": "https://api.llamastack.com", + "description": "Production server" + }, + { + "url": "https://staging-api.llamastack.com", + "description": "Staging server" + } + ], + "paths": { + "/v1beta/datasetio/append-rows/{dataset_id}": { + "post": { + "tags": [ + "V1Beta" + ], + "summary": "Append rows to a dataset.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "append_rows_v1beta_datasetio_append_rows__dataset_id__post", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "dataset_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "Path parameter: dataset_id" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1beta/datasetio/iterrows/{dataset_id}": { + "get": { + "tags": [ + "V1Beta" + ], + "summary": "Get a paginated list of rows from a dataset.", + "description": "Query endpoint for proper schema generation.", + "operationId": "iterrows_v1beta_datasetio_iterrows__dataset_id__get", + "parameters": [ + { + "name": "limit", + "in": "query", + "required": true, + "schema": { + "type": "integer", + "title": "Limit" + } + }, + { + "name": "start_index", + "in": "query", + "required": true, + "schema": { + "type": "integer", + "title": "Start Index" + } + }, + { + "name": "dataset_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Dataset Id" + } + } + ], + "responses": { + "200": { + "description": "A PaginatedResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PaginatedResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1beta/datasets": { + "get": { + "tags": [ + "V1Beta" + ], + "summary": "List all datasets.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "list_datasets_v1beta_datasets_get", + "responses": { + "200": { + "description": "A ListDatasetsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListDatasetsResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + 
"description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + }, + "post": { + "tags": [ + "V1Beta" + ], + "summary": "Register a new dataset.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "register_dataset_v1beta_datasets_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/__main_____datasets_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A Dataset.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Dataset" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1beta/datasets/{dataset_id}": { + "delete": { + "tags": [ + "V1Beta" + ], + "summary": "Unregister a dataset by its ID.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "unregister_dataset_v1beta_datasets__dataset_id__delete", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "dataset_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "Path parameter: dataset_id" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1Beta" + ], + "summary": "Get a dataset by its ID.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_dataset_v1beta_datasets__dataset_id__get", + "parameters": [ + { + "name": "dataset_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Dataset Id" + } + } + ], + "responses": { + "200": { + "description": "A Dataset.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Dataset" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/agents": { + "get": { + "tags": [ + "V1Alpha" + ], + "summary": "List all agents.", + "description": "Query endpoint for proper schema 
generation.", + "operationId": "list_agents_v1alpha_agents_get", + "parameters": [ + { + "name": "limit", + "in": "query", + "required": true, + "schema": { + "type": "integer", + "title": "Limit" + } + }, + { + "name": "start_index", + "in": "query", + "required": true, + "schema": { + "type": "integer", + "title": "Start Index" + } + } + ], + "responses": { + "200": { + "description": "A PaginatedResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PaginatedResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1Alpha" + ], + "summary": "Create an agent with the given configuration.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "create_agent_v1alpha_agents_post", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AgentConfig" + } + } + } + }, + "responses": { + "200": { + "description": "An AgentCreateResponse with the agent ID.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AgentCreateResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/agents/{agent_id}": { + "delete": { + "tags": [ + "V1Alpha" + ], + "summary": "Delete an agent by its ID and its associated sessions and turns.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "delete_agent_v1alpha_agents__agent_id__delete", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the agent to delete." 
+ } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1Alpha" + ], + "summary": "Describe an agent by its ID.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_agent_v1alpha_agents__agent_id__get", + "parameters": [ + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Agent Id" + }, + "description": "ID of the agent." + } + ], + "responses": { + "200": { + "description": "An Agent of the agent.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Agent" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/agents/{agent_id}/session": { + "post": { + "tags": [ + "V1Alpha" + ], + "summary": "Create a new session for an agent.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "create_agent_session_v1alpha_agents__agent_id__session_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/__main_____agents_agent_id_session_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "An AgentSessionCreateResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AgentSessionCreateResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "parameters": [ + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the agent to create the session for." 
+ } + ] + } + }, + "/v1alpha/agents/{agent_id}/session/{session_id}": { + "delete": { + "tags": [ + "V1Alpha" + ], + "summary": "Delete an agent session by its ID and its associated turns.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "delete_agents_session_v1alpha_agents__agent_id__session__session_id__delete", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "session_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the session to delete." + }, + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the agent to delete the session for." + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1Alpha" + ], + "summary": "Retrieve an agent session by its ID.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_agents_session_v1alpha_agents__agent_id__session__session_id__get", + "parameters": [ + { + "name": "turn_ids", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Turn Ids" + } + }, + { + "name": "session_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Session Id" + }, + "description": "The ID of the session to get." + }, + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Agent Id" + }, + "description": "The ID of the agent to get the session for." 
+ } + ], + "responses": { + "200": { + "description": "A Session.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Session" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/agents/{agent_id}/session/{session_id}/turn": { + "post": { + "tags": [ + "V1Alpha" + ], + "summary": "Create a new turn for an agent.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "create_agent_turn_v1alpha_agents__agent_id__session__session_id__turn_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/__main_____agents_agent_id_session_session_id_turn_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "If stream=False, returns a Turn object.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Turn" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "parameters": [ + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the agent to create the turn for." + }, + { + "name": "session_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the session to create the turn for." + } + ] + } + }, + "/v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}": { + "get": { + "tags": [ + "V1Alpha" + ], + "summary": "Retrieve an agent turn by its ID.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_agents_turn_v1alpha_agents__agent_id__session__session_id__turn__turn_id__get", + "parameters": [ + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Agent Id" + }, + "description": "The ID of the agent to get the turn for." + }, + { + "name": "session_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Session Id" + }, + "description": "The ID of the session to get the turn for." + }, + { + "name": "turn_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Turn Id" + }, + "description": "The ID of the turn to get." 
+ } + ], + "responses": { + "200": { + "description": "A Turn.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Turn" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}/resume": { + "post": { + "tags": [ + "V1Alpha" + ], + "summary": "Resume an agent turn with executed tool call responses.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "resume_agent_turn_v1alpha_agents__agent_id__session__session_id__turn__turn_id__resume_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/__main_____agents_agent_id_session_session_id_turn_turn_id_resume_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A Turn object if stream is False, otherwise an AsyncIterator of AgentTurnResponseStreamChunk objects.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Turn" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "parameters": [ + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the agent to resume." + }, + { + "name": "session_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the session to resume." + }, + { + "name": "turn_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the turn to resume." + } + ] + } + }, + "/v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}/step/{step_id}": { + "get": { + "tags": [ + "V1Alpha" + ], + "summary": "Retrieve an agent step by its ID.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_agents_step_v1alpha_agents__agent_id__session__session_id__turn__turn_id__step__step_id__get", + "parameters": [ + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Agent Id" + }, + "description": "The ID of the agent to get the step for." + }, + { + "name": "session_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Session Id" + }, + "description": "The ID of the session to get the step for." + }, + { + "name": "turn_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Turn Id" + }, + "description": "The ID of the turn to get the step for." + }, + { + "name": "step_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Step Id" + }, + "description": "The ID of the step to get." 
+ } + ], + "responses": { + "200": { + "description": "An AgentStepResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AgentStepResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/agents/{agent_id}/sessions": { + "get": { + "tags": [ + "V1Alpha" + ], + "summary": "List all session(s) of a given agent.", + "description": "Query endpoint for proper schema generation.", + "operationId": "list_agent_sessions_v1alpha_agents__agent_id__sessions_get", + "parameters": [ + { + "name": "limit", + "in": "query", + "required": true, + "schema": { + "type": "integer", + "title": "Limit" + } + }, + { + "name": "start_index", + "in": "query", + "required": true, + "schema": { + "type": "integer", + "title": "Start Index" + } + }, + { + "name": "agent_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Agent Id" + }, + "description": "The ID of the agent to list sessions for." + } + ], + "responses": { + "200": { + "description": "A PaginatedResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PaginatedResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/eval/benchmarks": { + "get": { + "tags": [ + "V1Alpha" + ], + "summary": "List all benchmarks.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "list_benchmarks_v1alpha_eval_benchmarks_get", + "responses": { + "200": { + "description": "A ListBenchmarksResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListBenchmarksResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1Alpha" + ], + "summary": "Register a benchmark.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "register_benchmark_v1alpha_eval_benchmarks_post", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", 
+ "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/eval/benchmarks/{benchmark_id}": { + "delete": { + "tags": [ + "V1Alpha" + ], + "summary": "Unregister a benchmark.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "unregister_benchmark_v1alpha_eval_benchmarks__benchmark_id__delete", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "benchmark_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the benchmark to unregister." + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1Alpha" + ], + "summary": "Get a benchmark by its ID.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_benchmark_v1alpha_eval_benchmarks__benchmark_id__get", + "parameters": [ + { + "name": "benchmark_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Benchmark Id" + }, + "description": "The ID of the benchmark to get." 
+ } + ], + "responses": { + "200": { + "description": "A Benchmark.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Benchmark" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/eval/benchmarks/{benchmark_id}/evaluations": { + "post": { + "tags": [ + "V1Alpha" + ], + "summary": "Evaluate a list of rows on a benchmark.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "evaluate_rows_v1alpha_eval_benchmarks__benchmark_id__evaluations_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BenchmarkConfig" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "EvaluateResponse object containing generations and scores.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/EvaluateResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "parameters": [ + { + "name": "benchmark_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the benchmark to run the evaluation on." + } + ] + } + }, + "/v1alpha/eval/benchmarks/{benchmark_id}/jobs": { + "post": { + "tags": [ + "V1Alpha" + ], + "summary": "Run an evaluation on a benchmark.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "run_eval_v1alpha_eval_benchmarks__benchmark_id__jobs_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BenchmarkConfig" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "The job that was created to run the evaluation.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Job" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "parameters": [ + { + "name": "benchmark_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the benchmark to run the evaluation on." 
+ } + ] + } + }, + "/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}": { + "delete": { + "tags": [ + "V1Alpha" + ], + "summary": "Cancel a job.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "job_cancel_v1alpha_eval_benchmarks__benchmark_id__jobs__job_id__delete", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "benchmark_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the benchmark to run the evaluation on." + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the job to cancel." + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1Alpha" + ], + "summary": "Get the status of a job.", + "description": "Query endpoint for proper schema generation.", + "operationId": "job_status_v1alpha_eval_benchmarks__benchmark_id__jobs__job_id__get", + "parameters": [ + { + "name": "benchmark_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Benchmark Id" + }, + "description": "The ID of the benchmark to run the evaluation on." + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Job Id" + }, + "description": "The ID of the job to get the status of." + } + ], + "responses": { + "200": { + "description": "The status of the evaluation job.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Job" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result": { + "get": { + "tags": [ + "V1Alpha" + ], + "summary": "Get the result of a job.", + "description": "Query endpoint for proper schema generation.", + "operationId": "job_result_v1alpha_eval_benchmarks__benchmark_id__jobs__job_id__result_get", + "parameters": [ + { + "name": "benchmark_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Benchmark Id" + }, + "description": "The ID of the benchmark to run the evaluation on." + }, + { + "name": "job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Job Id" + }, + "description": "The ID of the job to get the result of." 
+ } + ], + "responses": { + "200": { + "description": "The result of the job.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/EvaluateResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/inference/rerank": { + "post": { + "tags": [ + "V1Alpha" + ], + "summary": "Rerank a list of documents based on their relevance to a query.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "rerank_v1alpha_inference_rerank_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_inference_rerank_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "RerankResponse with indices sorted by relevance score (descending).", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RerankResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1alpha/post-training/job/artifacts": { + "get": { + "tags": [ + "V1Alpha" + ], + "summary": "Get the artifacts of a training job.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_training_job_artifacts_v1alpha_post_training_job_artifacts_get", + "parameters": [ + { + "name": "job_uuid", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Job Uuid" + } + } + ], + "responses": { + "200": { + "description": "A PostTrainingJobArtifactsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostTrainingJobArtifactsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/post-training/job/cancel": { + "post": { + "tags": [ + "V1Alpha" + ], + "summary": "Cancel a training job.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "cancel_training_job_v1alpha_post_training_job_cancel_post", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": 
"#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/post-training/job/status": { + "get": { + "tags": [ + "V1Alpha" + ], + "summary": "Get the status of a training job.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_training_job_status_v1alpha_post_training_job_status_get", + "parameters": [ + { + "name": "job_uuid", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Job Uuid" + } + } + ], + "responses": { + "200": { + "description": "A PostTrainingJobStatusResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostTrainingJobStatusResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1alpha/post-training/jobs": { + "get": { + "tags": [ + "V1Alpha" + ], + "summary": "Get all training jobs.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "get_training_jobs_v1alpha_post_training_jobs_get", + "responses": { + "200": { + "description": "A ListPostTrainingJobsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListPostTrainingJobsResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1alpha/post-training/preference-optimize": { + "post": { + "tags": [ + "V1Alpha" + ], + "summary": "Run preference optimization of a model.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "preference_optimize_v1alpha_post_training_preference_optimize_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DPOAlignmentConfig" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A PostTrainingJob.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostTrainingJob" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1alpha/post-training/supervised-fine-tune": { + "post": { + 
"tags": [ + "V1Alpha" + ], + "summary": "Run supervised fine-tuning of a model.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "supervised_fine_tune_v1alpha_post_training_supervised_fine_tune_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TrainingConfig" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A PostTrainingJob.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostTrainingJob" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/batches": { + "get": { + "tags": [ + "V1" + ], + "summary": "List all batches for the current user.", + "description": "Query endpoint for proper schema generation.", + "operationId": "list_batches_v1_batches_get", + "parameters": [ + { + "name": "after", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "After" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "default": 20, + "title": "Limit" + } + } + ], + "responses": { + "200": { + "description": "A list of batch objects.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListBatchesResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Create a new batch for processing multiple API requests.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "create_batch_v1_batches_post", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_batches_Request" + } + } + } + }, + "responses": { + "200": { + "description": "The created batch object.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Batch" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/batches/{batch_id}": { + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieve information about a specific batch.", + "description": "Query endpoint for proper schema generation.", + "operationId": "retrieve_batch_v1_batches__batch_id__get", + "parameters": [ + { + "name": "batch_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Batch Id" + }, + 
"description": "The ID of the batch to retrieve." + } + ], + "responses": { + "200": { + "description": "The batch object.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Batch" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/batches/{batch_id}/cancel": { + "post": { + "tags": [ + "V1" + ], + "summary": "Cancel a batch that is in progress.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "cancel_batch_v1_batches__batch_id__cancel_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_batches_batch_id_cancel_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "The updated batch object.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Batch" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "parameters": [ + { + "name": "batch_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the batch to cancel." 
+ } + ] + } + }, + "/v1/chat/completions": { + "get": { + "tags": [ + "V1" + ], + "summary": "List chat completions.", + "description": "Query endpoint for proper schema generation.", + "operationId": "list_chat_completions_v1_chat_completions_get", + "parameters": [ + { + "name": "after", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "After" + } + }, + { + "name": "model", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Model" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "default": 20, + "title": "Limit" + } + }, + { + "name": "order", + "in": "query", + "required": false, + "schema": { + "$ref": "#/components/schemas/Order", + "default": "desc" + } + } + ], + "responses": { + "200": { + "description": "A ListOpenAIChatCompletionResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListOpenAIChatCompletionResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Create chat completions.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_chat_completion_v1_chat_completions_post", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIChatCompletionRequestWithExtraBody" + } + } + } + }, + "responses": { + "200": { + "description": "An OpenAIChatCompletion.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIChatCompletion" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/chat/completions/{completion_id}": { + "get": { + "tags": [ + "V1" + ], + "summary": "Get chat completion.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_chat_completion_v1_chat_completions__completion_id__get", + "parameters": [ + { + "name": "completion_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Completion Id" + }, + "description": "ID of the chat completion." 
+ } + ], + "responses": { + "200": { + "description": "A OpenAICompletionWithInputMessages.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAICompletionWithInputMessages" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/completions": { + "post": { + "tags": [ + "V1" + ], + "summary": "Create completion.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_completion_v1_completions_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAICompletionRequestWithExtraBody" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "An OpenAICompletion.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAICompletion" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/conversations": { + "post": { + "tags": [ + "V1" + ], + "summary": "Create a conversation.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "create_conversation_v1_conversations_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_conversations_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "The created conversation object.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Conversation" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/conversations/{conversation_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Delete a conversation.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_delete_conversation_v1_conversations__conversation_id__delete", + "parameters": [ + { + "name": "conversation_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Conversation Id" + }, + "description": "The conversation identifier." 
+ } + ], + "responses": { + "200": { + "description": "The deleted conversation resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ConversationDeletedResource" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieve a conversation.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_conversation_v1_conversations__conversation_id__get", + "parameters": [ + { + "name": "conversation_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Conversation Id" + }, + "description": "The conversation identifier." + } + ], + "responses": { + "200": { + "description": "The conversation object.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Conversation" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Update a conversation.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "update_conversation_v1_conversations__conversation_id__post", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_conversations_conversation_id_Request" + } + } + } + }, + "responses": { + "200": { + "description": "The updated conversation object.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Conversation" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + }, + "parameters": [ + { + "name": "conversation_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The conversation identifier." 
+ } + ] + } + }, + "/v1/conversations/{conversation_id}/items": { + "get": { + "tags": [ + "V1" + ], + "summary": "List items.", + "description": "Query endpoint for proper schema generation.", + "operationId": "list_items_v1_conversations__conversation_id__items_get", + "parameters": [ + { + "name": "after", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "After" + } + }, + { + "name": "include", + "in": "query", + "required": true, + "schema": { + "$ref": "#/components/schemas/ConversationItemInclude" + } + }, + { + "name": "limit", + "in": "query", + "required": true, + "schema": { + "type": "integer", + "title": "Limit" + } + }, + { + "name": "order", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Order" + } + }, + { + "name": "conversation_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Conversation Id" + }, + "description": "The conversation identifier." + } + ], + "responses": { + "200": { + "description": "List of conversation items.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ConversationItemList" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Create items.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "add_items_v1_conversations__conversation_id__items_post", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_conversations_conversation_id_items_Request" + } + } + } + }, + "responses": { + "200": { + "description": "List of created items.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ConversationItemList" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + }, + "parameters": [ + { + "name": "conversation_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The conversation identifier." + } + ] + } + }, + "/v1/conversations/{conversation_id}/items/{item_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Delete an item.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_delete_conversation_item_v1_conversations__conversation_id__items__item_id__delete", + "parameters": [ + { + "name": "conversation_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Conversation Id" + }, + "description": "The conversation identifier." + }, + { + "name": "item_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Item Id" + }, + "description": "The item identifier." 
+ } + ], + "responses": { + "200": { + "description": "The deleted item resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ConversationItemDeletedResource" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieve an item.", + "description": "Query endpoint for proper schema generation.", + "operationId": "retrieve_v1_conversations__conversation_id__items__item_id__get", + "parameters": [ + { + "name": "conversation_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Conversation Id" + }, + "description": "The conversation identifier." + }, + { + "name": "item_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Item Id" + }, + "description": "The item identifier." + } + ], + "responses": { + "200": { + "description": "The conversation item.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIResponseMessage" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/embeddings": { + "post": { + "tags": [ + "V1" + ], + "summary": "Create embeddings.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_embeddings_v1_embeddings_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIEmbeddingsRequestWithExtraBody" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "An OpenAIEmbeddingsResponse containing the embeddings.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIEmbeddingsResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/files": { + "get": { + "tags": [ + "V1" + ], + "summary": "List files.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_list_files_v1_files_get", + "parameters": [ + { + "name": "after", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "After" + } + }, + { + "name": "purpose", + "in": "query", + "required": true, + "schema": { + "$ref": "#/components/schemas/OpenAIFilePurpose" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "default": 10000, + "title": "Limit" + 
} + }, + { + "name": "order", + "in": "query", + "required": false, + "schema": { + "$ref": "#/components/schemas/Order", + "default": "desc" + } + } + ], + "responses": { + "200": { + "description": "An ListOpenAIFileResponse containing the list of files.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListOpenAIFileResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Upload file.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "openai_upload_file_v1_files_post", + "responses": { + "200": { + "description": "An OpenAIFileObject representing the uploaded file.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIFileObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/files/{file_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Delete file.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_delete_file_v1_files__file_id__delete", + "parameters": [ + { + "name": "file_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "File Id" + }, + "description": "The ID of the file to use for this request." + } + ], + "responses": { + "200": { + "description": "An OpenAIFileDeleteResponse indicating successful deletion.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIFileDeleteResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieve file.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_retrieve_file_v1_files__file_id__get", + "parameters": [ + { + "name": "file_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "File Id" + }, + "description": "The ID of the file to use for this request." 
+ } + ], + "responses": { + "200": { + "description": "An OpenAIFileObject containing file information.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIFileObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/files/{file_id}/content": { + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieve file content.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "openai_retrieve_file_content_v1_files__file_id__content_get", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "file_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the file to use for this request." + } + ], + "responses": { + "200": { + "description": "The raw file content as a binary response.", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/health": { + "get": { + "tags": [ + "V1" + ], + "summary": "Get health status.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "health_v1_health_get", + "responses": { + "200": { + "description": "Health information indicating if the service is operational.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HealthInfo" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/inspect/routes": { + "get": { + "tags": [ + "V1" + ], + "summary": "List routes.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "list_routes_v1_inspect_routes_get", + "responses": { + "200": { + "description": "Response containing information about all available routes.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListRoutesResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": 
"#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/models": { + "get": { + "tags": [ + "V1" + ], + "summary": "List all models.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "list_models_v1_models_get", + "responses": { + "200": { + "description": "A ListModelsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListModelsResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Register model.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "register_model_v1_models_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_models_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A Model.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Model" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/models/{model_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Unregister model.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "unregister_model_v1_models__model_id__delete", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "model_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "Path parameter: model_id" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Get model.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_model_v1_models__model_id__get", + "parameters": [ + { + "name": "model_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Model Id" + } + } + ], + "responses": { + "200": { + "description": "A Model.", + "content": { + 
"application/json": { + "schema": { + "$ref": "#/components/schemas/Model" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/moderations": { + "post": { + "tags": [ + "V1" + ], + "summary": "Create moderation.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "run_moderation_v1_moderations_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_moderations_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A moderation object.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ModerationObject" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/prompts": { + "get": { + "tags": [ + "V1" + ], + "summary": "List all prompts.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "list_prompts_v1_prompts_get", + "responses": { + "200": { + "description": "A ListPromptsResponse containing all prompts.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListPromptsResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Create prompt.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "create_prompt_v1_prompts_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_prompts_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "The created Prompt resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Prompt" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/prompts/{prompt_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Delete prompt.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + 
"operationId": "delete_prompt_v1_prompts__prompt_id__delete", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "prompt_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The identifier of the prompt to delete." + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Get prompt.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_prompt_v1_prompts__prompt_id__get", + "parameters": [ + { + "name": "version", + "in": "query", + "required": true, + "schema": { + "type": "integer", + "title": "Version" + } + }, + { + "name": "prompt_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Prompt Id" + }, + "description": "The identifier of the prompt to get." + } + ], + "responses": { + "200": { + "description": "A Prompt resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Prompt" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Update prompt.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "update_prompt_v1_prompts__prompt_id__post", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_prompts_prompt_id_Request" + } + } + } + }, + "responses": { + "200": { + "description": "The updated Prompt resource with incremented version.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Prompt" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + }, + "parameters": [ + { + "name": "prompt_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The identifier of the prompt to update." 
+ } + ] + } + }, + "/v1/prompts/{prompt_id}/set-default-version": { + "post": { + "tags": [ + "V1" + ], + "summary": "Set prompt version.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "set_default_version_v1_prompts__prompt_id__set_default_version_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_prompts_prompt_id_set_default_version_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "The prompt with the specified version now set as default.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Prompt" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "parameters": [ + { + "name": "prompt_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The identifier of the prompt." + } + ] + } + }, + "/v1/prompts/{prompt_id}/versions": { + "get": { + "tags": [ + "V1" + ], + "summary": "List prompt versions.", + "description": "Query endpoint for proper schema generation.", + "operationId": "list_prompt_versions_v1_prompts__prompt_id__versions_get", + "parameters": [ + { + "name": "prompt_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Prompt Id" + }, + "description": "The identifier of the prompt to list versions for." 
+ } + ], + "responses": { + "200": { + "description": "A ListPromptsResponse containing all versions of the prompt.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListPromptsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/providers": { + "get": { + "tags": [ + "V1" + ], + "summary": "List providers.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "list_providers_v1_providers_get", + "responses": { + "200": { + "description": "A ListProvidersResponse containing information about all providers.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListProvidersResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/providers/{provider_id}": { + "get": { + "tags": [ + "V1" + ], + "summary": "Get provider.", + "description": "Query endpoint for proper schema generation.", + "operationId": "inspect_provider_v1_providers__provider_id__get", + "parameters": [ + { + "name": "provider_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Provider Id" + }, + "description": "The ID of the provider to inspect." 
+ } + ], + "responses": { + "200": { + "description": "A ProviderInfo object containing the provider's details.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ProviderInfo" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/responses": { + "get": { + "tags": [ + "V1" + ], + "summary": "List all responses.", + "description": "Query endpoint for proper schema generation.", + "operationId": "list_openai_responses_v1_responses_get", + "parameters": [ + { + "name": "after", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "After" + } + }, + { + "name": "model", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Model" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "default": 50, + "title": "Limit" + } + }, + { + "name": "order", + "in": "query", + "required": false, + "schema": { + "$ref": "#/components/schemas/Order", + "default": "desc" + } + } + ], + "responses": { + "200": { + "description": "A ListOpenAIResponseObject.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListOpenAIResponseObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Create a model response.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "create_openai_response_v1_responses_post", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_responses_Request" + } + } + } + }, + "responses": { + "200": { + "description": "An OpenAIResponseObject.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIResponseObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/responses/{response_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Delete a response.", + "description": "Query endpoint for proper schema generation.", + "operationId": "delete_openai_response_v1_responses__response_id__delete", + "parameters": [ + { + "name": "response_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Response Id" + }, + "description": "The ID of the OpenAI response to delete." 
+ } + ], + "responses": { + "200": { + "description": "An OpenAIDeleteResponseObject", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIDeleteResponseObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Get a model response.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_openai_response_v1_responses__response_id__get", + "parameters": [ + { + "name": "response_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Response Id" + }, + "description": "The ID of the OpenAI response to retrieve." + } + ], + "responses": { + "200": { + "description": "An OpenAIResponseObject.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIResponseObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/responses/{response_id}/input_items": { + "get": { + "tags": [ + "V1" + ], + "summary": "List input items.", + "description": "Query endpoint for proper schema generation.", + "operationId": "list_openai_response_input_items_v1_responses__response_id__input_items_get", + "parameters": [ + { + "name": "after", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "After" + } + }, + { + "name": "before", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Before" + } + }, + { + "name": "include", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Include" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "default": 20, + "title": "Limit" + } + }, + { + "name": "order", + "in": "query", + "required": false, + "schema": { + "$ref": "#/components/schemas/Order", + "default": "desc" + } + }, + { + "name": "response_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Response Id" + }, + "description": "The ID of the response to retrieve input items for." 
+ } + ], + "responses": { + "200": { + "description": "An ListOpenAIResponseInputItem.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListOpenAIResponseInputItem" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/safety/run-shield": { + "post": { + "tags": [ + "V1" + ], + "summary": "Run shield.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "run_shield_v1_safety_run_shield_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_safety_run_shield_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A RunShieldResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RunShieldResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/scoring-functions": { + "get": { + "tags": [ + "V1" + ], + "summary": "List all scoring functions.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "list_scoring_functions_v1_scoring_functions_get", + "responses": { + "200": { + "description": "A ListScoringFunctionsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListScoringFunctionsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Register a scoring function.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "register_scoring_function_v1_scoring_functions_post", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", 
+ "description": "Default Response" + } + } + } + }, + "/v1/scoring-functions/{scoring_fn_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Unregister a scoring function.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "unregister_scoring_function_v1_scoring_functions__scoring_fn_id__delete", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "scoring_fn_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "Path parameter: scoring_fn_id" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Get a scoring function by its ID.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_scoring_function_v1_scoring_functions__scoring_fn_id__get", + "parameters": [ + { + "name": "scoring_fn_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Scoring Fn Id" + } + } + ], + "responses": { + "200": { + "description": "A ScoringFn.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ScoringFn" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/scoring/score": { + "post": { + "tags": [ + "V1" + ], + "summary": "Score a list of rows.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "score_v1_scoring_score_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_scoring_score_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A ScoreResponse object containing rows and aggregated results.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ScoreResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/scoring/score-batch": { + "post": { + "tags": [ + "V1" + ], + "summary": "Score a batch of rows.", + "description": "Typed endpoint for proper schema generation.", + "operationId": 
"score_batch_v1_scoring_score_batch_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_scoring_score_batch_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A ScoreBatchResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ScoreBatchResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/shields": { + "get": { + "tags": [ + "V1" + ], + "summary": "List all shields.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "list_shields_v1_shields_get", + "responses": { + "200": { + "description": "A ListShieldsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListShieldsResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Register a shield.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "register_shield_v1_shields_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_shields_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A Shield.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Shield" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/shields/{identifier}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Unregister a shield.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "unregister_shield_v1_shields__identifier__delete", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "identifier", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "Path parameter: identifier" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": 
"#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Get a shield by its identifier.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_shield_v1_shields__identifier__get", + "parameters": [ + { + "name": "identifier", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Identifier" + } + } + ], + "responses": { + "200": { + "description": "A Shield.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Shield" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/tool-runtime/invoke": { + "post": { + "tags": [ + "V1" + ], + "summary": "Run a tool with the given arguments.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "invoke_tool_v1_tool_runtime_invoke_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_tool_runtime_invoke_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A ToolInvocationResult.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ToolInvocationResult" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/tool-runtime/list-tools": { + "get": { + "tags": [ + "V1" + ], + "summary": "List all tools in the runtime.", + "description": "Query endpoint for proper schema generation.", + "operationId": "list_runtime_tools_v1_tool_runtime_list_tools_get", + "parameters": [ + { + "name": "tool_group_id", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Tool Group Id" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/URL" + } + } + } + }, + "responses": { + "200": { + "description": "A ListToolDefsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListToolDefsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + 
"/v1/tool-runtime/rag-tool/insert": { + "post": { + "tags": [ + "V1" + ], + "summary": "Index documents so they can be used by the RAG system.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "rag_tool_insert_v1_tool_runtime_rag_tool_insert_post", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/tool-runtime/rag-tool/query": { + "post": { + "tags": [ + "V1" + ], + "summary": "Query the RAG system for context; typically invoked by the agent.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "rag_tool_query_v1_tool_runtime_rag_tool_query_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_tool_runtime_rag_tool_query_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "RAGQueryResult containing the retrieved content and metadata", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RAGQueryResult" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/toolgroups": { + "get": { + "tags": [ + "V1" + ], + "summary": "List tool groups with optional provider.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "list_tool_groups_v1_toolgroups_get", + "responses": { + "200": { + "description": "A ListToolGroupsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListToolGroupsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Register a tool group.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "register_tool_group_v1_toolgroups_post", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": 
"Kwargs" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/toolgroups/{toolgroup_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Unregister a tool group.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "unregister_toolgroup_v1_toolgroups__toolgroup_id__delete", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + }, + { + "name": "toolgroup_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "Path parameter: toolgroup_id" + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Get a tool group by its ID.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_tool_group_v1_toolgroups__toolgroup_id__get", + "parameters": [ + { + "name": "toolgroup_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Toolgroup Id" + } + } + ], + "responses": { + "200": { + "description": "A ToolGroup.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ToolGroup" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/tools": { + "get": { + "tags": [ + "V1" + ], + "summary": "List tools with optional tool group.", + "description": "Query endpoint for proper schema generation.", + "operationId": "list_tools_v1_tools_get", + "parameters": [ + { + "name": "toolgroup_id", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Toolgroup Id" + } + } + ], + "responses": { + "200": { + "description": "A ListToolDefsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListToolDefsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + 
"description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/tools/{tool_name}": { + "get": { + "tags": [ + "V1" + ], + "summary": "Get a tool by its name.", + "description": "Query endpoint for proper schema generation.", + "operationId": "get_tool_v1_tools__tool_name__get", + "parameters": [ + { + "name": "tool_name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Tool Name" + } + } + ], + "responses": { + "200": { + "description": "A ToolDef.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ToolDef" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/vector-io/insert": { + "post": { + "tags": [ + "V1" + ], + "summary": "Insert chunks into a vector database.", + "description": "Generic endpoint - this would be replaced with actual implementation.", + "operationId": "insert_chunks_v1_vector_io_insert_post", + "parameters": [ + { + "name": "args", + "in": "query", + "required": true, + "schema": { + "title": "Args" + } + }, + { + "name": "kwargs", + "in": "query", + "required": true, + "schema": { + "title": "Kwargs" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/vector-io/query": { + "post": { + "tags": [ + "V1" + ], + "summary": "Query chunks from a vector database.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "query_chunks_v1_vector_io_query_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_vector_io_query_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A QueryChunksResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueryChunksResponse" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + }, + "/v1/vector_stores": { + "get": { + "tags": [ + "V1" + ], + "summary": "Returns a list of vector stores.", + "description": "Query endpoint for proper schema generation.", + "operationId": 
"openai_list_vector_stores_v1_vector_stores_get", + "parameters": [ + { + "name": "after", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "After" + } + }, + { + "name": "before", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Before" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "default": 20, + "title": "Limit" + } + }, + { + "name": "order", + "in": "query", + "required": false, + "schema": { + "type": "string", + "default": "desc", + "title": "Order" + } + } + ], + "responses": { + "200": { + "description": "A VectorStoreListResponse containing the list of vector stores.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreListResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Creates a vector store.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_create_vector_store_v1_vector_stores_post", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAICreateVectorStoreRequestWithExtraBody" + } + } + } + }, + "responses": { + "200": { + "description": "A VectorStoreObject representing the created vector store.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/vector_stores/{vector_store_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Delete a vector store.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_delete_vector_store_v1_vector_stores__vector_store_id__delete", + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Vector Store Id" + }, + "description": "The ID of the vector store to delete." 
+ } + ], + "responses": { + "200": { + "description": "A VectorStoreDeleteResponse indicating the deletion status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreDeleteResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieves a vector store.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_retrieve_vector_store_v1_vector_stores__vector_store_id__get", + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Vector Store Id" + }, + "description": "The ID of the vector store to retrieve." + } + ], + "responses": { + "200": { + "description": "A VectorStoreObject representing the vector store.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Updates a vector store.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_update_vector_store_v1_vector_stores__vector_store_id__post", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_vector_stores_vector_store_id_Request" + } + } + } + }, + "responses": { + "200": { + "description": "A VectorStoreObject representing the updated vector store.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + }, + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the vector store to update." 
+ } + ] + } + }, + "/v1/vector_stores/{vector_store_id}/file_batches": { + "post": { + "tags": [ + "V1" + ], + "summary": "Create a vector store file batch.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_create_vector_store_file_batch_v1_vector_stores__vector_store_id__file_batches_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAICreateVectorStoreFileBatchRequestWithExtraBody" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A VectorStoreFileBatchObject representing the created file batch.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileBatchObject" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the vector store to create the file batch for." + } + ] + } + }, + "/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}": { + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieve a vector store file batch.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_retrieve_vector_store_file_batch_v1_vector_stores__vector_store_id__file_batches__batch_id__get", + "parameters": [ + { + "name": "batch_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Batch Id" + }, + "description": "The ID of the file batch to retrieve." + }, + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Vector Store Id" + }, + "description": "The ID of the vector store containing the file batch." 
+ } + ], + "responses": { + "200": { + "description": "A VectorStoreFileBatchObject representing the file batch.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileBatchObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel": { + "post": { + "tags": [ + "V1" + ], + "summary": "Cancels a vector store file batch.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_cancel_vector_store_file_batch_v1_vector_stores__vector_store_id__file_batches__batch_id__cancel_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_vector_stores_vector_store_id_file_batches_batch_id_cancel_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A VectorStoreFileBatchObject representing the cancelled file batch.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileBatchObject" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "parameters": [ + { + "name": "batch_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the file batch to cancel." + }, + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the vector store containing the file batch." + } + ] + } + }, + "/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/files": { + "get": { + "tags": [ + "V1" + ], + "summary": "Returns a list of vector store files in a batch.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_list_files_in_vector_store_file_batch_v1_vector_stores__vector_store_id__file_batches__batch_id__files_get", + "parameters": [ + { + "name": "after", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "After" + } + }, + { + "name": "before", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Before" + } + }, + { + "name": "filter", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Filter" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "default": 20, + "title": "Limit" + } + }, + { + "name": "order", + "in": "query", + "required": false, + "schema": { + "type": "string", + "default": "desc", + "title": "Order" + } + }, + { + "name": "batch_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Batch Id" + }, + "description": "The ID of the file batch to list files from." 
+ }, + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Vector Store Id" + }, + "description": "The ID of the vector store containing the file batch." + } + ], + "responses": { + "200": { + "description": "A VectorStoreFilesListInBatchResponse containing the list of files in the batch.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFilesListInBatchResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/vector_stores/{vector_store_id}/files": { + "get": { + "tags": [ + "V1" + ], + "summary": "List files in a vector store.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_list_files_in_vector_store_v1_vector_stores__vector_store_id__files_get", + "parameters": [ + { + "name": "after", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "After" + } + }, + { + "name": "before", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Before" + } + }, + { + "name": "filter", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Filter" + } + }, + { + "name": "limit", + "in": "query", + "required": false, + "schema": { + "type": "integer", + "default": 20, + "title": "Limit" + } + }, + { + "name": "order", + "in": "query", + "required": false, + "schema": { + "type": "string", + "default": "desc", + "title": "Order" + } + }, + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Vector Store Id" + }, + "description": "The ID of the vector store to list files from." 
+ } + ], + "responses": { + "200": { + "description": "A VectorStoreListFilesResponse containing the list of files.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreListFilesResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Attach a file to a vector store.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_attach_file_to_vector_store_v1_vector_stores__vector_store_id__files_post", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_vector_stores_vector_store_id_files_Request" + } + } + } + }, + "responses": { + "200": { + "description": "A VectorStoreFileObject representing the attached file.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + }, + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the vector store to attach the file to." + } + ] + } + }, + "/v1/vector_stores/{vector_store_id}/files/{file_id}": { + "delete": { + "tags": [ + "V1" + ], + "summary": "Delete a vector store file.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_delete_vector_store_file_v1_vector_stores__vector_store_id__files__file_id__delete", + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Vector Store Id" + }, + "description": "The ID of the vector store containing the file to delete." + }, + { + "name": "file_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "File Id" + }, + "description": "The ID of the file to delete." 
+ } + ], + "responses": { + "200": { + "description": "A VectorStoreFileDeleteResponse indicating the deletion status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileDeleteResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieves a vector store file.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_retrieve_vector_store_file_v1_vector_stores__vector_store_id__files__file_id__get", + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Vector Store Id" + }, + "description": "The ID of the vector store containing the file to retrieve." + }, + { + "name": "file_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "File Id" + }, + "description": "The ID of the file to retrieve." + } + ], + "responses": { + "200": { + "description": "A VectorStoreFileObject representing the file.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + }, + "post": { + "tags": [ + "V1" + ], + "summary": "Updates a vector store file.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_update_vector_store_file_v1_vector_stores__vector_store_id__files__file_id__post", + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_vector_stores_vector_store_id_files_file_id_Request" + } + } + } + }, + "responses": { + "200": { + "description": "A VectorStoreFileObject representing the updated file.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + }, + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the vector store containing the file to update." + }, + { + "name": "file_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the file to update." 
+ } + ] + } + }, + "/v1/vector_stores/{vector_store_id}/files/{file_id}/content": { + "get": { + "tags": [ + "V1" + ], + "summary": "Retrieves the contents of a vector store file.", + "description": "Query endpoint for proper schema generation.", + "operationId": "openai_retrieve_vector_store_file_contents_v1_vector_stores__vector_store_id__files__file_id__content_get", + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Vector Store Id" + }, + "description": "The ID of the vector store containing the file to retrieve." + }, + { + "name": "file_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "File Id" + }, + "description": "The ID of the file to retrieve." + } + ], + "responses": { + "200": { + "description": "A list of InterleavedContent representing the file contents.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileContentsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400", + "description": "Bad Request" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429", + "description": "Too Many Requests" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500", + "description": "Internal Server Error" + }, + "default": { + "$ref": "#/components/responses/DefaultError", + "description": "Default Response" + } + } + } + }, + "/v1/vector_stores/{vector_store_id}/search": { + "post": { + "tags": [ + "V1" + ], + "summary": "Search for chunks in a vector store.", + "description": "Typed endpoint for proper schema generation.", + "operationId": "openai_search_vector_store_v1_vector_stores__vector_store_id__search_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/_vector_stores_vector_store_id_search_Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "A VectorStoreSearchResponse containing the search results.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreSearchResponsePage" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + }, + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "required": true, + "schema": { + "type": "string" + }, + "description": "The ID of the vector store to search." 
+ } + ] + } + }, + "/v1/version": { + "get": { + "tags": [ + "V1" + ], + "summary": "Get version.", + "description": "Response-only endpoint for proper schema generation.", + "operationId": "version_v1_version_get", + "responses": { + "200": { + "description": "Version information containing the service version number.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VersionInfo" + } + } + } + }, + "400": { + "description": "Bad Request", + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "description": "Too Many Requests", + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "description": "Internal Server Error", + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "description": "Default Response", + "$ref": "#/components/responses/DefaultError" + } + } + } + } + }, + "components": { + "schemas": { + "AgentCandidate": { + "properties": { + "type": { + "type": "string", + "const": "agent", + "title": "Type", + "default": "agent" + }, + "config": { + "$ref": "#/components/schemas/AgentConfig" + } + }, + "type": "object", + "required": [ + "config" + ], + "title": "AgentCandidate", + "description": "An agent candidate for evaluation." + }, + "AgentConfig": { + "properties": { + "sampling_params": { + "$ref": "#/components/schemas/SamplingParams" + }, + "input_shields": { + "title": "Input Shields", + "items": { + "type": "string" + }, + "type": "array" + }, + "output_shields": { + "title": "Output Shields", + "items": { + "type": "string" + }, + "type": "array" + }, + "toolgroups": { + "title": "Toolgroups", + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "$ref": "#/components/schemas/AgentToolGroupWithArgs" + } + ] + }, + "type": "array" + }, + "client_tools": { + "title": "Client Tools", + "items": { + "$ref": "#/components/schemas/ToolDef" + }, + "type": "array" + }, + "tool_choice": { + "deprecated": true, + "$ref": "#/components/schemas/ToolChoice" + }, + "tool_prompt_format": { + "deprecated": true, + "$ref": "#/components/schemas/ToolPromptFormat" + }, + "tool_config": { + "$ref": "#/components/schemas/ToolConfig" + }, + "max_infer_iters": { + "title": "Max Infer Iters", + "default": 10, + "type": "integer" + }, + "model": { + "type": "string", + "title": "Model" + }, + "instructions": { + "type": "string", + "title": "Instructions" + }, + "name": { + "title": "Name", + "type": "string" + }, + "enable_session_persistence": { + "title": "Enable Session Persistence", + "default": false, + "type": "boolean" + }, + "response_format": { + "title": "Response Format", + "oneOf": [ + { + "$ref": "#/components/schemas/JsonSchemaResponseFormat" + }, + { + "$ref": "#/components/schemas/GrammarResponseFormat" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "grammar": "#/components/schemas/GrammarResponseFormat", + "json_schema": "#/components/schemas/JsonSchemaResponseFormat" + } + } + } + }, + "type": "object", + "required": [ + "model", + "instructions" + ], + "title": "AgentConfig", + "description": "Configuration for an agent." + }, + "AgentCreateResponse": { + "properties": { + "agent_id": { + "type": "string", + "title": "Agent Id" + } + }, + "type": "object", + "required": [ + "agent_id" + ], + "title": "AgentCreateResponse", + "description": "Response returned when creating a new agent." 
+ }, + "AgentSessionCreateResponse": { + "properties": { + "session_id": { + "type": "string", + "title": "Session Id" + } + }, + "type": "object", + "required": [ + "session_id" + ], + "title": "AgentSessionCreateResponse", + "description": "Response returned when creating a new agent session." + }, + "AgentToolGroupWithArgs": { + "properties": { + "name": { + "type": "string", + "title": "Name" + }, + "args": { + "additionalProperties": true, + "type": "object", + "title": "Args" + } + }, + "type": "object", + "required": [ + "name", + "args" + ], + "title": "AgentToolGroupWithArgs" + }, + "AgentTurnInputType": { + "properties": { + "type": { + "type": "string", + "const": "agent_turn_input", + "title": "Type", + "default": "agent_turn_input" + } + }, + "type": "object", + "title": "AgentTurnInputType", + "description": "Parameter type for agent turn input." + }, + "AggregationFunctionType": { + "type": "string", + "enum": [ + "average", + "weighted_average", + "median", + "categorical_count", + "accuracy" + ], + "title": "AggregationFunctionType", + "description": "Types of aggregation functions for scoring results." + }, + "AllowedToolsFilter": { + "properties": { + "tool_names": { + "title": "Tool Names", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object", + "title": "AllowedToolsFilter", + "description": "Filter configuration for restricting which MCP tools can be used." + }, + "ApprovalFilter": { + "properties": { + "always": { + "title": "Always", + "items": { + "type": "string" + }, + "type": "array" + }, + "never": { + "title": "Never", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object", + "title": "ApprovalFilter", + "description": "Filter configuration for MCP tool approval requirements." + }, + "ArrayType": { + "properties": { + "type": { + "type": "string", + "const": "array", + "title": "Type", + "default": "array" + } + }, + "type": "object", + "title": "ArrayType", + "description": "Parameter type for array values." + }, + "Attachment-Output": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + }, + { + "$ref": "#/components/schemas/URL" + } + ], + "title": "Content" + }, + "mime_type": { + "type": "string", + "title": "Mime Type" + } + }, + "type": "object", + "required": [ + "content", + "mime_type" + ], + "title": "Attachment", + "description": "An attachment to an agent turn." 
+ }, + "BasicScoringFnParams": { + "properties": { + "type": { + "type": "string", + "const": "basic", + "title": "Type", + "default": "basic" + }, + "aggregation_functions": { + "items": { + "$ref": "#/components/schemas/AggregationFunctionType" + }, + "type": "array", + "title": "Aggregation Functions", + "description": "Aggregation functions to apply to the scores of each row" + } + }, + "type": "object", + "title": "BasicScoringFnParams", + "description": "Parameters for basic scoring function configuration." + }, + "Batch": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "completion_window": { + "type": "string", + "title": "Completion Window" + }, + "created_at": { + "type": "integer", + "title": "Created At" + }, + "endpoint": { + "type": "string", + "title": "Endpoint" + }, + "input_file_id": { + "type": "string", + "title": "Input File Id" + }, + "object": { + "type": "string", + "const": "batch", + "title": "Object" + }, + "status": { + "type": "string", + "enum": [ + "validating", + "failed", + "in_progress", + "finalizing", + "completed", + "expired", + "cancelling", + "cancelled" + ], + "title": "Status" + }, + "cancelled_at": { + "title": "Cancelled At", + "type": "integer" + }, + "cancelling_at": { + "title": "Cancelling At", + "type": "integer" + }, + "completed_at": { + "title": "Completed At", + "type": "integer" + }, + "error_file_id": { + "title": "Error File Id", + "type": "string" + }, + "errors": { + "$ref": "#/components/schemas/Errors" + }, + "expired_at": { + "title": "Expired At", + "type": "integer" + }, + "expires_at": { + "title": "Expires At", + "type": "integer" + }, + "failed_at": { + "title": "Failed At", + "type": "integer" + }, + "finalizing_at": { + "title": "Finalizing At", + "type": "integer" + }, + "in_progress_at": { + "title": "In Progress At", + "type": "integer" + }, + "metadata": { + "title": "Metadata", + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "model": { + "title": "Model", + "type": "string" + }, + "output_file_id": { + "title": "Output File Id", + "type": "string" + }, + "request_counts": { + "$ref": "#/components/schemas/BatchRequestCounts" + }, + "usage": { + "$ref": "#/components/schemas/BatchUsage" + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "id", + "completion_window", + "created_at", + "endpoint", + "input_file_id", + "object", + "status" + ], + "title": "Batch" + }, + "BatchError": { + "properties": { + "code": { + "title": "Code", + "type": "string" + }, + "line": { + "title": "Line", + "type": "integer" + }, + "message": { + "title": "Message", + "type": "string" + }, + "param": { + "title": "Param", + "type": "string" + } + }, + "additionalProperties": true, + "type": "object", + "title": "BatchError" + }, + "BatchRequestCounts": { + "properties": { + "completed": { + "type": "integer", + "title": "Completed" + }, + "failed": { + "type": "integer", + "title": "Failed" + }, + "total": { + "type": "integer", + "title": "Total" + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "completed", + "failed", + "total" + ], + "title": "BatchRequestCounts" + }, + "BatchUsage": { + "properties": { + "input_tokens": { + "type": "integer", + "title": "Input Tokens" + }, + "input_tokens_details": { + "$ref": "#/components/schemas/InputTokensDetails" + }, + "output_tokens": { + "type": "integer", + "title": "Output Tokens" + }, + "output_tokens_details": { + "$ref": "#/components/schemas/OutputTokensDetails" + }, + 
"total_tokens": { + "type": "integer", + "title": "Total Tokens" + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "input_tokens", + "input_tokens_details", + "output_tokens", + "output_tokens_details", + "total_tokens" + ], + "title": "BatchUsage" + }, + "Benchmark": { + "properties": { + "identifier": { + "type": "string", + "title": "Identifier", + "description": "Unique identifier for this resource in llama stack" + }, + "provider_resource_id": { + "title": "Provider Resource Id", + "description": "Unique identifier for this resource in the provider", + "type": "string" + }, + "provider_id": { + "type": "string", + "title": "Provider Id", + "description": "ID of the provider that owns this resource" + }, + "type": { + "type": "string", + "const": "benchmark", + "title": "Type", + "default": "benchmark" + }, + "dataset_id": { + "type": "string", + "title": "Dataset Id" + }, + "scoring_functions": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Scoring Functions" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata", + "description": "Metadata for this evaluation task" + } + }, + "type": "object", + "required": [ + "identifier", + "provider_id", + "dataset_id", + "scoring_functions" + ], + "title": "Benchmark", + "description": "A benchmark resource for evaluating model performance." + }, + "BenchmarkConfig": { + "properties": { + "eval_candidate": { + "oneOf": [ + { + "$ref": "#/components/schemas/ModelCandidate" + }, + { + "$ref": "#/components/schemas/AgentCandidate" + } + ], + "title": "Eval Candidate", + "discriminator": { + "propertyName": "type", + "mapping": { + "agent": "#/components/schemas/AgentCandidate", + "model": "#/components/schemas/ModelCandidate" + } + } + }, + "scoring_params": { + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/components/schemas/LLMAsJudgeScoringFnParams" + }, + { + "$ref": "#/components/schemas/RegexParserScoringFnParams" + }, + { + "$ref": "#/components/schemas/BasicScoringFnParams" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "basic": "#/components/schemas/BasicScoringFnParams", + "llm_as_judge": "#/components/schemas/LLMAsJudgeScoringFnParams", + "regex_parser": "#/components/schemas/RegexParserScoringFnParams" + } + } + }, + "type": "object", + "title": "Scoring Params", + "description": "Map between scoring function id and parameters for each scoring function you want to run" + }, + "num_examples": { + "title": "Num Examples", + "description": "Number of examples to evaluate (useful for testing), if not provided, all examples in the dataset will be evaluated", + "type": "integer" + } + }, + "type": "object", + "required": [ + "eval_candidate" + ], + "title": "BenchmarkConfig", + "description": "A benchmark configuration for evaluation." + }, + "BooleanType": { + "properties": { + "type": { + "type": "string", + "const": "boolean", + "title": "Type", + "default": "boolean" + } + }, + "type": "object", + "title": "BooleanType", + "description": "Parameter type for boolean values." 
+ }, + "BuiltinTool": { + "type": "string", + "enum": [ + "brave_search", + "wolfram_alpha", + "photogen", + "code_interpreter" + ], + "title": "BuiltinTool" + }, + "ChatCompletionInputType": { + "properties": { + "type": { + "type": "string", + "const": "chat_completion_input", + "title": "Type", + "default": "chat_completion_input" + } + }, + "type": "object", + "title": "ChatCompletionInputType", + "description": "Parameter type for chat completion input." + }, + "Chunk-Output": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "chunk_id": { + "type": "string", + "title": "Chunk Id" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata" + }, + "embedding": { + "title": "Embedding", + "items": { + "type": "number" + }, + "type": "array" + }, + "chunk_metadata": { + "$ref": "#/components/schemas/ChunkMetadata" + } + }, + "type": "object", + "required": [ + "content", + "chunk_id" + ], + "title": "Chunk", + "description": "A chunk of content that can be inserted into a vector database." + }, + "ChunkMetadata": { + "properties": { + "chunk_id": { + "title": "Chunk Id", + "type": "string" + }, + "document_id": { + "title": "Document Id", + "type": "string" + }, + "source": { + "title": "Source", + "type": "string" + }, + "created_timestamp": { + "title": "Created Timestamp", + "type": "integer" + }, + "updated_timestamp": { + "title": "Updated Timestamp", + "type": "integer" + }, + "chunk_window": { + "title": "Chunk Window", + "type": "string" + }, + "chunk_tokenizer": { + "title": "Chunk Tokenizer", + "type": "string" + }, + "chunk_embedding_model": { + "title": "Chunk Embedding Model", + "type": "string" + }, + "chunk_embedding_dimension": { + "title": "Chunk Embedding Dimension", + "type": "integer" + }, + "content_token_count": { + "title": "Content Token Count", + "type": "integer" + }, + "metadata_token_count": { + "title": "Metadata Token Count", + "type": "integer" + } + }, + "type": "object", + "title": "ChunkMetadata", + "description": "`ChunkMetadata` is backend metadata for a `Chunk` that is used to store additional information about the chunk that\n will not be used in the context during inference, but is required for backend functionality. The `ChunkMetadata`\n is set during chunk creation in `MemoryToolRuntimeImpl().insert()`and is not expected to change after.\n Use `Chunk.metadata` for metadata that will be used in the context during inference." + }, + "CompletionInputType": { + "properties": { + "type": { + "type": "string", + "const": "completion_input", + "title": "Type", + "default": "completion_input" + } + }, + "type": "object", + "title": "CompletionInputType", + "description": "Parameter type for completion input." 
+ }, + "CompletionMessage-Output": { + "properties": { + "role": { + "type": "string", + "const": "assistant", + "title": "Role", + "default": "assistant" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "stop_reason": { + "$ref": "#/components/schemas/StopReason" + }, + "tool_calls": { + "title": "Tool Calls", + "items": { + "$ref": "#/components/schemas/ToolCall" + }, + "type": "array" + } + }, + "type": "object", + "required": [ + "content", + "stop_reason" + ], + "title": "CompletionMessage", + "description": "A message containing the model's (assistant) response in a chat conversation." + }, + "Conversation": { + "properties": { + "id": { + "type": "string", + "title": "Id", + "description": "The unique ID of the conversation." + }, + "object": { + "type": "string", + "const": "conversation", + "title": "Object", + "description": "The object type, which is always conversation.", + "default": "conversation" + }, + "created_at": { + "type": "integer", + "title": "Created At", + "description": "The time at which the conversation was created, measured in seconds since the Unix epoch." + }, + "metadata": { + "title": "Metadata", + "description": "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.", + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "items": { + "title": "Items", + "description": "Initial items to include in the conversation context. You may add up to 20 items at a time.", + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array" + } + }, + "type": "object", + "required": [ + "id", + "created_at" + ], + "title": "Conversation", + "description": "OpenAI-compatible conversation object." + }, + "ConversationItemInclude": { + "type": "string", + "enum": [ + "web_search_call.action.sources", + "code_interpreter_call.outputs", + "computer_call_output.output.image_url", + "file_search_call.results", + "message.input_image.image_url", + "message.output_text.logprobs", + "reasoning.encrypted_content" + ], + "title": "ConversationItemInclude", + "description": "Specify additional output data to include in the model response." 
+ }, + "ConversationItemList": { + "properties": { + "object": { + "type": "string", + "title": "Object", + "description": "Object type", + "default": "list" + }, + "data": { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage-Output" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalResponse" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "file_search_call": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall", + "function_call": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall", + "function_call_output": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput", + "mcp_approval_request": "#/components/schemas/OpenAIResponseMCPApprovalRequest", + "mcp_approval_response": "#/components/schemas/OpenAIResponseMCPApprovalResponse", + "mcp_call": "#/components/schemas/OpenAIResponseOutputMessageMCPCall", + "mcp_list_tools": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools", + "message": "#/components/schemas/OpenAIResponseMessage-Output", + "web_search_call": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + } + } + }, + "type": "array", + "title": "Data", + "description": "List of conversation items" + }, + "first_id": { + "title": "First Id", + "description": "The ID of the first item in the list", + "type": "string" + }, + "last_id": { + "title": "Last Id", + "description": "The ID of the last item in the list", + "type": "string" + }, + "has_more": { + "type": "boolean", + "title": "Has More", + "description": "Whether there are more items available", + "default": false + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ConversationItemList", + "description": "List of conversation items with pagination." + }, + "DPOAlignmentConfig": { + "properties": { + "beta": { + "type": "number", + "title": "Beta" + }, + "loss_type": { + "$ref": "#/components/schemas/DPOLossType", + "default": "sigmoid" + } + }, + "type": "object", + "required": [ + "beta" + ], + "title": "DPOAlignmentConfig", + "description": "Configuration for Direct Preference Optimization (DPO) alignment." 
+ }, + "DPOLossType": { + "type": "string", + "enum": [ + "sigmoid", + "hinge", + "ipo", + "kto_pair" + ], + "title": "DPOLossType" + }, + "DataConfig": { + "properties": { + "dataset_id": { + "type": "string", + "title": "Dataset Id" + }, + "batch_size": { + "type": "integer", + "title": "Batch Size" + }, + "shuffle": { + "type": "boolean", + "title": "Shuffle" + }, + "data_format": { + "$ref": "#/components/schemas/DatasetFormat" + }, + "validation_dataset_id": { + "title": "Validation Dataset Id", + "type": "string" + }, + "packed": { + "title": "Packed", + "default": false, + "type": "boolean" + }, + "train_on_input": { + "title": "Train On Input", + "default": false, + "type": "boolean" + } + }, + "type": "object", + "required": [ + "dataset_id", + "batch_size", + "shuffle", + "data_format" + ], + "title": "DataConfig", + "description": "Configuration for training data and data loading." + }, + "Dataset": { + "properties": { + "identifier": { + "type": "string", + "title": "Identifier", + "description": "Unique identifier for this resource in llama stack" + }, + "provider_resource_id": { + "title": "Provider Resource Id", + "description": "Unique identifier for this resource in the provider", + "type": "string" + }, + "provider_id": { + "type": "string", + "title": "Provider Id", + "description": "ID of the provider that owns this resource" + }, + "type": { + "type": "string", + "const": "dataset", + "title": "Type", + "default": "dataset" + }, + "purpose": { + "$ref": "#/components/schemas/DatasetPurpose" + }, + "source": { + "oneOf": [ + { + "$ref": "#/components/schemas/URIDataSource" + }, + { + "$ref": "#/components/schemas/RowsDataSource" + } + ], + "title": "Source", + "discriminator": { + "propertyName": "type", + "mapping": { + "rows": "#/components/schemas/RowsDataSource", + "uri": "#/components/schemas/URIDataSource" + } + } + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata", + "description": "Any additional metadata for this dataset" + } + }, + "type": "object", + "required": [ + "identifier", + "provider_id", + "purpose", + "source" + ], + "title": "Dataset", + "description": "Dataset resource for storing and accessing training or evaluation data." + }, + "DatasetFormat": { + "type": "string", + "enum": [ + "instruct", + "dialog" + ], + "title": "DatasetFormat", + "description": "Format of the training dataset." + }, + "DatasetPurpose": { + "type": "string", + "enum": [ + "post-training/messages", + "eval/question-answer", + "eval/messages-answer" + ], + "title": "DatasetPurpose", + "description": "Purpose of the dataset. Each purpose has a required input data schema." + }, + "DefaultRAGQueryGeneratorConfig": { + "properties": { + "type": { + "type": "string", + "const": "default", + "title": "Type", + "default": "default" + }, + "separator": { + "type": "string", + "title": "Separator", + "default": " " + } + }, + "type": "object", + "title": "DefaultRAGQueryGeneratorConfig", + "description": "Configuration for the default RAG query generator." 
+ }, + "Document": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + }, + { + "$ref": "#/components/schemas/URL" + } + ], + "title": "Content" + }, + "mime_type": { + "type": "string", + "title": "Mime Type" + } + }, + "type": "object", + "required": [ + "content", + "mime_type" + ], + "title": "Document", + "description": "A document to be used by an agent." + }, + "EfficiencyConfig": { + "properties": { + "enable_activation_checkpointing": { + "title": "Enable Activation Checkpointing", + "default": false, + "type": "boolean" + }, + "enable_activation_offloading": { + "title": "Enable Activation Offloading", + "default": false, + "type": "boolean" + }, + "memory_efficient_fsdp_wrap": { + "title": "Memory Efficient Fsdp Wrap", + "default": false, + "type": "boolean" + }, + "fsdp_cpu_offload": { + "title": "Fsdp Cpu Offload", + "default": false, + "type": "boolean" + } + }, + "type": "object", + "title": "EfficiencyConfig", + "description": "Configuration for memory and compute efficiency optimizations." + }, + "Errors": { + "properties": { + "data": { + "title": "Data", + "items": { + "$ref": "#/components/schemas/BatchError" + }, + "type": "array" + }, + "object": { + "title": "Object", + "type": "string" + } + }, + "additionalProperties": true, + "type": "object", + "title": "Errors" + }, + "EvaluateResponse": { + "properties": { + "generations": { + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array", + "title": "Generations" + }, + "scores": { + "additionalProperties": { + "$ref": "#/components/schemas/ScoringResult" + }, + "type": "object", + "title": "Scores" + } + }, + "type": "object", + "required": [ + "generations", + "scores" + ], + "title": "EvaluateResponse", + "description": "The response from an evaluation." + }, + "GrammarResponseFormat": { + "properties": { + "type": { + "type": "string", + "const": "grammar", + "title": "Type", + "default": "grammar" + }, + "bnf": { + "additionalProperties": true, + "type": "object", + "title": "Bnf" + } + }, + "type": "object", + "required": [ + "bnf" + ], + "title": "GrammarResponseFormat", + "description": "Configuration for grammar-guided response generation." + }, + "GreedySamplingStrategy": { + "properties": { + "type": { + "type": "string", + "const": "greedy", + "title": "Type", + "default": "greedy" + } + }, + "type": "object", + "title": "GreedySamplingStrategy", + "description": "Greedy sampling strategy that selects the highest probability token at each step." + }, + "HealthInfo": { + "properties": { + "status": { + "$ref": "#/components/schemas/HealthStatus" + } + }, + "type": "object", + "required": [ + "status" + ], + "title": "HealthInfo", + "description": "Health status information for the service." 
+ }, + "HealthStatus": { + "type": "string", + "enum": [ + "OK", + "Error", + "Not Implemented" + ], + "title": "HealthStatus" + }, + "ImageContentItem-Input": { + "properties": { + "type": { + "type": "string", + "const": "image", + "title": "Type", + "default": "image" + }, + "image": { + "$ref": "#/components/schemas/_URLOrData" + } + }, + "type": "object", + "required": [ + "image" + ], + "title": "ImageContentItem", + "description": "A image content item" + }, + "ImageContentItem-Output": { + "properties": { + "type": { + "type": "string", + "const": "image", + "title": "Type", + "default": "image" + }, + "image": { + "$ref": "#/components/schemas/_URLOrData" + } + }, + "type": "object", + "required": [ + "image" + ], + "title": "ImageContentItem", + "description": "A image content item" + }, + "InferenceStep-Output": { + "properties": { + "turn_id": { + "type": "string", + "title": "Turn Id" + }, + "step_id": { + "type": "string", + "title": "Step Id" + }, + "started_at": { + "title": "Started At", + "type": "string", + "format": "date-time" + }, + "completed_at": { + "title": "Completed At", + "type": "string", + "format": "date-time" + }, + "step_type": { + "type": "string", + "const": "inference", + "title": "Step Type", + "default": "inference" + }, + "model_response": { + "$ref": "#/components/schemas/CompletionMessage-Output" + } + }, + "type": "object", + "required": [ + "turn_id", + "step_id", + "model_response" + ], + "title": "InferenceStep", + "description": "An inference step in an agent turn." + }, + "InputTokensDetails": { + "properties": { + "cached_tokens": { + "type": "integer", + "title": "Cached Tokens" + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "cached_tokens" + ], + "title": "InputTokensDetails" + }, + "Job": { + "properties": { + "job_id": { + "type": "string", + "title": "Job Id" + }, + "status": { + "$ref": "#/components/schemas/JobStatus" + } + }, + "type": "object", + "required": [ + "job_id", + "status" + ], + "title": "Job", + "description": "A job execution instance with status tracking." + }, + "JobStatus": { + "type": "string", + "enum": [ + "completed", + "in_progress", + "failed", + "scheduled", + "cancelled" + ], + "title": "JobStatus", + "description": "Status of a job execution." + }, + "JsonSchemaResponseFormat": { + "properties": { + "type": { + "type": "string", + "const": "json_schema", + "title": "Type", + "default": "json_schema" + }, + "json_schema": { + "additionalProperties": true, + "type": "object", + "title": "Json Schema" + } + }, + "type": "object", + "required": [ + "json_schema" + ], + "title": "JsonSchemaResponseFormat", + "description": "Configuration for JSON schema-guided response generation." + }, + "JsonType": { + "properties": { + "type": { + "type": "string", + "const": "json", + "title": "Type", + "default": "json" + } + }, + "type": "object", + "title": "JsonType", + "description": "Parameter type for JSON values." 
+ }, + "LLMAsJudgeScoringFnParams": { + "properties": { + "type": { + "type": "string", + "const": "llm_as_judge", + "title": "Type", + "default": "llm_as_judge" + }, + "judge_model": { + "type": "string", + "title": "Judge Model" + }, + "prompt_template": { + "title": "Prompt Template", + "type": "string" + }, + "judge_score_regexes": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Judge Score Regexes", + "description": "Regexes to extract the answer from generated response" + }, + "aggregation_functions": { + "items": { + "$ref": "#/components/schemas/AggregationFunctionType" + }, + "type": "array", + "title": "Aggregation Functions", + "description": "Aggregation functions to apply to the scores of each row" + } + }, + "type": "object", + "required": [ + "judge_model" + ], + "title": "LLMAsJudgeScoringFnParams", + "description": "Parameters for LLM-as-judge scoring function configuration." + }, + "LLMRAGQueryGeneratorConfig": { + "properties": { + "type": { + "type": "string", + "const": "llm", + "title": "Type", + "default": "llm" + }, + "model": { + "type": "string", + "title": "Model" + }, + "template": { + "type": "string", + "title": "Template" + } + }, + "type": "object", + "required": [ + "model", + "template" + ], + "title": "LLMRAGQueryGeneratorConfig", + "description": "Configuration for the LLM-based RAG query generator." + }, + "ListBenchmarksResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/Benchmark" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListBenchmarksResponse" + }, + "ListDatasetsResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/Dataset" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListDatasetsResponse", + "description": "Response from listing datasets." + }, + "ListModelsResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/Model" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListModelsResponse" + }, + "ListPostTrainingJobsResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/PostTrainingJob" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListPostTrainingJobsResponse" + }, + "ListPromptsResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/Prompt" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListPromptsResponse", + "description": "Response model to list prompts." + }, + "ListProvidersResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/ProviderInfo" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListProvidersResponse", + "description": "Response containing a list of all available providers." + }, + "ListRoutesResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/RouteInfo" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListRoutesResponse", + "description": "Response containing a list of all available API routes." 
+ }, + "ListScoringFunctionsResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/ScoringFn-Output" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListScoringFunctionsResponse" + }, + "ListShieldsResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/Shield" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListShieldsResponse" + }, + "ListToolGroupsResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/ToolGroup" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "ListToolGroupsResponse", + "description": "Response containing a list of tool groups." + }, + "MCPListToolsTool": { + "properties": { + "input_schema": { + "additionalProperties": true, + "type": "object", + "title": "Input Schema" + }, + "name": { + "type": "string", + "title": "Name" + }, + "description": { + "title": "Description", + "type": "string" + } + }, + "type": "object", + "required": [ + "input_schema", + "name" + ], + "title": "MCPListToolsTool", + "description": "Tool definition returned by MCP list tools operation." + }, + "MemoryRetrievalStep-Output": { + "properties": { + "turn_id": { + "type": "string", + "title": "Turn Id" + }, + "step_id": { + "type": "string", + "title": "Step Id" + }, + "started_at": { + "title": "Started At", + "type": "string", + "format": "date-time" + }, + "completed_at": { + "title": "Completed At", + "type": "string", + "format": "date-time" + }, + "step_type": { + "type": "string", + "const": "memory_retrieval", + "title": "Step Type", + "default": "memory_retrieval" + }, + "vector_store_ids": { + "type": "string", + "title": "Vector Store Ids" + }, + "inserted_context": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Inserted Context" + } + }, + "type": "object", + "required": [ + "turn_id", + "step_id", + "vector_store_ids", + "inserted_context" + ], + "title": "MemoryRetrievalStep", + "description": "A memory retrieval step in an agent turn." 
+ }, + "Model": { + "properties": { + "identifier": { + "type": "string", + "title": "Identifier", + "description": "Unique identifier for this resource in llama stack" + }, + "provider_resource_id": { + "title": "Provider Resource Id", + "description": "Unique identifier for this resource in the provider", + "type": "string" + }, + "provider_id": { + "type": "string", + "title": "Provider Id", + "description": "ID of the provider that owns this resource" + }, + "type": { + "type": "string", + "const": "model", + "title": "Type", + "default": "model" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata", + "description": "Any additional metadata for this model" + }, + "model_type": { + "$ref": "#/components/schemas/ModelType", + "default": "llm" + } + }, + "type": "object", + "required": [ + "identifier", + "provider_id" + ], + "title": "Model", + "description": "A model resource representing an AI model registered in Llama Stack." + }, + "ModelCandidate": { + "properties": { + "type": { + "type": "string", + "const": "model", + "title": "Type", + "default": "model" + }, + "model": { + "type": "string", + "title": "Model" + }, + "sampling_params": { + "$ref": "#/components/schemas/SamplingParams" + }, + "system_message": { + "$ref": "#/components/schemas/SystemMessage" + } + }, + "type": "object", + "required": [ + "model", + "sampling_params" + ], + "title": "ModelCandidate", + "description": "A model candidate for evaluation." + }, + "ModelType": { + "type": "string", + "enum": [ + "llm", + "embedding", + "rerank" + ], + "title": "ModelType", + "description": "Enumeration of supported model types in Llama Stack." + }, + "ModerationObject": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "model": { + "type": "string", + "title": "Model" + }, + "results": { + "items": { + "$ref": "#/components/schemas/ModerationObjectResults" + }, + "type": "array", + "title": "Results" + } + }, + "type": "object", + "required": [ + "id", + "model", + "results" + ], + "title": "ModerationObject", + "description": "A moderation object." + }, + "ModerationObjectResults": { + "properties": { + "flagged": { + "type": "boolean", + "title": "Flagged" + }, + "categories": { + "title": "Categories", + "additionalProperties": { + "type": "boolean" + }, + "type": "object" + }, + "category_applied_input_types": { + "title": "Category Applied Input Types", + "additionalProperties": { + "items": { + "type": "string" + }, + "type": "array" + }, + "type": "object" + }, + "category_scores": { + "title": "Category Scores", + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + "user_message": { + "title": "User Message", + "type": "string" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata" + } + }, + "type": "object", + "required": [ + "flagged" + ], + "title": "ModerationObjectResults", + "description": "A moderation object." + }, + "NumberType": { + "properties": { + "type": { + "type": "string", + "const": "number", + "title": "Type", + "default": "number" + } + }, + "type": "object", + "title": "NumberType", + "description": "Parameter type for numeric values." + }, + "ObjectType": { + "properties": { + "type": { + "type": "string", + "const": "object", + "title": "Type", + "default": "object" + } + }, + "type": "object", + "title": "ObjectType", + "description": "Parameter type for object values." 
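To make the Model resource shape above concrete, a registered model entry might look like the sketch below; the identifier, provider ids, and metadata are invented placeholders.

# Illustrative Model resource (identifier and provider_id are the required fields).
model_resource = {
    "identifier": "my-llm",                 # hypothetical name inside Llama Stack
    "provider_id": "example-provider",      # hypothetical provider
    "provider_resource_id": "example/model-name",
    "type": "model",
    "model_type": "llm",                    # llm | embedding | rerank
    "metadata": {},
}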
+ }, + "OpenAIAssistantMessageParam-Input": { + "properties": { + "role": { + "type": "string", + "const": "assistant", + "title": "Role", + "default": "assistant" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + "type": "array" + } + ], + "title": "Content" + }, + "name": { + "title": "Name", + "type": "string" + }, + "tool_calls": { + "title": "Tool Calls", + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionToolCall" + }, + "type": "array" + } + }, + "type": "object", + "title": "OpenAIAssistantMessageParam", + "description": "A message containing the model's (assistant) response in an OpenAI-compatible chat completion request." + }, + "OpenAIAssistantMessageParam-Output": { + "properties": { + "role": { + "type": "string", + "const": "assistant", + "title": "Role", + "default": "assistant" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + "type": "array" + } + ], + "title": "Content" + }, + "name": { + "title": "Name", + "type": "string" + }, + "tool_calls": { + "title": "Tool Calls", + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionToolCall" + }, + "type": "array" + } + }, + "type": "object", + "title": "OpenAIAssistantMessageParam", + "description": "A message containing the model's (assistant) response in an OpenAI-compatible chat completion request." + }, + "OpenAIChatCompletion": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "choices": { + "items": { + "$ref": "#/components/schemas/OpenAIChoice-Output" + }, + "type": "array", + "title": "Choices" + }, + "object": { + "type": "string", + "const": "chat.completion", + "title": "Object", + "default": "chat.completion" + }, + "created": { + "type": "integer", + "title": "Created" + }, + "model": { + "type": "string", + "title": "Model" + }, + "usage": { + "$ref": "#/components/schemas/OpenAIChatCompletionUsage" + } + }, + "type": "object", + "required": [ + "id", + "choices", + "created", + "model" + ], + "title": "OpenAIChatCompletion", + "description": "Response from an OpenAI-compatible chat completion request." + }, + "OpenAIChatCompletionContentPartImageParam": { + "properties": { + "type": { + "type": "string", + "const": "image_url", + "title": "Type", + "default": "image_url" + }, + "image_url": { + "$ref": "#/components/schemas/OpenAIImageURL" + } + }, + "type": "object", + "required": [ + "image_url" + ], + "title": "OpenAIChatCompletionContentPartImageParam", + "description": "Image content part for OpenAI-compatible chat completion messages." + }, + "OpenAIChatCompletionContentPartTextParam": { + "properties": { + "type": { + "type": "string", + "const": "text", + "title": "Type", + "default": "text" + }, + "text": { + "type": "string", + "title": "Text" + } + }, + "type": "object", + "required": [ + "text" + ], + "title": "OpenAIChatCompletionContentPartTextParam", + "description": "Text content part for OpenAI-compatible chat completion messages." 
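The assistant message schema above allows either plain text or a list of text parts, plus optional tool calls. A hedged example, with a hypothetical function name and arguments:

# Illustrative OpenAIAssistantMessageParam carrying one function tool call.
assistant_message = {
    "role": "assistant",
    "content": "Let me look that up.",
    "tool_calls": [
        {
            "index": 0,
            "id": "call_abc",               # hypothetical call id
            "type": "function",
            "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'},
        }
    ],
}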
+ }, + "OpenAIChatCompletionRequestWithExtraBody": { + "properties": { + "model": { + "type": "string", + "title": "Model" + }, + "messages": { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIUserMessageParam-Input" + }, + { + "$ref": "#/components/schemas/OpenAISystemMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIAssistantMessageParam-Input" + }, + { + "$ref": "#/components/schemas/OpenAIToolMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIDeveloperMessageParam" + } + ], + "discriminator": { + "propertyName": "role", + "mapping": { + "assistant": "#/components/schemas/OpenAIAssistantMessageParam-Input", + "developer": "#/components/schemas/OpenAIDeveloperMessageParam", + "system": "#/components/schemas/OpenAISystemMessageParam", + "tool": "#/components/schemas/OpenAIToolMessageParam", + "user": "#/components/schemas/OpenAIUserMessageParam-Input" + } + } + }, + "type": "array", + "minItems": 1, + "title": "Messages" + }, + "frequency_penalty": { + "title": "Frequency Penalty", + "type": "number" + }, + "function_call": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ], + "title": "Function Call" + }, + "functions": { + "title": "Functions", + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array" + }, + "logit_bias": { + "title": "Logit Bias", + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + "logprobs": { + "title": "Logprobs", + "type": "boolean" + }, + "max_completion_tokens": { + "title": "Max Completion Tokens", + "type": "integer" + }, + "max_tokens": { + "title": "Max Tokens", + "type": "integer" + }, + "n": { + "title": "N", + "type": "integer" + }, + "parallel_tool_calls": { + "title": "Parallel Tool Calls", + "type": "boolean" + }, + "presence_penalty": { + "title": "Presence Penalty", + "type": "number" + }, + "response_format": { + "title": "Response Format", + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseFormatText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseFormatJSONSchema" + }, + { + "$ref": "#/components/schemas/OpenAIResponseFormatJSONObject" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "json_object": "#/components/schemas/OpenAIResponseFormatJSONObject", + "json_schema": "#/components/schemas/OpenAIResponseFormatJSONSchema", + "text": "#/components/schemas/OpenAIResponseFormatText" + } + } + }, + "seed": { + "title": "Seed", + "type": "integer" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ], + "title": "Stop" + }, + "stream": { + "title": "Stream", + "type": "boolean" + }, + "stream_options": { + "title": "Stream Options", + "additionalProperties": true, + "type": "object" + }, + "temperature": { + "title": "Temperature", + "type": "number" + }, + "tool_choice": { + "anyOf": [ + { + "type": "string" + }, + { + "additionalProperties": true, + "type": "object" + } + ], + "title": "Tool Choice" + }, + "tools": { + "title": "Tools", + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array" + }, + "top_logprobs": { + "title": "Top Logprobs", + "type": "integer" + }, + "top_p": { + "title": "Top P", + "type": "number" + }, + "user": { + "title": "User", + "type": "string" + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "model", + "messages" + ], + "title": "OpenAIChatCompletionRequestWithExtraBody", + "description": "Request 
parameters for OpenAI-compatible chat completion endpoint." + }, + "OpenAIChatCompletionToolCall": { + "properties": { + "index": { + "title": "Index", + "type": "integer" + }, + "id": { + "title": "Id", + "type": "string" + }, + "type": { + "type": "string", + "const": "function", + "title": "Type", + "default": "function" + }, + "function": { + "$ref": "#/components/schemas/OpenAIChatCompletionToolCallFunction" + } + }, + "type": "object", + "title": "OpenAIChatCompletionToolCall", + "description": "Tool call specification for OpenAI-compatible chat completion responses." + }, + "OpenAIChatCompletionToolCallFunction": { + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "arguments": { + "title": "Arguments", + "type": "string" + } + }, + "type": "object", + "title": "OpenAIChatCompletionToolCallFunction", + "description": "Function call details for OpenAI-compatible tool calls." + }, + "OpenAIChatCompletionUsage": { + "properties": { + "prompt_tokens": { + "type": "integer", + "title": "Prompt Tokens" + }, + "completion_tokens": { + "type": "integer", + "title": "Completion Tokens" + }, + "total_tokens": { + "type": "integer", + "title": "Total Tokens" + }, + "prompt_tokens_details": { + "$ref": "#/components/schemas/OpenAIChatCompletionUsagePromptTokensDetails" + }, + "completion_tokens_details": { + "$ref": "#/components/schemas/OpenAIChatCompletionUsageCompletionTokensDetails" + } + }, + "type": "object", + "required": [ + "prompt_tokens", + "completion_tokens", + "total_tokens" + ], + "title": "OpenAIChatCompletionUsage", + "description": "Usage information for OpenAI chat completion." + }, + "OpenAIChatCompletionUsageCompletionTokensDetails": { + "properties": { + "reasoning_tokens": { + "title": "Reasoning Tokens", + "type": "integer" + } + }, + "type": "object", + "title": "OpenAIChatCompletionUsageCompletionTokensDetails", + "description": "Token details for output tokens in OpenAI chat completion usage." + }, + "OpenAIChatCompletionUsagePromptTokensDetails": { + "properties": { + "cached_tokens": { + "title": "Cached Tokens", + "type": "integer" + } + }, + "type": "object", + "title": "OpenAIChatCompletionUsagePromptTokensDetails", + "description": "Token details for prompt tokens in OpenAI chat completion usage." + }, + "OpenAIChoice-Output": { + "properties": { + "message": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIUserMessageParam-Output" + }, + { + "$ref": "#/components/schemas/OpenAISystemMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIAssistantMessageParam-Output" + }, + { + "$ref": "#/components/schemas/OpenAIToolMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIDeveloperMessageParam" + } + ], + "title": "Message", + "discriminator": { + "propertyName": "role", + "mapping": { + "assistant": "#/components/schemas/OpenAIAssistantMessageParam-Output", + "developer": "#/components/schemas/OpenAIDeveloperMessageParam", + "system": "#/components/schemas/OpenAISystemMessageParam", + "tool": "#/components/schemas/OpenAIToolMessageParam", + "user": "#/components/schemas/OpenAIUserMessageParam-Output" + } + } + }, + "finish_reason": { + "type": "string", + "title": "Finish Reason" + }, + "index": { + "type": "integer", + "title": "Index" + }, + "logprobs": { + "$ref": "#/components/schemas/OpenAIChoiceLogprobs-Output" + } + }, + "type": "object", + "required": [ + "message", + "finish_reason", + "index" + ], + "title": "OpenAIChoice", + "description": "A choice from an OpenAI-compatible chat completion response." 
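Putting the chat-completion request schema above together, a minimal request body could look like this sketch; the model id and message text are placeholders, and every field besides model and messages is optional.

# Illustrative OpenAIChatCompletionRequestWithExtraBody payload.
chat_request = {
    "model": "example-model",                # hypothetical model id
    "messages": [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Say hello."},
    ],
    "temperature": 0.2,
    "max_completion_tokens": 64,
    "stream": False,
}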
+ }, + "OpenAIChoiceLogprobs-Output": { + "properties": { + "content": { + "title": "Content", + "items": { + "$ref": "#/components/schemas/OpenAITokenLogProb" + }, + "type": "array" + }, + "refusal": { + "title": "Refusal", + "items": { + "$ref": "#/components/schemas/OpenAITokenLogProb" + }, + "type": "array" + } + }, + "type": "object", + "title": "OpenAIChoiceLogprobs", + "description": "The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response." + }, + "OpenAICompletion": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "choices": { + "items": { + "$ref": "#/components/schemas/OpenAICompletionChoice-Output" + }, + "type": "array", + "title": "Choices" + }, + "created": { + "type": "integer", + "title": "Created" + }, + "model": { + "type": "string", + "title": "Model" + }, + "object": { + "type": "string", + "const": "text_completion", + "title": "Object", + "default": "text_completion" + } + }, + "type": "object", + "required": [ + "id", + "choices", + "created", + "model" + ], + "title": "OpenAICompletion", + "description": "Response from an OpenAI-compatible completion request." + }, + "OpenAICompletionChoice-Output": { + "properties": { + "finish_reason": { + "type": "string", + "title": "Finish Reason" + }, + "text": { + "type": "string", + "title": "Text" + }, + "index": { + "type": "integer", + "title": "Index" + }, + "logprobs": { + "$ref": "#/components/schemas/OpenAIChoiceLogprobs-Output" + } + }, + "type": "object", + "required": [ + "finish_reason", + "text", + "index" + ], + "title": "OpenAICompletionChoice", + "description": "A choice from an OpenAI-compatible completion response." + }, + "OpenAICompletionRequestWithExtraBody": { + "properties": { + "model": { + "type": "string", + "title": "Model" + }, + "prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "items": { + "type": "integer" + }, + "type": "array" + }, + { + "items": { + "items": { + "type": "integer" + }, + "type": "array" + }, + "type": "array" + } + ], + "title": "Prompt" + }, + "best_of": { + "title": "Best Of", + "type": "integer" + }, + "echo": { + "title": "Echo", + "type": "boolean" + }, + "frequency_penalty": { + "title": "Frequency Penalty", + "type": "number" + }, + "logit_bias": { + "title": "Logit Bias", + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + "logprobs": { + "title": "Logprobs", + "type": "boolean" + }, + "max_tokens": { + "title": "Max Tokens", + "type": "integer" + }, + "n": { + "title": "N", + "type": "integer" + }, + "presence_penalty": { + "title": "Presence Penalty", + "type": "number" + }, + "seed": { + "title": "Seed", + "type": "integer" + }, + "stop": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ], + "title": "Stop" + }, + "stream": { + "title": "Stream", + "type": "boolean" + }, + "stream_options": { + "title": "Stream Options", + "additionalProperties": true, + "type": "object" + }, + "temperature": { + "title": "Temperature", + "type": "number" + }, + "top_p": { + "title": "Top P", + "type": "number" + }, + "user": { + "title": "User", + "type": "string" + }, + "suffix": { + "title": "Suffix", + "type": "string" + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "model", + "prompt" + ], + "title": "OpenAICompletionRequestWithExtraBody", + "description": "Request parameters for OpenAI-compatible completion endpoint." 
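For the text-completion request schema above, only model and prompt are required; a sketch with placeholder values:

# Illustrative OpenAICompletionRequestWithExtraBody payload.
completion_request = {
    "model": "example-model",     # hypothetical model id
    "prompt": "Once upon a time",
    "max_tokens": 32,
    "temperature": 0.7,
    "stop": ["\n\n"],
}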
+ }, + "OpenAICreateVectorStoreFileBatchRequestWithExtraBody": { + "properties": { + "file_ids": { + "items": { + "type": "string" + }, + "type": "array", + "title": "File Ids" + }, + "attributes": { + "title": "Attributes", + "additionalProperties": true, + "type": "object" + }, + "chunking_strategy": { + "title": "Chunking Strategy", + "oneOf": [ + { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyAuto" + }, + { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyStatic" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "auto": "#/components/schemas/VectorStoreChunkingStrategyAuto", + "static": "#/components/schemas/VectorStoreChunkingStrategyStatic" + } + } + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "file_ids" + ], + "title": "OpenAICreateVectorStoreFileBatchRequestWithExtraBody", + "description": "Request to create a vector store file batch with extra_body support." + }, + "OpenAICreateVectorStoreRequestWithExtraBody": { + "properties": { + "name": { + "title": "Name", + "type": "string" + }, + "file_ids": { + "title": "File Ids", + "items": { + "type": "string" + }, + "type": "array" + }, + "expires_after": { + "title": "Expires After", + "additionalProperties": true, + "type": "object" + }, + "chunking_strategy": { + "title": "Chunking Strategy", + "additionalProperties": true, + "type": "object" + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object" + } + }, + "additionalProperties": true, + "type": "object", + "title": "OpenAICreateVectorStoreRequestWithExtraBody", + "description": "Request to create a vector store with extra_body support." + }, + "OpenAIDeveloperMessageParam": { + "properties": { + "role": { + "type": "string", + "const": "developer", + "title": "Role", + "default": "developer" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + "type": "array" + } + ], + "title": "Content" + }, + "name": { + "title": "Name", + "type": "string" + } + }, + "type": "object", + "required": [ + "content" + ], + "title": "OpenAIDeveloperMessageParam", + "description": "A message from the developer in an OpenAI-compatible chat completion request." + }, + "OpenAIEmbeddingData": { + "properties": { + "object": { + "type": "string", + "const": "embedding", + "title": "Object", + "default": "embedding" + }, + "embedding": { + "anyOf": [ + { + "items": { + "type": "number" + }, + "type": "array" + }, + { + "type": "string" + } + ], + "title": "Embedding" + }, + "index": { + "type": "integer", + "title": "Index" + } + }, + "type": "object", + "required": [ + "embedding", + "index" + ], + "title": "OpenAIEmbeddingData", + "description": "A single embedding data object from an OpenAI-compatible embeddings response." + }, + "OpenAIEmbeddingUsage": { + "properties": { + "prompt_tokens": { + "type": "integer", + "title": "Prompt Tokens" + }, + "total_tokens": { + "type": "integer", + "title": "Total Tokens" + } + }, + "type": "object", + "required": [ + "prompt_tokens", + "total_tokens" + ], + "title": "OpenAIEmbeddingUsage", + "description": "Usage information for an OpenAI-compatible embeddings response." 
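The two vector-store request schemas above can be exercised with payloads like the following; the store name and file ids are invented.

# Illustrative vector store creation request (all fields optional).
vector_store_request = {
    "name": "docs-store",                     # hypothetical store name
    "file_ids": ["file-1", "file-2"],         # hypothetical file ids
    "metadata": {"project": "demo"},
}

# Illustrative file batch request (file_ids is required; chunking_strategy uses the "auto" variant).
file_batch_request = {
    "file_ids": ["file-3"],
    "chunking_strategy": {"type": "auto"},
}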
+ }, + "OpenAIEmbeddingsRequestWithExtraBody": { + "properties": { + "model": { + "type": "string", + "title": "Model" + }, + "input": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + } + ], + "title": "Input" + }, + "encoding_format": { + "title": "Encoding Format", + "default": "float", + "type": "string" + }, + "dimensions": { + "title": "Dimensions", + "type": "integer" + }, + "user": { + "title": "User", + "type": "string" + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "model", + "input" + ], + "title": "OpenAIEmbeddingsRequestWithExtraBody", + "description": "Request parameters for OpenAI-compatible embeddings endpoint." + }, + "OpenAIEmbeddingsResponse": { + "properties": { + "object": { + "type": "string", + "const": "list", + "title": "Object", + "default": "list" + }, + "data": { + "items": { + "$ref": "#/components/schemas/OpenAIEmbeddingData" + }, + "type": "array", + "title": "Data" + }, + "model": { + "type": "string", + "title": "Model" + }, + "usage": { + "$ref": "#/components/schemas/OpenAIEmbeddingUsage" + } + }, + "type": "object", + "required": [ + "data", + "model", + "usage" + ], + "title": "OpenAIEmbeddingsResponse", + "description": "Response from an OpenAI-compatible embeddings request." + }, + "OpenAIFile": { + "properties": { + "type": { + "type": "string", + "const": "file", + "title": "Type", + "default": "file" + }, + "file": { + "$ref": "#/components/schemas/OpenAIFileFile" + } + }, + "type": "object", + "required": [ + "file" + ], + "title": "OpenAIFile" + }, + "OpenAIFileFile": { + "properties": { + "file_data": { + "title": "File Data", + "type": "string" + }, + "file_id": { + "title": "File Id", + "type": "string" + }, + "filename": { + "title": "Filename", + "type": "string" + } + }, + "type": "object", + "title": "OpenAIFileFile" + }, + "OpenAIFileObject": { + "properties": { + "object": { + "type": "string", + "const": "file", + "title": "Object", + "default": "file" + }, + "id": { + "type": "string", + "title": "Id" + }, + "bytes": { + "type": "integer", + "title": "Bytes" + }, + "created_at": { + "type": "integer", + "title": "Created At" + }, + "expires_at": { + "type": "integer", + "title": "Expires At" + }, + "filename": { + "type": "string", + "title": "Filename" + }, + "purpose": { + "$ref": "#/components/schemas/OpenAIFilePurpose" + } + }, + "type": "object", + "required": [ + "id", + "bytes", + "created_at", + "expires_at", + "filename", + "purpose" + ], + "title": "OpenAIFileObject", + "description": "OpenAI File object as defined in the OpenAI Files API." + }, + "OpenAIFilePurpose": { + "type": "string", + "enum": [ + "assistants", + "batch" + ], + "title": "OpenAIFilePurpose", + "description": "Valid purpose values for OpenAI Files API." + }, + "OpenAIImageURL": { + "properties": { + "url": { + "type": "string", + "title": "Url" + }, + "detail": { + "title": "Detail", + "type": "string" + } + }, + "type": "object", + "required": [ + "url" + ], + "title": "OpenAIImageURL", + "description": "Image URL specification for OpenAI-compatible chat completion messages." 
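A minimal embeddings request under the schema above needs only model and input; this sketch also spells out the documented default encoding_format.

# Illustrative OpenAIEmbeddingsRequestWithExtraBody payload.
embeddings_request = {
    "model": "example-embedding-model",       # hypothetical model id
    "input": ["first sentence", "second sentence"],
    "encoding_format": "float",
}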
+ }, + "OpenAIJSONSchema": { + "properties": { + "name": { + "type": "string", + "title": "Name" + }, + "description": { + "title": "Description", + "type": "string" + }, + "strict": { + "title": "Strict", + "type": "boolean" + }, + "schema": { + "title": "Schema", + "additionalProperties": true, + "type": "object" + } + }, + "type": "object", + "title": "OpenAIJSONSchema", + "description": "JSON schema specification for OpenAI-compatible structured response format." + }, + "OpenAIResponseAnnotationCitation": { + "properties": { + "type": { + "type": "string", + "const": "url_citation", + "title": "Type", + "default": "url_citation" + }, + "end_index": { + "type": "integer", + "title": "End Index" + }, + "start_index": { + "type": "integer", + "title": "Start Index" + }, + "title": { + "type": "string", + "title": "Title" + }, + "url": { + "type": "string", + "title": "Url" + } + }, + "type": "object", + "required": [ + "end_index", + "start_index", + "title", + "url" + ], + "title": "OpenAIResponseAnnotationCitation", + "description": "URL citation annotation for referencing external web resources." + }, + "OpenAIResponseAnnotationContainerFileCitation": { + "properties": { + "type": { + "type": "string", + "const": "container_file_citation", + "title": "Type", + "default": "container_file_citation" + }, + "container_id": { + "type": "string", + "title": "Container Id" + }, + "end_index": { + "type": "integer", + "title": "End Index" + }, + "file_id": { + "type": "string", + "title": "File Id" + }, + "filename": { + "type": "string", + "title": "Filename" + }, + "start_index": { + "type": "integer", + "title": "Start Index" + } + }, + "type": "object", + "required": [ + "container_id", + "end_index", + "file_id", + "filename", + "start_index" + ], + "title": "OpenAIResponseAnnotationContainerFileCitation" + }, + "OpenAIResponseAnnotationFileCitation": { + "properties": { + "type": { + "type": "string", + "const": "file_citation", + "title": "Type", + "default": "file_citation" + }, + "file_id": { + "type": "string", + "title": "File Id" + }, + "filename": { + "type": "string", + "title": "Filename" + }, + "index": { + "type": "integer", + "title": "Index" + } + }, + "type": "object", + "required": [ + "file_id", + "filename", + "index" + ], + "title": "OpenAIResponseAnnotationFileCitation", + "description": "File citation annotation for referencing specific files in response content." + }, + "OpenAIResponseAnnotationFilePath": { + "properties": { + "type": { + "type": "string", + "const": "file_path", + "title": "Type", + "default": "file_path" + }, + "file_id": { + "type": "string", + "title": "File Id" + }, + "index": { + "type": "integer", + "title": "Index" + } + }, + "type": "object", + "required": [ + "file_id", + "index" + ], + "title": "OpenAIResponseAnnotationFilePath" + }, + "OpenAIResponseContentPartRefusal": { + "properties": { + "type": { + "type": "string", + "const": "refusal", + "title": "Type", + "default": "refusal" + }, + "refusal": { + "type": "string", + "title": "Refusal" + } + }, + "type": "object", + "required": [ + "refusal" + ], + "title": "OpenAIResponseContentPartRefusal", + "description": "Refusal content within a streamed response part." 
+ }, + "OpenAIResponseError": { + "properties": { + "code": { + "type": "string", + "title": "Code" + }, + "message": { + "type": "string", + "title": "Message" + } + }, + "type": "object", + "required": [ + "code", + "message" + ], + "title": "OpenAIResponseError", + "description": "Error details for failed OpenAI response requests." + }, + "OpenAIResponseFormatJSONObject": { + "properties": { + "type": { + "type": "string", + "const": "json_object", + "title": "Type", + "default": "json_object" + } + }, + "type": "object", + "title": "OpenAIResponseFormatJSONObject", + "description": "JSON object response format for OpenAI-compatible chat completion requests." + }, + "OpenAIResponseFormatJSONSchema": { + "properties": { + "type": { + "type": "string", + "const": "json_schema", + "title": "Type", + "default": "json_schema" + }, + "json_schema": { + "$ref": "#/components/schemas/OpenAIJSONSchema" + } + }, + "type": "object", + "required": [ + "json_schema" + ], + "title": "OpenAIResponseFormatJSONSchema", + "description": "JSON schema response format for OpenAI-compatible chat completion requests." + }, + "OpenAIResponseFormatText": { + "properties": { + "type": { + "type": "string", + "const": "text", + "title": "Type", + "default": "text" + } + }, + "type": "object", + "title": "OpenAIResponseFormatText", + "description": "Text response format for OpenAI-compatible chat completion requests." + }, + "OpenAIResponseInputFunctionToolCallOutput": { + "properties": { + "call_id": { + "type": "string", + "title": "Call Id" + }, + "output": { + "type": "string", + "title": "Output" + }, + "type": { + "type": "string", + "const": "function_call_output", + "title": "Type", + "default": "function_call_output" + }, + "id": { + "title": "Id", + "type": "string" + }, + "status": { + "title": "Status", + "type": "string" + } + }, + "type": "object", + "required": [ + "call_id", + "output" + ], + "title": "OpenAIResponseInputFunctionToolCallOutput", + "description": "This represents the output of a function call that gets passed back to the model." + }, + "OpenAIResponseInputMessageContentFile": { + "properties": { + "type": { + "type": "string", + "const": "input_file", + "title": "Type", + "default": "input_file" + }, + "file_data": { + "title": "File Data", + "type": "string" + }, + "file_id": { + "title": "File Id", + "type": "string" + }, + "file_url": { + "title": "File Url", + "type": "string" + }, + "filename": { + "title": "Filename", + "type": "string" + } + }, + "type": "object", + "title": "OpenAIResponseInputMessageContentFile", + "description": "File content for input messages in OpenAI response format." + }, + "OpenAIResponseInputMessageContentImage": { + "properties": { + "detail": { + "anyOf": [ + { + "type": "string", + "const": "low" + }, + { + "type": "string", + "const": "high" + }, + { + "type": "string", + "const": "auto" + } + ], + "title": "Detail", + "default": "auto" + }, + "type": { + "type": "string", + "const": "input_image", + "title": "Type", + "default": "input_image" + }, + "file_id": { + "title": "File Id", + "type": "string" + }, + "image_url": { + "title": "Image Url", + "type": "string" + } + }, + "type": "object", + "title": "OpenAIResponseInputMessageContentImage", + "description": "Image content for input messages in OpenAI response format." 
+ }, + "OpenAIResponseInputMessageContentText": { + "properties": { + "text": { + "type": "string", + "title": "Text" + }, + "type": { + "type": "string", + "const": "input_text", + "title": "Type", + "default": "input_text" + } + }, + "type": "object", + "required": [ + "text" + ], + "title": "OpenAIResponseInputMessageContentText", + "description": "Text content for input messages in OpenAI response format." + }, + "OpenAIResponseInputToolFileSearch": { + "properties": { + "type": { + "type": "string", + "const": "file_search", + "title": "Type", + "default": "file_search" + }, + "vector_store_ids": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Vector Store Ids" + }, + "filters": { + "title": "Filters", + "additionalProperties": true, + "type": "object" + }, + "max_num_results": { + "title": "Max Num Results", + "default": 10, + "type": "integer", + "maximum": 50.0, + "minimum": 1.0 + }, + "ranking_options": { + "$ref": "#/components/schemas/SearchRankingOptions" + } + }, + "type": "object", + "required": [ + "vector_store_ids" + ], + "title": "OpenAIResponseInputToolFileSearch", + "description": "File search tool configuration for OpenAI response inputs." + }, + "OpenAIResponseInputToolFunction": { + "properties": { + "type": { + "type": "string", + "const": "function", + "title": "Type", + "default": "function" + }, + "name": { + "type": "string", + "title": "Name" + }, + "description": { + "title": "Description", + "type": "string" + }, + "parameters": { + "title": "Parameters", + "additionalProperties": true, + "type": "object" + }, + "strict": { + "title": "Strict", + "type": "boolean" + } + }, + "type": "object", + "required": [ + "name", + "parameters" + ], + "title": "OpenAIResponseInputToolFunction", + "description": "Function tool configuration for OpenAI response inputs." + }, + "OpenAIResponseInputToolMCP": { + "properties": { + "type": { + "type": "string", + "const": "mcp", + "title": "Type", + "default": "mcp" + }, + "server_label": { + "type": "string", + "title": "Server Label" + }, + "server_url": { + "type": "string", + "title": "Server Url" + }, + "headers": { + "title": "Headers", + "additionalProperties": true, + "type": "object" + }, + "require_approval": { + "anyOf": [ + { + "type": "string", + "const": "always" + }, + { + "type": "string", + "const": "never" + }, + { + "$ref": "#/components/schemas/ApprovalFilter" + } + ], + "title": "Require Approval", + "default": "never" + }, + "allowed_tools": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "$ref": "#/components/schemas/AllowedToolsFilter" + } + ], + "title": "Allowed Tools" + } + }, + "type": "object", + "required": [ + "server_label", + "server_url" + ], + "title": "OpenAIResponseInputToolMCP", + "description": "Model Context Protocol (MCP) tool configuration for OpenAI response inputs." + }, + "OpenAIResponseInputToolWebSearch": { + "properties": { + "type": { + "anyOf": [ + { + "type": "string", + "const": "web_search" + }, + { + "type": "string", + "const": "web_search_preview" + }, + { + "type": "string", + "const": "web_search_preview_2025_03_11" + } + ], + "title": "Type", + "default": "web_search" + }, + "search_context_size": { + "title": "Search Context Size", + "default": "medium", + "type": "string", + "pattern": "^low|medium|high$" + } + }, + "type": "object", + "title": "OpenAIResponseInputToolWebSearch", + "description": "Web search tool configuration for OpenAI response inputs." 
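The four input-tool schemas above (web search, file search, function, MCP) are typically passed together as one tools array; a sketch with placeholder ids and URLs:

# Illustrative tools list mixing the documented input tool variants.
tools = [
    {"type": "web_search"},
    {"type": "file_search", "vector_store_ids": ["vs_123"]},          # hypothetical store id
    {
        "type": "function",
        "name": "get_weather",                                        # hypothetical function
        "parameters": {"type": "object", "properties": {"city": {"type": "string"}}},
    },
    {"type": "mcp", "server_label": "docs", "server_url": "https://example.com/mcp"},
]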
+ }, + "OpenAIResponseMCPApprovalRequest": { + "properties": { + "arguments": { + "type": "string", + "title": "Arguments" + }, + "id": { + "type": "string", + "title": "Id" + }, + "name": { + "type": "string", + "title": "Name" + }, + "server_label": { + "type": "string", + "title": "Server Label" + }, + "type": { + "type": "string", + "const": "mcp_approval_request", + "title": "Type", + "default": "mcp_approval_request" + } + }, + "type": "object", + "required": [ + "arguments", + "id", + "name", + "server_label" + ], + "title": "OpenAIResponseMCPApprovalRequest", + "description": "A request for human approval of a tool invocation." + }, + "OpenAIResponseMCPApprovalResponse": { + "properties": { + "approval_request_id": { + "type": "string", + "title": "Approval Request Id" + }, + "approve": { + "type": "boolean", + "title": "Approve" + }, + "type": { + "type": "string", + "const": "mcp_approval_response", + "title": "Type", + "default": "mcp_approval_response" + }, + "id": { + "title": "Id", + "type": "string" + }, + "reason": { + "title": "Reason", + "type": "string" + } + }, + "type": "object", + "required": [ + "approval_request_id", + "approve" + ], + "title": "OpenAIResponseMCPApprovalResponse", + "description": "A response to an MCP approval request." + }, + "OpenAIResponseMessage-Input": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentImage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentFile" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "input_file": "#/components/schemas/OpenAIResponseInputMessageContentFile", + "input_image": "#/components/schemas/OpenAIResponseInputMessageContentImage", + "input_text": "#/components/schemas/OpenAIResponseInputMessageContentText" + } + } + }, + "type": "array" + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageContentOutputText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseContentPartRefusal" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "output_text": "#/components/schemas/OpenAIResponseOutputMessageContentOutputText", + "refusal": "#/components/schemas/OpenAIResponseContentPartRefusal" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "role": { + "anyOf": [ + { + "type": "string", + "const": "system" + }, + { + "type": "string", + "const": "developer" + }, + { + "type": "string", + "const": "user" + }, + { + "type": "string", + "const": "assistant" + } + ], + "title": "Role" + }, + "type": { + "type": "string", + "const": "message", + "title": "Type", + "default": "message" + }, + "id": { + "title": "Id", + "type": "string" + }, + "status": { + "title": "Status", + "type": "string" + } + }, + "type": "object", + "required": [ + "content", + "role" + ], + "title": "OpenAIResponseMessage", + "description": "Corresponds to the various Message types in the Responses API.\nThey are all under one type because the Responses API gives them all\nthe same \"type\" value, and there is no way to tell them apart in certain\nscenarios." 
+ }, + "OpenAIResponseMessage-Output": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentImage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentFile" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "input_file": "#/components/schemas/OpenAIResponseInputMessageContentFile", + "input_image": "#/components/schemas/OpenAIResponseInputMessageContentImage", + "input_text": "#/components/schemas/OpenAIResponseInputMessageContentText" + } + } + }, + "type": "array" + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageContentOutputText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseContentPartRefusal" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "output_text": "#/components/schemas/OpenAIResponseOutputMessageContentOutputText", + "refusal": "#/components/schemas/OpenAIResponseContentPartRefusal" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "role": { + "anyOf": [ + { + "type": "string", + "const": "system" + }, + { + "type": "string", + "const": "developer" + }, + { + "type": "string", + "const": "user" + }, + { + "type": "string", + "const": "assistant" + } + ], + "title": "Role" + }, + "type": { + "type": "string", + "const": "message", + "title": "Type", + "default": "message" + }, + "id": { + "title": "Id", + "type": "string" + }, + "status": { + "title": "Status", + "type": "string" + } + }, + "type": "object", + "required": [ + "content", + "role" + ], + "title": "OpenAIResponseMessage", + "description": "Corresponds to the various Message types in the Responses API.\nThey are all under one type because the Responses API gives them all\nthe same \"type\" value, and there is no way to tell them apart in certain\nscenarios." 
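Because the Responses API folds several message kinds into the single OpenAIResponseMessage type above, an input message mixing text and image content looks like this sketch (the URL is a placeholder):

# Illustrative user input message for the Responses API.
input_message = {
    "type": "message",
    "role": "user",
    "content": [
        {"type": "input_text", "text": "Describe this image."},
        {"type": "input_image", "image_url": "https://example.com/cat.png"},
    ],
}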
+ }, + "OpenAIResponseObject": { + "properties": { + "created_at": { + "type": "integer", + "title": "Created At" + }, + "error": { + "$ref": "#/components/schemas/OpenAIResponseError" + }, + "id": { + "type": "string", + "title": "Id" + }, + "model": { + "type": "string", + "title": "Model" + }, + "object": { + "type": "string", + "const": "response", + "title": "Object", + "default": "response" + }, + "output": { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage-Output" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "file_search_call": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall", + "function_call": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall", + "mcp_approval_request": "#/components/schemas/OpenAIResponseMCPApprovalRequest", + "mcp_call": "#/components/schemas/OpenAIResponseOutputMessageMCPCall", + "mcp_list_tools": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools", + "message": "#/components/schemas/OpenAIResponseMessage-Output", + "web_search_call": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + } + } + }, + "type": "array", + "title": "Output" + }, + "parallel_tool_calls": { + "type": "boolean", + "title": "Parallel Tool Calls", + "default": false + }, + "previous_response_id": { + "title": "Previous Response Id", + "type": "string" + }, + "prompt": { + "$ref": "#/components/schemas/OpenAIResponsePrompt" + }, + "status": { + "type": "string", + "title": "Status" + }, + "temperature": { + "title": "Temperature", + "type": "number" + }, + "text": { + "$ref": "#/components/schemas/OpenAIResponseText", + "default": { + "format": { + "type": "text" + } + } + }, + "top_p": { + "title": "Top P", + "type": "number" + }, + "tools": { + "title": "Tools", + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseInputToolWebSearch" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolFileSearch" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolFunction" + }, + { + "$ref": "#/components/schemas/OpenAIResponseToolMCP" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "file_search": "#/components/schemas/OpenAIResponseInputToolFileSearch", + "function": "#/components/schemas/OpenAIResponseInputToolFunction", + "mcp": "#/components/schemas/OpenAIResponseToolMCP", + "web_search": "#/components/schemas/OpenAIResponseInputToolWebSearch", + "web_search_preview": "#/components/schemas/OpenAIResponseInputToolWebSearch", + "web_search_preview_2025_03_11": "#/components/schemas/OpenAIResponseInputToolWebSearch" + } + } + }, + "type": "array" + }, + "truncation": { + "title": "Truncation", + "type": "string" + }, + "usage": { + "$ref": "#/components/schemas/OpenAIResponseUsage" + }, + "instructions": { + "title": "Instructions", + "type": "string" + } + }, + "type": "object", + "required": [ + "created_at", + "id", + "model", + "output", + "status" + ], + "title": "OpenAIResponseObject", + "description": "Complete 
OpenAI response object containing generation results and metadata." + }, + "OpenAIResponseOutputMessageContentOutputText": { + "properties": { + "text": { + "type": "string", + "title": "Text" + }, + "type": { + "type": "string", + "const": "output_text", + "title": "Type", + "default": "output_text" + }, + "annotations": { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseAnnotationFileCitation" + }, + { + "$ref": "#/components/schemas/OpenAIResponseAnnotationCitation" + }, + { + "$ref": "#/components/schemas/OpenAIResponseAnnotationContainerFileCitation" + }, + { + "$ref": "#/components/schemas/OpenAIResponseAnnotationFilePath" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "container_file_citation": "#/components/schemas/OpenAIResponseAnnotationContainerFileCitation", + "file_citation": "#/components/schemas/OpenAIResponseAnnotationFileCitation", + "file_path": "#/components/schemas/OpenAIResponseAnnotationFilePath", + "url_citation": "#/components/schemas/OpenAIResponseAnnotationCitation" + } + } + }, + "type": "array", + "title": "Annotations" + } + }, + "type": "object", + "required": [ + "text" + ], + "title": "OpenAIResponseOutputMessageContentOutputText" + }, + "OpenAIResponseOutputMessageFileSearchToolCall": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "queries": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Queries" + }, + "status": { + "type": "string", + "title": "Status" + }, + "type": { + "type": "string", + "const": "file_search_call", + "title": "Type", + "default": "file_search_call" + }, + "results": { + "title": "Results", + "items": { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCallResults" + }, + "type": "array" + } + }, + "type": "object", + "required": [ + "id", + "queries", + "status" + ], + "title": "OpenAIResponseOutputMessageFileSearchToolCall", + "description": "File search tool call output message for OpenAI responses." + }, + "OpenAIResponseOutputMessageFileSearchToolCallResults": { + "properties": { + "attributes": { + "additionalProperties": true, + "type": "object", + "title": "Attributes" + }, + "file_id": { + "type": "string", + "title": "File Id" + }, + "filename": { + "type": "string", + "title": "Filename" + }, + "score": { + "type": "number", + "title": "Score" + }, + "text": { + "type": "string", + "title": "Text" + } + }, + "type": "object", + "required": [ + "attributes", + "file_id", + "filename", + "score", + "text" + ], + "title": "OpenAIResponseOutputMessageFileSearchToolCallResults", + "description": "Search results returned by the file search operation." + }, + "OpenAIResponseOutputMessageFunctionToolCall": { + "properties": { + "call_id": { + "type": "string", + "title": "Call Id" + }, + "name": { + "type": "string", + "title": "Name" + }, + "arguments": { + "type": "string", + "title": "Arguments" + }, + "type": { + "type": "string", + "const": "function_call", + "title": "Type", + "default": "function_call" + }, + "id": { + "title": "Id", + "type": "string" + }, + "status": { + "title": "Status", + "type": "string" + } + }, + "type": "object", + "required": [ + "call_id", + "name", + "arguments" + ], + "title": "OpenAIResponseOutputMessageFunctionToolCall", + "description": "Function tool call output message for OpenAI responses." 
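A minimal OpenAIResponseObject satisfying the required fields listed above (created_at, id, model, output, status) might look like the following; the ids, timestamp, and text are invented.

# Illustrative Responses API object with one assistant message in output.
response_object = {
    "id": "resp_123",                 # hypothetical response id
    "object": "response",
    "created_at": 1730000000,         # illustrative Unix timestamp
    "model": "example-model",
    "status": "completed",
    "output": [
        {
            "type": "message",
            "role": "assistant",
            "content": [{"type": "output_text", "text": "Hello!", "annotations": []}],
        }
    ],
}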
+ }, + "OpenAIResponseOutputMessageMCPCall": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "type": { + "type": "string", + "const": "mcp_call", + "title": "Type", + "default": "mcp_call" + }, + "arguments": { + "type": "string", + "title": "Arguments" + }, + "name": { + "type": "string", + "title": "Name" + }, + "server_label": { + "type": "string", + "title": "Server Label" + }, + "error": { + "title": "Error", + "type": "string" + }, + "output": { + "title": "Output", + "type": "string" + } + }, + "type": "object", + "required": [ + "id", + "arguments", + "name", + "server_label" + ], + "title": "OpenAIResponseOutputMessageMCPCall", + "description": "Model Context Protocol (MCP) call output message for OpenAI responses." + }, + "OpenAIResponseOutputMessageMCPListTools": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "type": { + "type": "string", + "const": "mcp_list_tools", + "title": "Type", + "default": "mcp_list_tools" + }, + "server_label": { + "type": "string", + "title": "Server Label" + }, + "tools": { + "items": { + "$ref": "#/components/schemas/MCPListToolsTool" + }, + "type": "array", + "title": "Tools" + } + }, + "type": "object", + "required": [ + "id", + "server_label", + "tools" + ], + "title": "OpenAIResponseOutputMessageMCPListTools", + "description": "MCP list tools output message containing available tools from an MCP server." + }, + "OpenAIResponseOutputMessageWebSearchToolCall": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "status": { + "type": "string", + "title": "Status" + }, + "type": { + "type": "string", + "const": "web_search_call", + "title": "Type", + "default": "web_search_call" + } + }, + "type": "object", + "required": [ + "id", + "status" + ], + "title": "OpenAIResponseOutputMessageWebSearchToolCall", + "description": "Web search tool call output message for OpenAI responses." + }, + "OpenAIResponsePrompt": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "variables": { + "title": "Variables", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentImage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentFile" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "input_file": "#/components/schemas/OpenAIResponseInputMessageContentFile", + "input_image": "#/components/schemas/OpenAIResponseInputMessageContentImage", + "input_text": "#/components/schemas/OpenAIResponseInputMessageContentText" + } + } + }, + "type": "object" + }, + "version": { + "title": "Version", + "type": "string" + } + }, + "type": "object", + "required": [ + "id" + ], + "title": "OpenAIResponsePrompt", + "description": "OpenAI compatible Prompt object that is used in OpenAI responses." + }, + "OpenAIResponseText": { + "properties": { + "format": { + "$ref": "#/components/schemas/OpenAIResponseTextFormat" + } + }, + "type": "object", + "title": "OpenAIResponseText", + "description": "Text response configuration for OpenAI responses." 
+ }, + "OpenAIResponseTextFormat": { + "properties": { + "type": { + "anyOf": [ + { + "type": "string", + "const": "text" + }, + { + "type": "string", + "const": "json_schema" + }, + { + "type": "string", + "const": "json_object" + } + ], + "title": "Type" + }, + "name": { + "title": "Name", + "type": "string" + }, + "schema": { + "title": "Schema", + "additionalProperties": true, + "type": "object" + }, + "description": { + "title": "Description", + "type": "string" + }, + "strict": { + "title": "Strict", + "type": "boolean" + } + }, + "type": "object", + "title": "OpenAIResponseTextFormat", + "description": "Configuration for Responses API text format." + }, + "OpenAIResponseToolMCP": { + "properties": { + "type": { + "type": "string", + "const": "mcp", + "title": "Type", + "default": "mcp" + }, + "server_label": { + "type": "string", + "title": "Server Label" + }, + "allowed_tools": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "$ref": "#/components/schemas/AllowedToolsFilter" + } + ], + "title": "Allowed Tools" + } + }, + "type": "object", + "required": [ + "server_label" + ], + "title": "OpenAIResponseToolMCP", + "description": "Model Context Protocol (MCP) tool configuration for OpenAI response object." + }, + "OpenAIResponseUsage": { + "properties": { + "input_tokens": { + "type": "integer", + "title": "Input Tokens" + }, + "output_tokens": { + "type": "integer", + "title": "Output Tokens" + }, + "total_tokens": { + "type": "integer", + "title": "Total Tokens" + }, + "input_tokens_details": { + "$ref": "#/components/schemas/OpenAIResponseUsageInputTokensDetails" + }, + "output_tokens_details": { + "$ref": "#/components/schemas/OpenAIResponseUsageOutputTokensDetails" + } + }, + "type": "object", + "required": [ + "input_tokens", + "output_tokens", + "total_tokens" + ], + "title": "OpenAIResponseUsage", + "description": "Usage information for OpenAI response." + }, + "OpenAIResponseUsageInputTokensDetails": { + "properties": { + "cached_tokens": { + "title": "Cached Tokens", + "type": "integer" + } + }, + "type": "object", + "title": "OpenAIResponseUsageInputTokensDetails", + "description": "Token details for input tokens in OpenAI response usage." + }, + "OpenAIResponseUsageOutputTokensDetails": { + "properties": { + "reasoning_tokens": { + "title": "Reasoning Tokens", + "type": "integer" + } + }, + "type": "object", + "title": "OpenAIResponseUsageOutputTokensDetails", + "description": "Token details for output tokens in OpenAI response usage." + }, + "OpenAISystemMessageParam": { + "properties": { + "role": { + "type": "string", + "const": "system", + "title": "Role", + "default": "system" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + "type": "array" + } + ], + "title": "Content" + }, + "name": { + "title": "Name", + "type": "string" + } + }, + "type": "object", + "required": [ + "content" + ], + "title": "OpenAISystemMessageParam", + "description": "A system message providing instructions or context to the model." 
+ }, + "OpenAITokenLogProb": { + "properties": { + "token": { + "type": "string", + "title": "Token" + }, + "bytes": { + "title": "Bytes", + "items": { + "type": "integer" + }, + "type": "array" + }, + "logprob": { + "type": "number", + "title": "Logprob" + }, + "top_logprobs": { + "items": { + "$ref": "#/components/schemas/OpenAITopLogProb" + }, + "type": "array", + "title": "Top Logprobs" + } + }, + "type": "object", + "required": [ + "token", + "logprob", + "top_logprobs" + ], + "title": "OpenAITokenLogProb", + "description": "The log probability for a token from an OpenAI-compatible chat completion response." + }, + "OpenAIToolMessageParam": { + "properties": { + "role": { + "type": "string", + "const": "tool", + "title": "Role", + "default": "tool" + }, + "tool_call_id": { + "type": "string", + "title": "Tool Call Id" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + "type": "array" + } + ], + "title": "Content" + } + }, + "type": "object", + "required": [ + "tool_call_id", + "content" + ], + "title": "OpenAIToolMessageParam", + "description": "A message representing the result of a tool invocation in an OpenAI-compatible chat completion request." + }, + "OpenAITopLogProb": { + "properties": { + "token": { + "type": "string", + "title": "Token" + }, + "bytes": { + "title": "Bytes", + "items": { + "type": "integer" + }, + "type": "array" + }, + "logprob": { + "type": "number", + "title": "Logprob" + } + }, + "type": "object", + "required": [ + "token", + "logprob" + ], + "title": "OpenAITopLogProb", + "description": "The top log probability for a token from an OpenAI-compatible chat completion response." + }, + "OpenAIUserMessageParam-Input": { + "properties": { + "role": { + "type": "string", + "const": "user", + "title": "Role", + "default": "user" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartImageParam" + }, + { + "$ref": "#/components/schemas/OpenAIFile" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "file": "#/components/schemas/OpenAIFile", + "image_url": "#/components/schemas/OpenAIChatCompletionContentPartImageParam", + "text": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "name": { + "title": "Name", + "type": "string" + } + }, + "type": "object", + "required": [ + "content" + ], + "title": "OpenAIUserMessageParam", + "description": "A message from the user in an OpenAI-compatible chat completion request." 
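The tool and user message schemas above pair up as in this sketch: the tool message echoes the id of the assistant tool call it answers, and the user message shows the text/image content-part variants (the URL is a placeholder).

# Illustrative OpenAIToolMessageParam returning a tool result.
tool_message = {
    "role": "tool",
    "tool_call_id": "call_abc",       # must match the assistant tool call being answered
    "content": '{"temperature_c": 18}',
}

# Illustrative OpenAIUserMessageParam with mixed content parts.
user_message = {
    "role": "user",
    "content": [
        {"type": "text", "text": "What is in this picture?"},
        {"type": "image_url", "image_url": {"url": "https://example.com/photo.png"}},
    ],
}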
+ }, + "OpenAIUserMessageParam-Output": { + "properties": { + "role": { + "type": "string", + "const": "user", + "title": "Role", + "default": "user" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartImageParam" + }, + { + "$ref": "#/components/schemas/OpenAIFile" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "file": "#/components/schemas/OpenAIFile", + "image_url": "#/components/schemas/OpenAIChatCompletionContentPartImageParam", + "text": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "name": { + "title": "Name", + "type": "string" + } + }, + "type": "object", + "required": [ + "content" + ], + "title": "OpenAIUserMessageParam", + "description": "A message from the user in an OpenAI-compatible chat completion request." + }, + "OptimizerConfig": { + "properties": { + "optimizer_type": { + "$ref": "#/components/schemas/OptimizerType" + }, + "lr": { + "type": "number", + "title": "Lr" + }, + "weight_decay": { + "type": "number", + "title": "Weight Decay" + }, + "num_warmup_steps": { + "type": "integer", + "title": "Num Warmup Steps" + } + }, + "type": "object", + "required": [ + "optimizer_type", + "lr", + "weight_decay", + "num_warmup_steps" + ], + "title": "OptimizerConfig", + "description": "Configuration parameters for the optimization algorithm." + }, + "OptimizerType": { + "type": "string", + "enum": [ + "adam", + "adamw", + "sgd" + ], + "title": "OptimizerType", + "description": "Available optimizer algorithms for training." + }, + "Order": { + "type": "string", + "enum": [ + "asc", + "desc" + ], + "title": "Order", + "description": "Sort order for paginated responses." + }, + "OutputTokensDetails": { + "properties": { + "reasoning_tokens": { + "type": "integer", + "title": "Reasoning Tokens" + } + }, + "additionalProperties": true, + "type": "object", + "required": [ + "reasoning_tokens" + ], + "title": "OutputTokensDetails" + }, + "PostTrainingJob": { + "properties": { + "job_uuid": { + "type": "string", + "title": "Job Uuid" + } + }, + "type": "object", + "required": [ + "job_uuid" + ], + "title": "PostTrainingJob" + }, + "Prompt": { + "properties": { + "prompt": { + "title": "Prompt", + "description": "The system prompt with variable placeholders", + "type": "string" + }, + "version": { + "type": "integer", + "minimum": 1.0, + "title": "Version", + "description": "Version (integer starting at 1, incremented on save)" + }, + "prompt_id": { + "type": "string", + "title": "Prompt Id", + "description": "Unique identifier in format 'pmpt_<48-digit-hash>'" + }, + "variables": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Variables", + "description": "List of variable names that can be used in the prompt template" + }, + "is_default": { + "type": "boolean", + "title": "Is Default", + "description": "Boolean indicating whether this version is the default version", + "default": false + } + }, + "type": "object", + "required": [ + "version", + "prompt_id" + ], + "title": "Prompt", + "description": "A prompt resource representing a stored OpenAI Compatible prompt template in Llama Stack." 
+ }, + "ProviderInfo": { + "properties": { + "api": { + "type": "string", + "title": "Api" + }, + "provider_id": { + "type": "string", + "title": "Provider Id" + }, + "provider_type": { + "type": "string", + "title": "Provider Type" + }, + "config": { + "additionalProperties": true, + "type": "object", + "title": "Config" + }, + "health": { + "additionalProperties": true, + "type": "object", + "title": "Health" + } + }, + "type": "object", + "required": [ + "api", + "provider_id", + "provider_type", + "config", + "health" + ], + "title": "ProviderInfo", + "description": "Information about a registered provider including its configuration and health status." + }, + "QueryChunksResponse": { + "properties": { + "chunks": { + "items": { + "$ref": "#/components/schemas/Chunk-Output" + }, + "type": "array", + "title": "Chunks" + }, + "scores": { + "items": { + "type": "number" + }, + "type": "array", + "title": "Scores" + } + }, + "type": "object", + "required": [ + "chunks", + "scores" + ], + "title": "QueryChunksResponse", + "description": "Response from querying chunks in a vector database." + }, + "RAGQueryConfig": { + "properties": { + "query_generator_config": { + "oneOf": [ + { + "$ref": "#/components/schemas/DefaultRAGQueryGeneratorConfig" + }, + { + "$ref": "#/components/schemas/LLMRAGQueryGeneratorConfig" + } + ], + "title": "Query Generator Config", + "default": { + "type": "default", + "separator": " " + }, + "discriminator": { + "propertyName": "type", + "mapping": { + "default": "#/components/schemas/DefaultRAGQueryGeneratorConfig", + "llm": "#/components/schemas/LLMRAGQueryGeneratorConfig" + } + } + }, + "max_tokens_in_context": { + "type": "integer", + "title": "Max Tokens In Context", + "default": 4096 + }, + "max_chunks": { + "type": "integer", + "title": "Max Chunks", + "default": 5 + }, + "chunk_template": { + "type": "string", + "title": "Chunk Template", + "default": "Result {index}\nContent: {chunk.content}\nMetadata: {metadata}\n" + }, + "mode": { + "default": "vector", + "$ref": "#/components/schemas/RAGSearchMode" + }, + "ranker": { + "title": "Ranker", + "oneOf": [ + { + "$ref": "#/components/schemas/RRFRanker" + }, + { + "$ref": "#/components/schemas/WeightedRanker" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "rrf": "#/components/schemas/RRFRanker", + "weighted": "#/components/schemas/WeightedRanker" + } + } + } + }, + "type": "object", + "title": "RAGQueryConfig", + "description": "Configuration for the RAG query generation." 
+ }, + "RAGQueryResult": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata" + } + }, + "type": "object", + "title": "RAGQueryResult", + "description": "Result of a RAG query containing retrieved content and metadata." + }, + "RAGSearchMode": { + "type": "string", + "enum": [ + "vector", + "keyword", + "hybrid" + ], + "title": "RAGSearchMode", + "description": "Search modes for RAG query retrieval:\n- VECTOR: Uses vector similarity search for semantic matching\n- KEYWORD: Uses keyword-based search for exact matching\n- HYBRID: Combines both vector and keyword search for better results" + }, + "RRFRanker": { + "properties": { + "type": { + "type": "string", + "const": "rrf", + "title": "Type", + "default": "rrf" + }, + "impact_factor": { + "type": "number", + "title": "Impact Factor", + "default": 60.0, + "minimum": 0.0 + } + }, + "type": "object", + "title": "RRFRanker", + "description": "Reciprocal Rank Fusion (RRF) ranker configuration." + }, + "RegexParserScoringFnParams": { + "properties": { + "type": { + "type": "string", + "const": "regex_parser", + "title": "Type", + "default": "regex_parser" + }, + "parsing_regexes": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Parsing Regexes", + "description": "Regex to extract the answer from generated response" + }, + "aggregation_functions": { + "items": { + "$ref": "#/components/schemas/AggregationFunctionType" + }, + "type": "array", + "title": "Aggregation Functions", + "description": "Aggregation functions to apply to the scores of each row" + } + }, + "type": "object", + "title": "RegexParserScoringFnParams", + "description": "Parameters for regex parser scoring function configuration." + }, + "RerankData": { + "properties": { + "index": { + "type": "integer", + "title": "Index" + }, + "relevance_score": { + "type": "number", + "title": "Relevance Score" + } + }, + "type": "object", + "required": [ + "index", + "relevance_score" + ], + "title": "RerankData", + "description": "A single rerank result from a reranking response." + }, + "RerankResponse": { + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/RerankData" + }, + "type": "array", + "title": "Data" + } + }, + "type": "object", + "required": [ + "data" + ], + "title": "RerankResponse", + "description": "Response from a reranking request." 
+ }, + "RouteInfo": { + "properties": { + "route": { + "type": "string", + "title": "Route" + }, + "method": { + "type": "string", + "title": "Method" + }, + "provider_types": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Provider Types" + } + }, + "type": "object", + "required": [ + "route", + "method", + "provider_types" + ], + "title": "RouteInfo", + "description": "Information about an API route including its path, method, and implementing providers." + }, + "RowsDataSource": { + "properties": { + "type": { + "type": "string", + "const": "rows", + "title": "Type", + "default": "rows" + }, + "rows": { + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array", + "title": "Rows" + } + }, + "type": "object", + "required": [ + "rows" + ], + "title": "RowsDataSource", + "description": "A dataset stored in rows." + }, + "RunShieldResponse": { + "properties": { + "violation": { + "$ref": "#/components/schemas/SafetyViolation" + } + }, + "type": "object", + "title": "RunShieldResponse", + "description": "Response from running a safety shield." + }, + "SafetyViolation": { + "properties": { + "violation_level": { + "$ref": "#/components/schemas/ViolationLevel" + }, + "user_message": { + "title": "User Message", + "type": "string" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata" + } + }, + "type": "object", + "required": [ + "violation_level" + ], + "title": "SafetyViolation", + "description": "Details of a safety violation detected by content moderation." + }, + "SamplingParams": { + "properties": { + "strategy": { + "oneOf": [ + { + "$ref": "#/components/schemas/GreedySamplingStrategy" + }, + { + "$ref": "#/components/schemas/TopPSamplingStrategy" + }, + { + "$ref": "#/components/schemas/TopKSamplingStrategy" + } + ], + "title": "Strategy", + "discriminator": { + "propertyName": "type", + "mapping": { + "greedy": "#/components/schemas/GreedySamplingStrategy", + "top_k": "#/components/schemas/TopKSamplingStrategy", + "top_p": "#/components/schemas/TopPSamplingStrategy" + } + } + }, + "max_tokens": { + "title": "Max Tokens", + "type": "integer" + }, + "repetition_penalty": { + "title": "Repetition Penalty", + "default": 1.0, + "type": "number" + }, + "stop": { + "title": "Stop", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object", + "title": "SamplingParams", + "description": "Sampling parameters." + }, + "ScoreBatchResponse": { + "properties": { + "dataset_id": { + "title": "Dataset Id", + "type": "string" + }, + "results": { + "additionalProperties": { + "$ref": "#/components/schemas/ScoringResult" + }, + "type": "object", + "title": "Results" + } + }, + "type": "object", + "required": [ + "results" + ], + "title": "ScoreBatchResponse", + "description": "Response from batch scoring operations on datasets." + }, + "ScoreResponse": { + "properties": { + "results": { + "additionalProperties": { + "$ref": "#/components/schemas/ScoringResult" + }, + "type": "object", + "title": "Results" + } + }, + "type": "object", + "required": [ + "results" + ], + "title": "ScoreResponse", + "description": "The response from scoring." 
+ }, + "ScoringFn-Output": { + "properties": { + "identifier": { + "type": "string", + "title": "Identifier", + "description": "Unique identifier for this resource in llama stack" + }, + "provider_resource_id": { + "title": "Provider Resource Id", + "description": "Unique identifier for this resource in the provider", + "type": "string" + }, + "provider_id": { + "type": "string", + "title": "Provider Id", + "description": "ID of the provider that owns this resource" + }, + "type": { + "type": "string", + "const": "scoring_function", + "title": "Type", + "default": "scoring_function" + }, + "description": { + "title": "Description", + "type": "string" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata", + "description": "Any additional metadata for this definition" + }, + "return_type": { + "oneOf": [ + { + "$ref": "#/components/schemas/StringType" + }, + { + "$ref": "#/components/schemas/NumberType" + }, + { + "$ref": "#/components/schemas/BooleanType" + }, + { + "$ref": "#/components/schemas/ArrayType" + }, + { + "$ref": "#/components/schemas/ObjectType" + }, + { + "$ref": "#/components/schemas/JsonType" + }, + { + "$ref": "#/components/schemas/UnionType" + }, + { + "$ref": "#/components/schemas/ChatCompletionInputType" + }, + { + "$ref": "#/components/schemas/CompletionInputType" + }, + { + "$ref": "#/components/schemas/AgentTurnInputType" + } + ], + "title": "Return Type", + "description": "The return type of the deterministic function", + "discriminator": { + "propertyName": "type", + "mapping": { + "agent_turn_input": "#/components/schemas/AgentTurnInputType", + "array": "#/components/schemas/ArrayType", + "boolean": "#/components/schemas/BooleanType", + "chat_completion_input": "#/components/schemas/ChatCompletionInputType", + "completion_input": "#/components/schemas/CompletionInputType", + "json": "#/components/schemas/JsonType", + "number": "#/components/schemas/NumberType", + "object": "#/components/schemas/ObjectType", + "string": "#/components/schemas/StringType", + "union": "#/components/schemas/UnionType" + } + } + }, + "params": { + "title": "Params", + "description": "The parameters for the scoring function for benchmark eval, these can be overridden for app eval", + "oneOf": [ + { + "$ref": "#/components/schemas/LLMAsJudgeScoringFnParams" + }, + { + "$ref": "#/components/schemas/RegexParserScoringFnParams" + }, + { + "$ref": "#/components/schemas/BasicScoringFnParams" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "basic": "#/components/schemas/BasicScoringFnParams", + "llm_as_judge": "#/components/schemas/LLMAsJudgeScoringFnParams", + "regex_parser": "#/components/schemas/RegexParserScoringFnParams" + } + } + } + }, + "type": "object", + "required": [ + "identifier", + "provider_id", + "return_type" + ], + "title": "ScoringFn", + "description": "A scoring function resource for evaluating model outputs." + }, + "ScoringResult": { + "properties": { + "score_rows": { + "items": { + "additionalProperties": true, + "type": "object" + }, + "type": "array", + "title": "Score Rows" + }, + "aggregated_results": { + "additionalProperties": true, + "type": "object", + "title": "Aggregated Results" + } + }, + "type": "object", + "required": [ + "score_rows", + "aggregated_results" + ], + "title": "ScoringResult", + "description": "A scoring result for a single row." 
+ }, + "SearchRankingOptions": { + "properties": { + "ranker": { + "title": "Ranker", + "type": "string" + }, + "score_threshold": { + "title": "Score Threshold", + "default": 0.0, + "type": "number" + } + }, + "type": "object", + "title": "SearchRankingOptions", + "description": "Options for ranking and filtering search results." + }, + "Shield": { + "properties": { + "identifier": { + "type": "string", + "title": "Identifier", + "description": "Unique identifier for this resource in llama stack" + }, + "provider_resource_id": { + "title": "Provider Resource Id", + "description": "Unique identifier for this resource in the provider", + "type": "string" + }, + "provider_id": { + "type": "string", + "title": "Provider Id", + "description": "ID of the provider that owns this resource" + }, + "type": { + "type": "string", + "const": "shield", + "title": "Type", + "default": "shield" + }, + "params": { + "title": "Params", + "additionalProperties": true, + "type": "object" + } + }, + "type": "object", + "required": [ + "identifier", + "provider_id" + ], + "title": "Shield", + "description": "A safety shield resource that can be used to check content." + }, + "ShieldCallStep-Output": { + "properties": { + "turn_id": { + "type": "string", + "title": "Turn Id" + }, + "step_id": { + "type": "string", + "title": "Step Id" + }, + "started_at": { + "title": "Started At", + "type": "string", + "format": "date-time" + }, + "completed_at": { + "title": "Completed At", + "type": "string", + "format": "date-time" + }, + "step_type": { + "type": "string", + "const": "shield_call", + "title": "Step Type", + "default": "shield_call" + }, + "violation": { + "$ref": "#/components/schemas/SafetyViolation" + } + }, + "type": "object", + "required": [ + "turn_id", + "step_id", + "violation" + ], + "title": "ShieldCallStep", + "description": "A shield call step in an agent turn." + }, + "StopReason": { + "type": "string", + "enum": [ + "end_of_turn", + "end_of_message", + "out_of_tokens" + ], + "title": "StopReason" + }, + "StringType": { + "properties": { + "type": { + "type": "string", + "const": "string", + "title": "Type", + "default": "string" + } + }, + "type": "object", + "title": "StringType", + "description": "Parameter type for string values." + }, + "SystemMessage": { + "properties": { + "role": { + "type": "string", + "const": "system", + "title": "Role", + "default": "system" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + } + }, + "type": "object", + "required": [ + "content" + ], + "title": "SystemMessage", + "description": "A system message providing instructions or context to the model." 
+ }, + "SystemMessageBehavior": { + "type": "string", + "enum": [ + "append", + "replace" + ], + "title": "SystemMessageBehavior", + "description": "Config for how to override the default system prompt." + }, + "TextContentItem": { + "properties": { + "type": { + "type": "string", + "const": "text", + "title": "Type", + "default": "text" + }, + "text": { + "type": "string", + "title": "Text" + } + }, + "type": "object", + "required": [ + "text" + ], + "title": "TextContentItem", + "description": "A text content item" + }, + "ToolCall": { + "properties": { + "call_id": { + "type": "string", + "title": "Call Id" + }, + "tool_name": { + "anyOf": [ + { + "$ref": "#/components/schemas/BuiltinTool" + }, + { + "type": "string" + } + ], + "title": "Tool Name" + }, + "arguments": { + "type": "string", + "title": "Arguments" + } + }, + "type": "object", + "required": [ + "call_id", + "tool_name", + "arguments" + ], + "title": "ToolCall" + }, + "ToolChoice": { + "type": "string", + "enum": [ + "auto", + "required", + "none" + ], + "title": "ToolChoice", + "description": "Whether tool use is required or automatic. This is a hint to the model which may not be followed. It depends on the Instruction Following capabilities of the model." + }, + "ToolConfig": { + "properties": { + "tool_choice": { + "anyOf": [ + { + "$ref": "#/components/schemas/ToolChoice" + }, + { + "type": "string" + } + ], + "title": "Tool Choice", + "default": "auto" + }, + "tool_prompt_format": { + "$ref": "#/components/schemas/ToolPromptFormat" + }, + "system_message_behavior": { + "default": "append", + "$ref": "#/components/schemas/SystemMessageBehavior" + } + }, + "type": "object", + "title": "ToolConfig", + "description": "Configuration for tool use." + }, + "ToolDef": { + "properties": { + "toolgroup_id": { + "title": "Toolgroup Id", + "type": "string" + }, + "name": { + "type": "string", + "title": "Name" + }, + "description": { + "title": "Description", + "type": "string" + }, + "input_schema": { + "title": "Input Schema", + "additionalProperties": true, + "type": "object" + }, + "output_schema": { + "title": "Output Schema", + "additionalProperties": true, + "type": "object" + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object" + } + }, + "type": "object", + "required": [ + "name" + ], + "title": "ToolDef", + "description": "Tool definition used in runtime contexts." + }, + "ToolExecutionStep-Output": { + "properties": { + "turn_id": { + "type": "string", + "title": "Turn Id" + }, + "step_id": { + "type": "string", + "title": "Step Id" + }, + "started_at": { + "title": "Started At", + "type": "string", + "format": "date-time" + }, + "completed_at": { + "title": "Completed At", + "type": "string", + "format": "date-time" + }, + "step_type": { + "type": "string", + "const": "tool_execution", + "title": "Step Type", + "default": "tool_execution" + }, + "tool_calls": { + "items": { + "$ref": "#/components/schemas/ToolCall" + }, + "type": "array", + "title": "Tool Calls" + }, + "tool_responses": { + "items": { + "$ref": "#/components/schemas/ToolResponse-Output" + }, + "type": "array", + "title": "Tool Responses" + } + }, + "type": "object", + "required": [ + "turn_id", + "step_id", + "tool_calls", + "tool_responses" + ], + "title": "ToolExecutionStep", + "description": "A tool execution step in an agent turn." 
+ }, + "ToolGroup": { + "properties": { + "identifier": { + "type": "string", + "title": "Identifier", + "description": "Unique identifier for this resource in llama stack" + }, + "provider_resource_id": { + "title": "Provider Resource Id", + "description": "Unique identifier for this resource in the provider", + "type": "string" + }, + "provider_id": { + "type": "string", + "title": "Provider Id", + "description": "ID of the provider that owns this resource" + }, + "type": { + "type": "string", + "const": "tool_group", + "title": "Type", + "default": "tool_group" + }, + "mcp_endpoint": { + "$ref": "#/components/schemas/URL" + }, + "args": { + "title": "Args", + "additionalProperties": true, + "type": "object" + } + }, + "type": "object", + "required": [ + "identifier", + "provider_id" + ], + "title": "ToolGroup", + "description": "A group of related tools managed together." + }, + "ToolInvocationResult": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "error_message": { + "title": "Error Message", + "type": "string" + }, + "error_code": { + "title": "Error Code", + "type": "integer" + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object" + } + }, + "type": "object", + "title": "ToolInvocationResult", + "description": "Result of a tool invocation." + }, + "ToolPromptFormat": { + "type": "string", + "enum": [ + "json", + "function_tag", + "python_list" + ], + "title": "ToolPromptFormat", + "description": "Prompt format for calling custom / zero shot tools." 
+ }, + "ToolResponse-Input": { + "properties": { + "call_id": { + "type": "string", + "title": "Call Id" + }, + "tool_name": { + "anyOf": [ + { + "$ref": "#/components/schemas/BuiltinTool" + }, + { + "type": "string" + } + ], + "title": "Tool Name" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object" + } + }, + "type": "object", + "required": [ + "call_id", + "tool_name", + "content" + ], + "title": "ToolResponse", + "description": "Response from a tool invocation." + }, + "ToolResponse-Output": { + "properties": { + "call_id": { + "type": "string", + "title": "Call Id" + }, + "tool_name": { + "anyOf": [ + { + "$ref": "#/components/schemas/BuiltinTool" + }, + { + "type": "string" + } + ], + "title": "Tool Name" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object" + } + }, + "type": "object", + "required": [ + "call_id", + "tool_name", + "content" + ], + "title": "ToolResponse", + "description": "Response from a tool invocation." 
+ }, + "ToolResponseMessage-Output": { + "properties": { + "role": { + "type": "string", + "const": "tool", + "title": "Role", + "default": "tool" + }, + "call_id": { + "type": "string", + "title": "Call Id" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + } + }, + "type": "object", + "required": [ + "call_id", + "content" + ], + "title": "ToolResponseMessage", + "description": "A message representing the result of a tool invocation." + }, + "TopKSamplingStrategy": { + "properties": { + "type": { + "type": "string", + "const": "top_k", + "title": "Type", + "default": "top_k" + }, + "top_k": { + "type": "integer", + "minimum": 1.0, + "title": "Top K" + } + }, + "type": "object", + "required": [ + "top_k" + ], + "title": "TopKSamplingStrategy", + "description": "Top-k sampling strategy that restricts sampling to the k most likely tokens." + }, + "TopPSamplingStrategy": { + "properties": { + "type": { + "type": "string", + "const": "top_p", + "title": "Type", + "default": "top_p" + }, + "temperature": { + "title": "Temperature", + "type": "number", + "minimum": 0.0 + }, + "top_p": { + "title": "Top P", + "default": 0.95, + "type": "number" + } + }, + "type": "object", + "required": [ + "temperature" + ], + "title": "TopPSamplingStrategy", + "description": "Top-p (nucleus) sampling strategy that samples from the smallest set of tokens with cumulative probability >= p." + }, + "TrainingConfig": { + "properties": { + "n_epochs": { + "type": "integer", + "title": "N Epochs" + }, + "max_steps_per_epoch": { + "type": "integer", + "title": "Max Steps Per Epoch", + "default": 1 + }, + "gradient_accumulation_steps": { + "type": "integer", + "title": "Gradient Accumulation Steps", + "default": 1 + }, + "max_validation_steps": { + "title": "Max Validation Steps", + "default": 1, + "type": "integer" + }, + "data_config": { + "$ref": "#/components/schemas/DataConfig" + }, + "optimizer_config": { + "$ref": "#/components/schemas/OptimizerConfig" + }, + "efficiency_config": { + "$ref": "#/components/schemas/EfficiencyConfig" + }, + "dtype": { + "title": "Dtype", + "default": "bf16", + "type": "string" + } + }, + "type": "object", + "required": [ + "n_epochs" + ], + "title": "TrainingConfig", + "description": "Comprehensive configuration for the training process." 
+ }, + "Turn": { + "properties": { + "turn_id": { + "type": "string", + "title": "Turn Id" + }, + "session_id": { + "type": "string", + "title": "Session Id" + }, + "input_messages": { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/UserMessage-Output" + }, + { + "$ref": "#/components/schemas/ToolResponseMessage-Output" + } + ] + }, + "type": "array", + "title": "Input Messages" + }, + "steps": { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/InferenceStep-Output" + }, + { + "$ref": "#/components/schemas/ToolExecutionStep-Output" + }, + { + "$ref": "#/components/schemas/ShieldCallStep-Output" + }, + { + "$ref": "#/components/schemas/MemoryRetrievalStep-Output" + } + ], + "discriminator": { + "propertyName": "step_type", + "mapping": { + "inference": "#/components/schemas/InferenceStep-Output", + "memory_retrieval": "#/components/schemas/MemoryRetrievalStep-Output", + "shield_call": "#/components/schemas/ShieldCallStep-Output", + "tool_execution": "#/components/schemas/ToolExecutionStep-Output" + } + } + }, + "type": "array", + "title": "Steps" + }, + "output_message": { + "$ref": "#/components/schemas/CompletionMessage-Output" + }, + "output_attachments": { + "title": "Output Attachments", + "items": { + "$ref": "#/components/schemas/Attachment-Output" + }, + "type": "array" + }, + "started_at": { + "type": "string", + "format": "date-time", + "title": "Started At" + }, + "completed_at": { + "title": "Completed At", + "type": "string", + "format": "date-time" + } + }, + "type": "object", + "required": [ + "turn_id", + "session_id", + "input_messages", + "steps", + "output_message", + "started_at" + ], + "title": "Turn", + "description": "A single turn in an interaction with an Agentic System." + }, + "URIDataSource": { + "properties": { + "type": { + "type": "string", + "const": "uri", + "title": "Type", + "default": "uri" + }, + "uri": { + "type": "string", + "title": "Uri" + } + }, + "type": "object", + "required": [ + "uri" + ], + "title": "URIDataSource", + "description": "A dataset that can be obtained from a URI." + }, + "URL": { + "properties": { + "uri": { + "type": "string", + "title": "Uri" + } + }, + "type": "object", + "required": [ + "uri" + ], + "title": "URL", + "description": "A URL reference to external content." + }, + "UnionType": { + "properties": { + "type": { + "type": "string", + "const": "union", + "title": "Type", + "default": "union" + } + }, + "type": "object", + "title": "UnionType", + "description": "Parameter type for union values." 
+ }, + "UserMessage-Input": { + "properties": { + "role": { + "type": "string", + "const": "user", + "title": "Role", + "default": "user" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "context": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Input" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Input", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Context" + } + }, + "type": "object", + "required": [ + "content" + ], + "title": "UserMessage", + "description": "A message from the user in a chat conversation." 
+ }, + "UserMessage-Output": { + "properties": { + "role": { + "type": "string", + "const": "user", + "title": "Role", + "default": "user" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Content" + }, + "context": { + "anyOf": [ + { + "type": "string" + }, + { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem-Output" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem-Output", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "type": "array" + } + ], + "title": "Context" + } + }, + "type": "object", + "required": [ + "content" + ], + "title": "UserMessage", + "description": "A message from the user in a chat conversation." + }, + "VectorStoreChunkingStrategyAuto": { + "properties": { + "type": { + "type": "string", + "const": "auto", + "title": "Type", + "default": "auto" + } + }, + "type": "object", + "title": "VectorStoreChunkingStrategyAuto", + "description": "Automatic chunking strategy for vector store files." + }, + "VectorStoreChunkingStrategyStatic": { + "properties": { + "type": { + "type": "string", + "const": "static", + "title": "Type", + "default": "static" + }, + "static": { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyStaticConfig" + } + }, + "type": "object", + "required": [ + "static" + ], + "title": "VectorStoreChunkingStrategyStatic", + "description": "Static chunking strategy with configurable parameters." + }, + "VectorStoreChunkingStrategyStaticConfig": { + "properties": { + "chunk_overlap_tokens": { + "type": "integer", + "title": "Chunk Overlap Tokens", + "default": 400 + }, + "max_chunk_size_tokens": { + "type": "integer", + "maximum": 4096.0, + "minimum": 100.0, + "title": "Max Chunk Size Tokens", + "default": 800 + } + }, + "type": "object", + "title": "VectorStoreChunkingStrategyStaticConfig", + "description": "Configuration for static chunking strategy." + }, + "VectorStoreContent": { + "properties": { + "type": { + "type": "string", + "const": "text", + "title": "Type" + }, + "text": { + "type": "string", + "title": "Text" + } + }, + "type": "object", + "required": [ + "type", + "text" + ], + "title": "VectorStoreContent", + "description": "Content item from a vector store file or search result." 
+ }, + "VectorStoreFileBatchObject": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "object": { + "type": "string", + "title": "Object", + "default": "vector_store.file_batch" + }, + "created_at": { + "type": "integer", + "title": "Created At" + }, + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + }, + "status": { + "anyOf": [ + { + "type": "string", + "const": "completed" + }, + { + "type": "string", + "const": "in_progress" + }, + { + "type": "string", + "const": "cancelled" + }, + { + "type": "string", + "const": "failed" + } + ], + "title": "Status" + }, + "file_counts": { + "$ref": "#/components/schemas/VectorStoreFileCounts" + } + }, + "type": "object", + "required": [ + "id", + "created_at", + "vector_store_id", + "status", + "file_counts" + ], + "title": "VectorStoreFileBatchObject", + "description": "OpenAI Vector Store File Batch object." + }, + "VectorStoreFileCounts": { + "properties": { + "completed": { + "type": "integer", + "title": "Completed" + }, + "cancelled": { + "type": "integer", + "title": "Cancelled" + }, + "failed": { + "type": "integer", + "title": "Failed" + }, + "in_progress": { + "type": "integer", + "title": "In Progress" + }, + "total": { + "type": "integer", + "title": "Total" + } + }, + "type": "object", + "required": [ + "completed", + "cancelled", + "failed", + "in_progress", + "total" + ], + "title": "VectorStoreFileCounts", + "description": "File processing status counts for a vector store." + }, + "VectorStoreFileLastError": { + "properties": { + "code": { + "anyOf": [ + { + "type": "string", + "const": "server_error" + }, + { + "type": "string", + "const": "rate_limit_exceeded" + } + ], + "title": "Code" + }, + "message": { + "type": "string", + "title": "Message" + } + }, + "type": "object", + "required": [ + "code", + "message" + ], + "title": "VectorStoreFileLastError", + "description": "Error information for failed vector store file processing." + }, + "VectorStoreFileObject": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "object": { + "type": "string", + "title": "Object", + "default": "vector_store.file" + }, + "attributes": { + "additionalProperties": true, + "type": "object", + "title": "Attributes" + }, + "chunking_strategy": { + "oneOf": [ + { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyAuto" + }, + { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyStatic" + } + ], + "title": "Chunking Strategy", + "discriminator": { + "propertyName": "type", + "mapping": { + "auto": "#/components/schemas/VectorStoreChunkingStrategyAuto", + "static": "#/components/schemas/VectorStoreChunkingStrategyStatic" + } + } + }, + "created_at": { + "type": "integer", + "title": "Created At" + }, + "last_error": { + "$ref": "#/components/schemas/VectorStoreFileLastError" + }, + "status": { + "anyOf": [ + { + "type": "string", + "const": "completed" + }, + { + "type": "string", + "const": "in_progress" + }, + { + "type": "string", + "const": "cancelled" + }, + { + "type": "string", + "const": "failed" + } + ], + "title": "Status" + }, + "usage_bytes": { + "type": "integer", + "title": "Usage Bytes", + "default": 0 + }, + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + } + }, + "type": "object", + "required": [ + "id", + "chunking_strategy", + "created_at", + "status", + "vector_store_id" + ], + "title": "VectorStoreFileObject", + "description": "OpenAI Vector Store File object." 
+ }, + "VectorStoreObject": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "object": { + "type": "string", + "title": "Object", + "default": "vector_store" + }, + "created_at": { + "type": "integer", + "title": "Created At" + }, + "name": { + "title": "Name", + "type": "string" + }, + "usage_bytes": { + "type": "integer", + "title": "Usage Bytes", + "default": 0 + }, + "file_counts": { + "$ref": "#/components/schemas/VectorStoreFileCounts" + }, + "status": { + "type": "string", + "title": "Status", + "default": "completed" + }, + "expires_after": { + "title": "Expires After", + "additionalProperties": true, + "type": "object" + }, + "expires_at": { + "title": "Expires At", + "type": "integer" + }, + "last_active_at": { + "title": "Last Active At", + "type": "integer" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata" + } + }, + "type": "object", + "required": [ + "id", + "created_at", + "file_counts" + ], + "title": "VectorStoreObject", + "description": "OpenAI Vector Store object." + }, + "VectorStoreSearchResponse": { + "properties": { + "file_id": { + "type": "string", + "title": "File Id" + }, + "filename": { + "type": "string", + "title": "Filename" + }, + "score": { + "type": "number", + "title": "Score" + }, + "attributes": { + "title": "Attributes", + "additionalProperties": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "boolean" + } + ] + }, + "type": "object" + }, + "content": { + "items": { + "$ref": "#/components/schemas/VectorStoreContent" + }, + "type": "array", + "title": "Content" + } + }, + "type": "object", + "required": [ + "file_id", + "filename", + "score", + "content" + ], + "title": "VectorStoreSearchResponse", + "description": "Response from searching a vector store." + }, + "VectorStoreSearchResponsePage": { + "properties": { + "object": { + "type": "string", + "title": "Object", + "default": "vector_store.search_results.page" + }, + "search_query": { + "type": "string", + "title": "Search Query" + }, + "data": { + "items": { + "$ref": "#/components/schemas/VectorStoreSearchResponse" + }, + "type": "array", + "title": "Data" + }, + "has_more": { + "type": "boolean", + "title": "Has More", + "default": false + }, + "next_page": { + "title": "Next Page", + "type": "string" + } + }, + "type": "object", + "required": [ + "search_query", + "data" + ], + "title": "VectorStoreSearchResponsePage", + "description": "Paginated response from searching a vector store." + }, + "VersionInfo": { + "properties": { + "version": { + "type": "string", + "title": "Version" + } + }, + "type": "object", + "required": [ + "version" + ], + "title": "VersionInfo", + "description": "Version information for the service." + }, + "ViolationLevel": { + "type": "string", + "enum": [ + "info", + "warn", + "error" + ], + "title": "ViolationLevel", + "description": "Severity level of a safety violation." + }, + "WeightedRanker": { + "properties": { + "type": { + "type": "string", + "const": "weighted", + "title": "Type", + "default": "weighted" + }, + "alpha": { + "type": "number", + "maximum": 1.0, + "minimum": 0.0, + "title": "Alpha", + "description": "Weight factor between 0 and 1. 0 means only keyword scores, 1 means only vector scores.", + "default": 0.5 + } + }, + "type": "object", + "title": "WeightedRanker", + "description": "Weighted ranker configuration that combines vector and keyword scores." 
+ }, + "_URLOrData": { + "properties": { + "url": { + "$ref": "#/components/schemas/URL" + }, + "data": { + "contentEncoding": "base64", + "title": "Data", + "type": "string" + } + }, + "type": "object", + "title": "_URLOrData", + "description": "A URL or a base64 encoded string" + }, + "__main_____agents_agent_id_session_Request": { + "properties": { + "agent_id": { + "type": "string", + "title": "Agent Id" + }, + "session_name": { + "type": "string", + "title": "Session Name" + } + }, + "type": "object", + "required": [ + "agent_id", + "session_name" + ], + "title": "_agents_agent_id_session_Request" + }, + "__main_____agents_agent_id_session_session_id_turn_Request": { + "properties": { + "agent_id": { + "type": "string", + "title": "Agent Id" + }, + "session_id": { + "type": "string", + "title": "Session Id" + }, + "messages": { + "$ref": "#/components/schemas/UserMessage-Input" + }, + "stream": { + "type": "boolean", + "title": "Stream", + "default": false + }, + "documents": { + "$ref": "#/components/schemas/Document" + }, + "toolgroups": { + "anyOf": [ + { + "type": "string" + }, + { + "$ref": "#/components/schemas/AgentToolGroupWithArgs" + } + ], + "title": "Toolgroups" + }, + "tool_config": { + "$ref": "#/components/schemas/ToolConfig" + } + }, + "type": "object", + "required": [ + "agent_id", + "session_id", + "messages", + "documents", + "toolgroups", + "tool_config" + ], + "title": "_agents_agent_id_session_session_id_turn_Request" + }, + "__main_____agents_agent_id_session_session_id_turn_turn_id_resume_Request": { + "properties": { + "agent_id": { + "type": "string", + "title": "Agent Id" + }, + "session_id": { + "type": "string", + "title": "Session Id" + }, + "turn_id": { + "type": "string", + "title": "Turn Id" + }, + "tool_responses": { + "$ref": "#/components/schemas/ToolResponse-Input" + }, + "stream": { + "type": "boolean", + "title": "Stream", + "default": false + } + }, + "type": "object", + "required": [ + "agent_id", + "session_id", + "turn_id", + "tool_responses" + ], + "title": "_agents_agent_id_session_session_id_turn_turn_id_resume_Request" + }, + "__main_____datasets_Request": { + "properties": { + "purpose": { + "$ref": "#/components/schemas/DatasetPurpose" + }, + "metadata": { + "type": "string", + "title": "Metadata" + }, + "dataset_id": { + "type": "string", + "title": "Dataset Id" + } + }, + "type": "object", + "required": [ + "purpose", + "metadata", + "dataset_id" + ], + "title": "_datasets_Request" + }, + "_batches_Request": { + "properties": { + "input_file_id": { + "type": "string", + "title": "Input File Id" + }, + "endpoint": { + "type": "string", + "title": "Endpoint" + }, + "completion_window": { + "type": "string", + "title": "Completion Window" + }, + "metadata": { + "type": "string", + "title": "Metadata" + }, + "idempotency_key": { + "type": "string", + "title": "Idempotency Key" + } + }, + "type": "object", + "required": [ + "input_file_id", + "endpoint", + "completion_window", + "metadata", + "idempotency_key" + ], + "title": "_batches_Request" + }, + "_batches_batch_id_cancel_Request": { + "properties": { + "batch_id": { + "type": "string", + "title": "Batch Id" + } + }, + "type": "object", + "required": [ + "batch_id" + ], + "title": "_batches_batch_id_cancel_Request" + }, + "_conversations_Request": { + "properties": { + "items": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage-Input" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": 
"#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalResponse" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + } + ], + "title": "Items", + "discriminator": { + "propertyName": "type", + "mapping": { + "file_search_call": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall", + "function_call": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall", + "function_call_output": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput", + "mcp_approval_request": "#/components/schemas/OpenAIResponseMCPApprovalRequest", + "mcp_approval_response": "#/components/schemas/OpenAIResponseMCPApprovalResponse", + "mcp_call": "#/components/schemas/OpenAIResponseOutputMessageMCPCall", + "mcp_list_tools": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools", + "message": "#/components/schemas/OpenAIResponseMessage-Input", + "web_search_call": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + } + } + }, + "metadata": { + "type": "string", + "title": "Metadata" + } + }, + "type": "object", + "required": [ + "items", + "metadata" + ], + "title": "_conversations_Request" + }, + "_conversations_conversation_id_Request": { + "properties": { + "conversation_id": { + "type": "string", + "title": "Conversation Id" + }, + "metadata": { + "type": "string", + "title": "Metadata" + } + }, + "type": "object", + "required": [ + "conversation_id", + "metadata" + ], + "title": "_conversations_conversation_id_Request" + }, + "_conversations_conversation_id_items_Request": { + "properties": { + "conversation_id": { + "type": "string", + "title": "Conversation Id" + }, + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage-Input" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalResponse" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + } + ], + "title": "Items" + } + }, + "type": "object", + "required": [ + "conversation_id", + "items" + ], + "title": "_conversations_conversation_id_items_Request" + }, + "_inference_rerank_Request": { + "properties": { + "model": { + "type": "string", + "title": "Model" + }, + "query": { + "type": "string", + "title": "Query" + }, + "items": { + "type": "string", + "title": "Items" + }, + "max_num_results": { + "type": "integer", + "title": "Max Num Results" + } + }, + "type": "object", + "required": [ + "model", + "query", + "items", + "max_num_results" + ], + "title": "_inference_rerank_Request" + }, + "_models_Request": { + "properties": { + "model_id": { + "type": "string", + "title": "Model Id" + }, + "provider_model_id": { + 
"type": "string", + "title": "Provider Model Id" + }, + "provider_id": { + "type": "string", + "title": "Provider Id" + }, + "metadata": { + "type": "string", + "title": "Metadata" + }, + "model_type": { + "$ref": "#/components/schemas/ModelType" + } + }, + "type": "object", + "required": [ + "model_id", + "provider_model_id", + "provider_id", + "metadata", + "model_type" + ], + "title": "_models_Request" + }, + "_moderations_Request": { + "properties": { + "input": { + "type": "string", + "title": "Input" + }, + "model": { + "type": "string", + "title": "Model" + } + }, + "type": "object", + "required": [ + "input", + "model" + ], + "title": "_moderations_Request" + }, + "_prompts_Request": { + "properties": { + "prompt": { + "type": "string", + "title": "Prompt" + }, + "variables": { + "type": "string", + "title": "Variables" + } + }, + "type": "object", + "required": [ + "prompt", + "variables" + ], + "title": "_prompts_Request" + }, + "_prompts_prompt_id_Request": { + "properties": { + "prompt_id": { + "type": "string", + "title": "Prompt Id" + }, + "prompt": { + "type": "string", + "title": "Prompt" + }, + "version": { + "type": "integer", + "title": "Version" + }, + "variables": { + "type": "string", + "title": "Variables" + }, + "set_as_default": { + "type": "boolean", + "title": "Set As Default", + "default": true + } + }, + "type": "object", + "required": [ + "prompt_id", + "prompt", + "version", + "variables" + ], + "title": "_prompts_prompt_id_Request" + }, + "_prompts_prompt_id_set_default_version_Request": { + "properties": { + "prompt_id": { + "type": "string", + "title": "Prompt Id" + }, + "version": { + "type": "integer", + "title": "Version" + } + }, + "type": "object", + "required": [ + "prompt_id", + "version" + ], + "title": "_prompts_prompt_id_set_default_version_Request" + }, + "_responses_Request": { + "properties": { + "input": { + "type": "string", + "title": "Input" + }, + "model": { + "type": "string", + "title": "Model" + }, + "prompt": { + "$ref": "#/components/schemas/OpenAIResponsePrompt" + }, + "instructions": { + "type": "string", + "title": "Instructions" + }, + "previous_response_id": { + "type": "string", + "title": "Previous Response Id" + }, + "conversation": { + "type": "string", + "title": "Conversation" + }, + "store": { + "type": "boolean", + "title": "Store", + "default": true + }, + "stream": { + "type": "boolean", + "title": "Stream", + "default": false + }, + "temperature": { + "type": "number", + "title": "Temperature" + }, + "text": { + "$ref": "#/components/schemas/OpenAIResponseText" + }, + "tools": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseInputToolWebSearch" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolFileSearch" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolFunction" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolMCP" + } + ], + "title": "Tools", + "discriminator": { + "propertyName": "type", + "mapping": { + "file_search": "#/components/schemas/OpenAIResponseInputToolFileSearch", + "function": "#/components/schemas/OpenAIResponseInputToolFunction", + "mcp": "#/components/schemas/OpenAIResponseInputToolMCP", + "web_search": "#/components/schemas/OpenAIResponseInputToolWebSearch", + "web_search_preview": "#/components/schemas/OpenAIResponseInputToolWebSearch", + "web_search_preview_2025_03_11": "#/components/schemas/OpenAIResponseInputToolWebSearch" + } + } + }, + "include": { + "type": "string", + "title": "Include" + }, + "max_infer_iters": { + "type": 
"integer", + "title": "Max Infer Iters", + "default": 10 + } + }, + "type": "object", + "required": [ + "input", + "model", + "prompt", + "instructions", + "previous_response_id", + "conversation", + "temperature", + "text", + "tools", + "include" + ], + "title": "_responses_Request" + }, + "_scoring_score_Request": { + "properties": { + "input_rows": { + "type": "string", + "title": "Input Rows" + }, + "scoring_functions": { + "type": "string", + "title": "Scoring Functions" + } + }, + "type": "object", + "required": [ + "input_rows", + "scoring_functions" + ], + "title": "_scoring_score_Request" + }, + "_scoring_score_batch_Request": { + "properties": { + "dataset_id": { + "type": "string", + "title": "Dataset Id" + }, + "scoring_functions": { + "type": "string", + "title": "Scoring Functions" + }, + "save_results_dataset": { + "type": "boolean", + "title": "Save Results Dataset", + "default": false + } + }, + "type": "object", + "required": [ + "dataset_id", + "scoring_functions" + ], + "title": "_scoring_score_batch_Request" + }, + "_shields_Request": { + "properties": { + "shield_id": { + "type": "string", + "title": "Shield Id" + }, + "provider_shield_id": { + "type": "string", + "title": "Provider Shield Id" + }, + "provider_id": { + "type": "string", + "title": "Provider Id" + }, + "params": { + "type": "string", + "title": "Params" + } + }, + "type": "object", + "required": [ + "shield_id", + "provider_shield_id", + "provider_id", + "params" + ], + "title": "_shields_Request" + }, + "_tool_runtime_invoke_Request": { + "properties": { + "tool_name": { + "type": "string", + "title": "Tool Name" + }, + "kwargs": { + "type": "string", + "title": "Kwargs" + } + }, + "type": "object", + "required": [ + "tool_name", + "kwargs" + ], + "title": "_tool_runtime_invoke_Request" + }, + "_tool_runtime_rag_tool_query_Request": { + "properties": { + "content": { + "type": "string", + "title": "Content" + }, + "vector_store_ids": { + "type": "string", + "title": "Vector Store Ids" + }, + "query_config": { + "$ref": "#/components/schemas/RAGQueryConfig" + } + }, + "type": "object", + "required": [ + "content", + "vector_store_ids", + "query_config" + ], + "title": "_tool_runtime_rag_tool_query_Request" + }, + "_vector_io_query_Request": { + "properties": { + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + }, + "query": { + "type": "string", + "title": "Query" + }, + "params": { + "type": "string", + "title": "Params" + } + }, + "type": "object", + "required": [ + "vector_store_id", + "query", + "params" + ], + "title": "_vector_io_query_Request" + }, + "_vector_stores_vector_store_id_Request": { + "properties": { + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + }, + "name": { + "type": "string", + "title": "Name" + }, + "expires_after": { + "type": "string", + "title": "Expires After" + }, + "metadata": { + "type": "string", + "title": "Metadata" + } + }, + "type": "object", + "required": [ + "vector_store_id", + "name", + "expires_after", + "metadata" + ], + "title": "_vector_stores_vector_store_id_Request" + }, + "_vector_stores_vector_store_id_file_batches_batch_id_cancel_Request": { + "properties": { + "batch_id": { + "type": "string", + "title": "Batch Id" + }, + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + } + }, + "type": "object", + "required": [ + "batch_id", + "vector_store_id" + ], + "title": "_vector_stores_vector_store_id_file_batches_batch_id_cancel_Request" + }, + 
"_vector_stores_vector_store_id_files_Request": { + "properties": { + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + }, + "file_id": { + "type": "string", + "title": "File Id" + }, + "attributes": { + "type": "string", + "title": "Attributes" + }, + "chunking_strategy": { + "anyOf": [ + { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyAuto" + }, + { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyStatic" + } + ], + "title": "Chunking Strategy" + } + }, + "type": "object", + "required": [ + "vector_store_id", + "file_id", + "attributes", + "chunking_strategy" + ], + "title": "_vector_stores_vector_store_id_files_Request" + }, + "_vector_stores_vector_store_id_files_file_id_Request": { + "properties": { + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + }, + "file_id": { + "type": "string", + "title": "File Id" + }, + "attributes": { + "type": "string", + "title": "Attributes" + } + }, + "type": "object", + "required": [ + "vector_store_id", + "file_id", + "attributes" + ], + "title": "_vector_stores_vector_store_id_files_file_id_Request" + }, + "_vector_stores_vector_store_id_search_Request": { + "properties": { + "vector_store_id": { + "type": "string", + "title": "Vector Store Id" + }, + "query": { + "type": "string", + "title": "Query" + }, + "filters": { + "type": "string", + "title": "Filters" + }, + "max_num_results": { + "type": "integer", + "title": "Max Num Results", + "default": 10 + }, + "ranking_options": { + "$ref": "#/components/schemas/SearchRankingOptions" + }, + "rewrite_query": { + "type": "boolean", + "title": "Rewrite Query", + "default": false + }, + "search_mode": { + "type": "string", + "title": "Search Mode", + "default": "vector" + } + }, + "type": "object", + "required": [ + "vector_store_id", + "query", + "filters", + "ranking_options" + ], + "title": "_vector_stores_vector_store_id_search_Request" + }, + "Error": { + "description": "Error response from the API. 
Roughly follows RFC 7807.", + "properties": { + "status": { + "title": "Status", + "type": "integer" + }, + "title": { + "title": "Title", + "type": "string" + }, + "detail": { + "title": "Detail", + "type": "string" + }, + "instance": { + "title": "Instance", + "type": "string", + "nullable": true + } + }, + "required": [ + "status", + "title", + "detail" + ], + "title": "Error", + "type": "object" + }, + "Agent": { + "description": "An agent instance with configuration and metadata.", + "properties": { + "agent_id": { + "title": "Agent Id", + "type": "string" + }, + "agent_config": { + "$ref": "#/components/schemas/AgentConfig" + }, + "created_at": { + "format": "date-time", + "title": "Created At", + "type": "string" + } + }, + "required": [ + "agent_id", + "agent_config", + "created_at" + ], + "title": "Agent", + "type": "object" + }, + "AgentStepResponse": { + "description": "Response containing details of a specific agent step.", + "properties": { + "step": { + "discriminator": { + "mapping": { + "inference": "#/$defs/InferenceStep", + "memory_retrieval": "#/$defs/MemoryRetrievalStep", + "shield_call": "#/$defs/ShieldCallStep", + "tool_execution": "#/$defs/ToolExecutionStep" + }, + "propertyName": "step_type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/InferenceStep" + }, + { + "$ref": "#/components/schemas/ToolExecutionStep" + }, + { + "$ref": "#/components/schemas/ShieldCallStep" + }, + { + "$ref": "#/components/schemas/MemoryRetrievalStep" + } + ], + "title": "Step" + } + }, + "required": [ + "step" + ], + "title": "AgentStepResponse", + "type": "object" + }, + "CompletionMessage": { + "description": "A message containing the model's (assistant) response in a chat conversation.", + "properties": { + "role": { + "const": "assistant", + "default": "assistant", + "title": "Role", + "type": "string" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + { + "items": { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "stop_reason": { + "$ref": "#/components/schemas/StopReason" + }, + "tool_calls": { + "title": "Tool Calls", + "items": { + "$ref": "#/components/schemas/ToolCall" + }, + "type": "array" + } + }, + "required": [ + "content", + "stop_reason" + ], + "title": "CompletionMessage", + "type": "object" + }, + "InferenceStep": { + "description": "An inference step in an agent turn.", + "properties": { + "turn_id": { + "title": "Turn Id", + "type": "string" + }, + "step_id": { + "title": "Step Id", + "type": "string" + }, + "started_at": { + "title": "Started At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "completed_at": { + "title": "Completed At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "step_type": { + "const": "inference", + "default": "inference", + "title": "Step Type", + "type": "string" + }, + "model_response": { + "$ref": "#/components/schemas/CompletionMessage" + } + }, + "required": [ + "turn_id", + "step_id", + 
"model_response" + ], + "title": "InferenceStep", + "type": "object" + }, + "ListOpenAIResponseInputItem": { + "description": "List container for OpenAI response input items.", + "properties": { + "data": { + "items": { + "anyOf": [ + { + "discriminator": { + "mapping": { + "file_search_call": "#/$defs/OpenAIResponseOutputMessageFileSearchToolCall", + "function_call": "#/$defs/OpenAIResponseOutputMessageFunctionToolCall", + "mcp_approval_request": "#/$defs/OpenAIResponseMCPApprovalRequest", + "mcp_call": "#/$defs/OpenAIResponseOutputMessageMCPCall", + "mcp_list_tools": "#/$defs/OpenAIResponseOutputMessageMCPListTools", + "message": "#/$defs/OpenAIResponseMessage", + "web_search_call": "#/$defs/OpenAIResponseOutputMessageWebSearchToolCall" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest" + } + ] + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalResponse" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMessage" + } + ] + }, + "title": "Data", + "type": "array" + }, + "object": { + "const": "list", + "default": "list", + "title": "Object", + "type": "string" + } + }, + "required": [ + "data" + ], + "title": "ListOpenAIResponseInputItem", + "type": "object" + }, + "ListOpenAIResponseObject": { + "description": "Paginated list of OpenAI response objects with navigation metadata.", + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/OpenAIResponseObjectWithInput" + }, + "title": "Data", + "type": "array" + }, + "has_more": { + "title": "Has More", + "type": "boolean" + }, + "first_id": { + "title": "First Id", + "type": "string" + }, + "last_id": { + "title": "Last Id", + "type": "string" + }, + "object": { + "const": "list", + "default": "list", + "title": "Object", + "type": "string" + } + }, + "required": [ + "data", + "has_more", + "first_id", + "last_id" + ], + "title": "ListOpenAIResponseObject", + "type": "object" + }, + "MemoryRetrievalStep": { + "description": "A memory retrieval step in an agent turn.", + "properties": { + "turn_id": { + "title": "Turn Id", + "type": "string" + }, + "step_id": { + "title": "Step Id", + "type": "string" + }, + "started_at": { + "title": "Started At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "completed_at": { + "title": "Completed At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "step_type": { + "const": "memory_retrieval", + "default": "memory_retrieval", + "title": "Step Type", + "type": "string" + }, + "vector_store_ids": { + "title": "Vector Store Ids", + "type": "string" + }, + "inserted_context": { + "anyOf": [ + { + "type": "string" + }, + { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + 
{ + "items": { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + "type": "array" + } + ], + "title": "Inserted Context" + } + }, + "required": [ + "turn_id", + "step_id", + "vector_store_ids", + "inserted_context" + ], + "title": "MemoryRetrievalStep", + "type": "object" + }, + "OpenAIDeleteResponseObject": { + "description": "Response object confirming deletion of an OpenAI response.", + "properties": { + "id": { + "title": "Id", + "type": "string" + }, + "object": { + "const": "response", + "default": "response", + "title": "Object", + "type": "string" + }, + "deleted": { + "default": true, + "title": "Deleted", + "type": "boolean" + } + }, + "required": [ + "id" + ], + "title": "OpenAIDeleteResponseObject", + "type": "object" + }, + "PaginatedResponse": { + "description": "A generic paginated response that follows a simple format.", + "properties": { + "data": { + "items": { + "additionalProperties": true, + "type": "object" + }, + "title": "Data", + "type": "array" + }, + "has_more": { + "title": "Has More", + "type": "boolean" + }, + "url": { + "title": "Url", + "type": "string", + "nullable": true + } + }, + "required": [ + "data", + "has_more" + ], + "title": "PaginatedResponse", + "type": "object" + }, + "Session": { + "description": "A single session of an interaction with an Agentic System.", + "properties": { + "session_id": { + "title": "Session Id", + "type": "string" + }, + "session_name": { + "title": "Session Name", + "type": "string" + }, + "turns": { + "items": { + "$ref": "#/components/schemas/Turn" + }, + "title": "Turns", + "type": "array" + }, + "started_at": { + "format": "date-time", + "title": "Started At", + "type": "string" + } + }, + "required": [ + "session_id", + "session_name", + "turns", + "started_at" + ], + "title": "Session", + "type": "object" + }, + "ShieldCallStep": { + "description": "A shield call step in an agent turn.", + "properties": { + "turn_id": { + "title": "Turn Id", + "type": "string" + }, + "step_id": { + "title": "Step Id", + "type": "string" + }, + "started_at": { + "title": "Started At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "completed_at": { + "title": "Completed At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "step_type": { + "const": "shield_call", + "default": "shield_call", + "title": "Step Type", + "type": "string" + }, + "violation": { + "$ref": "#/components/schemas/SafetyViolation" + } + }, + "required": [ + "turn_id", + "step_id", + "violation" + ], + "title": "ShieldCallStep", + "type": "object" + }, + "ToolExecutionStep": { + "description": "A tool execution step in an agent turn.", + "properties": { + "turn_id": { + "title": "Turn Id", + "type": "string" + }, + "step_id": { + "title": "Step Id", + "type": "string" + }, + "started_at": { + "title": "Started At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "completed_at": { + "title": "Completed At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "step_type": { + "const": "tool_execution", + "default": "tool_execution", + "title": "Step Type", + "type": "string" + }, + "tool_calls": { + "items": { + "$ref": "#/components/schemas/ToolCall" + }, + "title": "Tool Calls", + "type": "array" + }, + "tool_responses": { + "items": { + "$ref": 
"#/components/schemas/ToolResponse" + }, + "title": "Tool Responses", + "type": "array" + } + }, + "required": [ + "turn_id", + "step_id", + "tool_calls", + "tool_responses" + ], + "title": "ToolExecutionStep", + "type": "object" + }, + "ToolResponse": { + "description": "Response from a tool invocation.", + "properties": { + "call_id": { + "title": "Call Id", + "type": "string" + }, + "tool_name": { + "anyOf": [ + { + "$ref": "#/components/schemas/BuiltinTool" + }, + { + "type": "string" + } + ], + "title": "Tool Name" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + { + "items": { + "discriminator": { + "mapping": { + "image": "#/$defs/ImageContentItem", + "text": "#/$defs/TextContentItem" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object", + "nullable": true + } + }, + "required": [ + "call_id", + "tool_name", + "content" + ], + "title": "ToolResponse", + "type": "object" + }, + "ListBatchesResponse": { + "description": "Response containing a list of batch objects.", + "properties": { + "object": { + "const": "list", + "default": "list", + "title": "Object", + "type": "string" + }, + "data": { + "description": "List of batch objects", + "items": { + "$ref": "#/components/schemas/Batch" + }, + "title": "Data", + "type": "array" + }, + "first_id": { + "description": "ID of the first batch in the list", + "title": "First Id", + "type": "string", + "nullable": true + }, + "last_id": { + "description": "ID of the last batch in the list", + "title": "Last Id", + "type": "string", + "nullable": true + }, + "has_more": { + "default": false, + "description": "Whether there are more batches available", + "title": "Has More", + "type": "boolean" + } + }, + "required": [ + "data" + ], + "title": "ListBatchesResponse", + "type": "object" + }, + "ConversationDeletedResource": { + "description": "Response for deleted conversation.", + "properties": { + "id": { + "description": "The deleted conversation identifier", + "title": "Id", + "type": "string" + }, + "object": { + "default": "conversation.deleted", + "description": "Object type", + "title": "Object", + "type": "string" + }, + "deleted": { + "default": true, + "description": "Whether the object was deleted", + "title": "Deleted", + "type": "boolean" + } + }, + "required": [ + "id" + ], + "title": "ConversationDeletedResource", + "type": "object" + }, + "ConversationItemDeletedResource": { + "description": "Response for deleted conversation item.", + "properties": { + "id": { + "description": "The deleted item identifier", + "title": "Id", + "type": "string" + }, + "object": { + "default": "conversation.item.deleted", + "description": "Object type", + "title": "Object", + "type": "string" + }, + "deleted": { + "default": true, + "description": "Whether the object was deleted", + "title": "Deleted", + "type": "boolean" + } + }, + "required": [ + "id" + ], + "title": "ConversationItemDeletedResource", + "type": "object" + }, + "ListOpenAIFileResponse": { + "description": "Response for listing 
files in OpenAI Files API.", + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/OpenAIFileObject" + }, + "title": "Data", + "type": "array" + }, + "has_more": { + "title": "Has More", + "type": "boolean" + }, + "first_id": { + "title": "First Id", + "type": "string" + }, + "last_id": { + "title": "Last Id", + "type": "string" + }, + "object": { + "const": "list", + "default": "list", + "title": "Object", + "type": "string" + } + }, + "required": [ + "data", + "has_more", + "first_id", + "last_id" + ], + "title": "ListOpenAIFileResponse", + "type": "object" + }, + "OpenAIFileDeleteResponse": { + "description": "Response for deleting a file in OpenAI Files API.", + "properties": { + "id": { + "title": "Id", + "type": "string" + }, + "object": { + "const": "file", + "default": "file", + "title": "Object", + "type": "string" + }, + "deleted": { + "title": "Deleted", + "type": "boolean" + } + }, + "required": [ + "id", + "deleted" + ], + "title": "OpenAIFileDeleteResponse", + "type": "object" + }, + "ListOpenAIChatCompletionResponse": { + "description": "Response from listing OpenAI-compatible chat completions.", + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/OpenAICompletionWithInputMessages" + }, + "title": "Data", + "type": "array" + }, + "has_more": { + "title": "Has More", + "type": "boolean" + }, + "first_id": { + "title": "First Id", + "type": "string" + }, + "last_id": { + "title": "Last Id", + "type": "string" + }, + "object": { + "const": "list", + "default": "list", + "title": "Object", + "type": "string" + } + }, + "required": [ + "data", + "has_more", + "first_id", + "last_id" + ], + "title": "ListOpenAIChatCompletionResponse", + "type": "object" + }, + "OpenAIAssistantMessageParam": { + "description": "A message containing the model's (assistant) response in an OpenAI-compatible chat completion request.", + "properties": { + "role": { + "const": "assistant", + "default": "assistant", + "title": "Role", + "type": "string" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + "type": "array" + } + ], + "title": "Content", + "nullable": true + }, + "name": { + "title": "Name", + "type": "string", + "nullable": true + }, + "tool_calls": { + "title": "Tool Calls", + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionToolCall" + }, + "type": "array", + "nullable": true + } + }, + "title": "OpenAIAssistantMessageParam", + "type": "object" + }, + "OpenAIChoice": { + "description": "A choice from an OpenAI-compatible chat completion response.", + "properties": { + "message": { + "discriminator": { + "mapping": { + "assistant": "#/$defs/OpenAIAssistantMessageParam", + "developer": "#/$defs/OpenAIDeveloperMessageParam", + "system": "#/$defs/OpenAISystemMessageParam", + "tool": "#/$defs/OpenAIToolMessageParam", + "user": "#/$defs/OpenAIUserMessageParam" + }, + "propertyName": "role" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIUserMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAISystemMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIAssistantMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIToolMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIDeveloperMessageParam" + } + ], + "title": "Message" + }, + "finish_reason": { + "title": "Finish Reason", + "type": "string" + }, + "index": { + "title": "Index", + "type": "integer" + }, + "logprobs": { + "$ref": 
"#/components/schemas/OpenAIChoiceLogprobs", + "nullable": true + } + }, + "required": [ + "message", + "finish_reason", + "index" + ], + "title": "OpenAIChoice", + "type": "object" + }, + "OpenAIChoiceLogprobs": { + "description": "The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response.", + "properties": { + "content": { + "title": "Content", + "items": { + "$ref": "#/components/schemas/OpenAITokenLogProb" + }, + "type": "array", + "nullable": true + }, + "refusal": { + "title": "Refusal", + "items": { + "$ref": "#/components/schemas/OpenAITokenLogProb" + }, + "type": "array", + "nullable": true + } + }, + "title": "OpenAIChoiceLogprobs", + "type": "object" + }, + "OpenAICompletionWithInputMessages": { + "properties": { + "id": { + "title": "Id", + "type": "string" + }, + "choices": { + "items": { + "$ref": "#/components/schemas/OpenAIChoice" + }, + "title": "Choices", + "type": "array" + }, + "object": { + "const": "chat.completion", + "default": "chat.completion", + "title": "Object", + "type": "string" + }, + "created": { + "title": "Created", + "type": "integer" + }, + "model": { + "title": "Model", + "type": "string" + }, + "usage": { + "$ref": "#/components/schemas/OpenAIChatCompletionUsage", + "nullable": true + }, + "input_messages": { + "items": { + "discriminator": { + "mapping": { + "assistant": "#/$defs/OpenAIAssistantMessageParam", + "developer": "#/$defs/OpenAIDeveloperMessageParam", + "system": "#/$defs/OpenAISystemMessageParam", + "tool": "#/$defs/OpenAIToolMessageParam", + "user": "#/$defs/OpenAIUserMessageParam" + }, + "propertyName": "role" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIUserMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAISystemMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIAssistantMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIToolMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIDeveloperMessageParam" + } + ] + }, + "title": "Input Messages", + "type": "array" + } + }, + "required": [ + "id", + "choices", + "created", + "model", + "input_messages" + ], + "title": "OpenAICompletionWithInputMessages", + "type": "object" + }, + "OpenAIUserMessageParam": { + "description": "A message from the user in an OpenAI-compatible chat completion request.", + "properties": { + "role": { + "const": "user", + "default": "user", + "title": "Role", + "type": "string" + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "discriminator": { + "mapping": { + "file": "#/$defs/OpenAIFile", + "image_url": "#/$defs/OpenAIChatCompletionContentPartImageParam", + "text": "#/$defs/OpenAIChatCompletionContentPartTextParam" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartImageParam" + }, + { + "$ref": "#/components/schemas/OpenAIFile" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "name": { + "title": "Name", + "type": "string", + "nullable": true + } + }, + "required": [ + "content" + ], + "title": "OpenAIUserMessageParam", + "type": "object" + }, + "Checkpoint": { + "description": "Checkpoint created during training runs.", + "properties": { + "identifier": { + "title": "Identifier", + "type": "string" + }, + "created_at": { + "format": "date-time", + "title": "Created At", + "type": "string" + }, + "epoch": { + "title": "Epoch", + "type": "integer" + }, + 
"post_training_job_id": { + "title": "Post Training Job Id", + "type": "string" + }, + "path": { + "title": "Path", + "type": "string" + }, + "training_metrics": { + "$ref": "#/components/schemas/PostTrainingMetric", + "nullable": true + } + }, + "required": [ + "identifier", + "created_at", + "epoch", + "post_training_job_id", + "path" + ], + "title": "Checkpoint", + "type": "object" + }, + "PostTrainingJobArtifactsResponse": { + "description": "Artifacts of a finetuning job.", + "properties": { + "job_uuid": { + "title": "Job Uuid", + "type": "string" + }, + "checkpoints": { + "items": { + "$ref": "#/components/schemas/Checkpoint" + }, + "title": "Checkpoints", + "type": "array" + } + }, + "required": [ + "job_uuid" + ], + "title": "PostTrainingJobArtifactsResponse", + "type": "object" + }, + "PostTrainingJobStatusResponse": { + "description": "Status of a finetuning job.", + "properties": { + "job_uuid": { + "title": "Job Uuid", + "type": "string" + }, + "status": { + "$ref": "#/components/schemas/JobStatus" + }, + "scheduled_at": { + "title": "Scheduled At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "started_at": { + "title": "Started At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "completed_at": { + "title": "Completed At", + "format": "date-time", + "type": "string", + "nullable": true + }, + "resources_allocated": { + "title": "Resources Allocated", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "checkpoints": { + "items": { + "$ref": "#/components/schemas/Checkpoint" + }, + "title": "Checkpoints", + "type": "array" + } + }, + "required": [ + "job_uuid", + "status" + ], + "title": "PostTrainingJobStatusResponse", + "type": "object" + }, + "ScoringFn": { + "description": "A scoring function resource for evaluating model outputs.", + "properties": { + "identifier": { + "description": "Unique identifier for this resource in llama stack", + "title": "Identifier", + "type": "string" + }, + "provider_resource_id": { + "description": "Unique identifier for this resource in the provider", + "title": "Provider Resource Id", + "type": "string", + "nullable": true + }, + "provider_id": { + "description": "ID of the provider that owns this resource", + "title": "Provider Id", + "type": "string" + }, + "type": { + "const": "scoring_function", + "default": "scoring_function", + "title": "Type", + "type": "string" + }, + "description": { + "title": "Description", + "type": "string", + "nullable": true + }, + "metadata": { + "additionalProperties": true, + "description": "Any additional metadata for this definition", + "title": "Metadata", + "type": "object" + }, + "return_type": { + "description": "The return type of the deterministic function", + "discriminator": { + "mapping": { + "agent_turn_input": "#/$defs/AgentTurnInputType", + "array": "#/$defs/ArrayType", + "boolean": "#/$defs/BooleanType", + "chat_completion_input": "#/$defs/ChatCompletionInputType", + "completion_input": "#/$defs/CompletionInputType", + "json": "#/$defs/JsonType", + "number": "#/$defs/NumberType", + "object": "#/$defs/ObjectType", + "string": "#/$defs/StringType", + "union": "#/$defs/UnionType" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/StringType" + }, + { + "$ref": "#/components/schemas/NumberType" + }, + { + "$ref": "#/components/schemas/BooleanType" + }, + { + "$ref": "#/components/schemas/ArrayType" + }, + { + "$ref": "#/components/schemas/ObjectType" + }, + { + "$ref": 
"#/components/schemas/JsonType" + }, + { + "$ref": "#/components/schemas/UnionType" + }, + { + "$ref": "#/components/schemas/ChatCompletionInputType" + }, + { + "$ref": "#/components/schemas/CompletionInputType" + }, + { + "$ref": "#/components/schemas/AgentTurnInputType" + } + ], + "title": "Return Type" + }, + "params": { + "description": "The parameters for the scoring function for benchmark eval, these can be overridden for app eval", + "title": "Params", + "discriminator": { + "mapping": { + "basic": "#/$defs/BasicScoringFnParams", + "llm_as_judge": "#/$defs/LLMAsJudgeScoringFnParams", + "regex_parser": "#/$defs/RegexParserScoringFnParams" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/LLMAsJudgeScoringFnParams" + }, + { + "$ref": "#/components/schemas/RegexParserScoringFnParams" + }, + { + "$ref": "#/components/schemas/BasicScoringFnParams" + } + ], + "nullable": true + } + }, + "required": [ + "identifier", + "provider_id", + "return_type" + ], + "title": "ScoringFn", + "type": "object" + }, + "ListToolDefsResponse": { + "description": "Response containing a list of tool definitions.", + "properties": { + "data": { + "items": { + "$ref": "#/components/schemas/ToolDef" + }, + "title": "Data", + "type": "array" + } + }, + "required": [ + "data" + ], + "title": "ListToolDefsResponse", + "type": "object" + }, + "VectorStoreDeleteResponse": { + "description": "Response from deleting a vector store.", + "properties": { + "id": { + "title": "Id", + "type": "string" + }, + "object": { + "default": "vector_store.deleted", + "title": "Object", + "type": "string" + }, + "deleted": { + "default": true, + "title": "Deleted", + "type": "boolean" + } + }, + "required": [ + "id" + ], + "title": "VectorStoreDeleteResponse", + "type": "object" + }, + "VectorStoreFileContentsResponse": { + "description": "Response from retrieving the contents of a vector store file.", + "properties": { + "file_id": { + "title": "File Id", + "type": "string" + }, + "filename": { + "title": "Filename", + "type": "string" + }, + "attributes": { + "additionalProperties": true, + "title": "Attributes", + "type": "object" + }, + "content": { + "items": { + "$ref": "#/components/schemas/VectorStoreContent" + }, + "title": "Content", + "type": "array" + } + }, + "required": [ + "file_id", + "filename", + "attributes", + "content" + ], + "title": "VectorStoreFileContentsResponse", + "type": "object" + }, + "VectorStoreFileDeleteResponse": { + "description": "Response from deleting a vector store file.", + "properties": { + "id": { + "title": "Id", + "type": "string" + }, + "object": { + "default": "vector_store.file.deleted", + "title": "Object", + "type": "string" + }, + "deleted": { + "default": true, + "title": "Deleted", + "type": "boolean" + } + }, + "required": [ + "id" + ], + "title": "VectorStoreFileDeleteResponse", + "type": "object" + }, + "VectorStoreFilesListInBatchResponse": { + "description": "Response from listing files in a vector store file batch.", + "properties": { + "object": { + "default": "list", + "title": "Object", + "type": "string" + }, + "data": { + "items": { + "$ref": "#/components/schemas/VectorStoreFileObject" + }, + "title": "Data", + "type": "array" + }, + "first_id": { + "title": "First Id", + "type": "string", + "nullable": true + }, + "last_id": { + "title": "Last Id", + "type": "string", + "nullable": true + }, + "has_more": { + "default": false, + "title": "Has More", + "type": "boolean" + } + }, + "required": [ + "data" + ], + "title": 
"VectorStoreFilesListInBatchResponse", + "type": "object" + }, + "VectorStoreListFilesResponse": { + "description": "Response from listing files in a vector store.", + "properties": { + "object": { + "default": "list", + "title": "Object", + "type": "string" + }, + "data": { + "items": { + "$ref": "#/components/schemas/VectorStoreFileObject" + }, + "title": "Data", + "type": "array" + }, + "first_id": { + "title": "First Id", + "type": "string", + "nullable": true + }, + "last_id": { + "title": "Last Id", + "type": "string", + "nullable": true + }, + "has_more": { + "default": false, + "title": "Has More", + "type": "boolean" + } + }, + "required": [ + "data" + ], + "title": "VectorStoreListFilesResponse", + "type": "object" + }, + "VectorStoreListResponse": { + "description": "Response from listing vector stores.", + "properties": { + "object": { + "default": "list", + "title": "Object", + "type": "string" + }, + "data": { + "items": { + "$ref": "#/components/schemas/VectorStoreObject" + }, + "title": "Data", + "type": "array" + }, + "first_id": { + "title": "First Id", + "type": "string", + "nullable": true + }, + "last_id": { + "title": "Last Id", + "type": "string", + "nullable": true + }, + "has_more": { + "default": false, + "title": "Has More", + "type": "boolean" + } + }, + "required": [ + "data" + ], + "title": "VectorStoreListResponse", + "type": "object" + }, + "OpenAIResponseMessage": { + "description": "Corresponds to the various Message types in the Responses API.\nThey are all under one type because the Responses API gives them all\nthe same \"type\" value, and there is no way to tell them apart in certain\nscenarios.", + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "discriminator": { + "mapping": { + "input_file": "#/$defs/OpenAIResponseInputMessageContentFile", + "input_image": "#/$defs/OpenAIResponseInputMessageContentImage", + "input_text": "#/$defs/OpenAIResponseInputMessageContentText" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentImage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentFile" + } + ] + }, + "type": "array" + }, + { + "items": { + "discriminator": { + "mapping": { + "output_text": "#/$defs/OpenAIResponseOutputMessageContentOutputText", + "refusal": "#/$defs/OpenAIResponseContentPartRefusal" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageContentOutputText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseContentPartRefusal" + } + ] + }, + "type": "array" + } + ], + "title": "Content" + }, + "role": { + "anyOf": [ + { + "const": "system", + "type": "string" + }, + { + "const": "developer", + "type": "string" + }, + { + "const": "user", + "type": "string" + }, + { + "const": "assistant", + "type": "string" + } + ], + "title": "Role" + }, + "type": { + "const": "message", + "default": "message", + "title": "Type", + "type": "string" + }, + "id": { + "title": "Id", + "type": "string", + "nullable": true + }, + "status": { + "title": "Status", + "type": "string", + "nullable": true + } + }, + "required": [ + "content", + "role" + ], + "title": "OpenAIResponseMessage", + "type": "object" + }, + "OpenAIResponseObjectWithInput": { + "description": "OpenAI response object extended with input context information.", + "properties": { + "created_at": { + "title": "Created At", + 
"type": "integer" + }, + "error": { + "$ref": "#/components/schemas/OpenAIResponseError", + "nullable": true + }, + "id": { + "title": "Id", + "type": "string" + }, + "model": { + "title": "Model", + "type": "string" + }, + "object": { + "const": "response", + "default": "response", + "title": "Object", + "type": "string" + }, + "output": { + "items": { + "discriminator": { + "mapping": { + "file_search_call": "#/$defs/OpenAIResponseOutputMessageFileSearchToolCall", + "function_call": "#/$defs/OpenAIResponseOutputMessageFunctionToolCall", + "mcp_approval_request": "#/$defs/OpenAIResponseMCPApprovalRequest", + "mcp_call": "#/$defs/OpenAIResponseOutputMessageMCPCall", + "mcp_list_tools": "#/$defs/OpenAIResponseOutputMessageMCPListTools", + "message": "#/$defs/OpenAIResponseMessage", + "web_search_call": "#/$defs/OpenAIResponseOutputMessageWebSearchToolCall" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest" + } + ] + }, + "title": "Output", + "type": "array" + }, + "parallel_tool_calls": { + "default": false, + "title": "Parallel Tool Calls", + "type": "boolean" + }, + "previous_response_id": { + "title": "Previous Response Id", + "type": "string", + "nullable": true + }, + "prompt": { + "$ref": "#/components/schemas/OpenAIResponsePrompt", + "nullable": true + }, + "status": { + "title": "Status", + "type": "string" + }, + "temperature": { + "title": "Temperature", + "type": "number", + "nullable": true + }, + "text": { + "$ref": "#/components/schemas/OpenAIResponseText", + "default": { + "format": { + "type": "text" + } + } + }, + "top_p": { + "title": "Top P", + "type": "number", + "nullable": true + }, + "tools": { + "title": "Tools", + "items": { + "discriminator": { + "mapping": { + "file_search": "#/$defs/OpenAIResponseInputToolFileSearch", + "function": "#/$defs/OpenAIResponseInputToolFunction", + "mcp": "#/$defs/OpenAIResponseToolMCP", + "web_search": "#/$defs/OpenAIResponseInputToolWebSearch", + "web_search_preview": "#/$defs/OpenAIResponseInputToolWebSearch", + "web_search_preview_2025_03_11": "#/$defs/OpenAIResponseInputToolWebSearch" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseInputToolWebSearch" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolFileSearch" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolFunction" + }, + { + "$ref": "#/components/schemas/OpenAIResponseToolMCP" + } + ] + }, + "type": "array", + "nullable": true + }, + "truncation": { + "title": "Truncation", + "type": "string", + "nullable": true + }, + "usage": { + "$ref": "#/components/schemas/OpenAIResponseUsage", + "nullable": true + }, + "instructions": { + "title": "Instructions", + "type": "string", + "nullable": true + }, + "input": { + "items": { + "anyOf": [ + { + "discriminator": { + "mapping": { + "file_search_call": "#/$defs/OpenAIResponseOutputMessageFileSearchToolCall", + "function_call": "#/$defs/OpenAIResponseOutputMessageFunctionToolCall", + "mcp_approval_request": 
"#/$defs/OpenAIResponseMCPApprovalRequest", + "mcp_call": "#/$defs/OpenAIResponseOutputMessageMCPCall", + "mcp_list_tools": "#/$defs/OpenAIResponseOutputMessageMCPListTools", + "message": "#/$defs/OpenAIResponseMessage", + "web_search_call": "#/$defs/OpenAIResponseOutputMessageWebSearchToolCall" + }, + "propertyName": "type" + }, + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest" + } + ] + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalResponse" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMessage" + } + ] + }, + "title": "Input", + "type": "array" + } + }, + "required": [ + "created_at", + "id", + "model", + "output", + "status", + "input" + ], + "title": "OpenAIResponseObjectWithInput", + "type": "object" + }, + "ImageContentItem": { + "description": "A image content item", + "properties": { + "type": { + "const": "image", + "default": "image", + "title": "Type", + "type": "string" + }, + "image": { + "$ref": "#/components/schemas/_URLOrData" + } + }, + "required": [ + "image" + ], + "title": "ImageContentItem", + "type": "object" + }, + "PostTrainingMetric": { + "description": "Training metrics captured during post-training jobs.", + "properties": { + "epoch": { + "title": "Epoch", + "type": "integer" + }, + "train_loss": { + "title": "Train Loss", + "type": "number" + }, + "validation_loss": { + "title": "Validation Loss", + "type": "number" + }, + "perplexity": { + "title": "Perplexity", + "type": "number" + } + }, + "required": [ + "epoch", + "train_loss", + "validation_loss", + "perplexity" + ], + "title": "PostTrainingMetric", + "type": "object" + }, + "_safety_run_shield_Request": { + "properties": { + "shield_id": { + "title": "Shield Id", + "type": "string" + }, + "messages": { + "anyOf": [ + { + "$ref": "#/components/schemas/OpenAIUserMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAISystemMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIAssistantMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIToolMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIDeveloperMessageParam" + } + ], + "title": "Messages" + }, + "params": { + "title": "Params", + "type": "string" + } + }, + "required": [ + "shield_id", + "messages", + "params" + ], + "title": "_safety_run_shield_Request", + "type": "object" + } + }, + "responses": { + "BadRequest400": { + "description": "The request was invalid or malformed", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Error" + }, + "example": { + "status": 400, + "title": "Bad Request", + "detail": "The request was invalid or malformed" + } + } + } + }, + "TooManyRequests429": { + "description": "The client has sent too many requests in a given amount of time", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Error" + }, + "example": { + "status": 429, + "title": "Too Many Requests", + "detail": "You have exceeded the rate limit. 
Please try again later." + } + } + } + }, + "InternalServerError500": { + "description": "The server encountered an unexpected error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Error" + }, + "example": { + "status": 500, + "title": "Internal Server Error", + "detail": "An unexpected error occurred. Our team has been notified." + } + } + } + }, + "DefaultError": { + "description": "An unexpected error occurred", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Error" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/docs/static/stainless-llama-stack-spec.yaml b/docs/static/stainless-llama-stack-spec.yaml index 118a887d6..22f8d3d5d 100644 --- a/docs/static/stainless-llama-stack-spec.yaml +++ b/docs/static/stainless-llama-stack-spec.yaml @@ -3476,7 +3476,7 @@ paths: post: tags: - V1Beta - summary: Append Rows + summary: Append rows to a dataset. description: Generic endpoint - this would be replaced with actual implementation. operationId: append_rows_v1beta_datasetio_append_rows__dataset_id__post parameters: @@ -3518,16 +3518,10 @@ paths: get: tags: - V1Beta - summary: Iterrows + summary: Get a paginated list of rows from a dataset. description: Query endpoint for proper schema generation. operationId: iterrows_v1beta_datasetio_iterrows__dataset_id__get parameters: - - name: dataset_id - in: path - required: true - schema: - type: string - title: Dataset Id - name: limit in: query required: true @@ -3540,6 +3534,12 @@ paths: schema: type: integer title: Start Index + - name: dataset_id + in: path + required: true + schema: + type: string + title: Dataset Id responses: '200': description: A PaginatedResponse. @@ -3563,7 +3563,7 @@ paths: get: tags: - V1Beta - summary: List Datasets + summary: List all datasets. description: Response-only endpoint for proper schema generation. operationId: list_datasets_v1beta_datasets_get responses: @@ -3588,7 +3588,7 @@ paths: post: tags: - V1Beta - summary: Register Dataset + summary: Register a new dataset. description: Typed endpoint for proper schema generation. operationId: register_dataset_v1beta_datasets_post requestBody: @@ -3620,7 +3620,7 @@ paths: delete: tags: - V1Beta - summary: Unregister Dataset + summary: Unregister a dataset by its ID. description: Generic endpoint - this would be replaced with actual implementation. operationId: unregister_dataset_v1beta_datasets__dataset_id__delete parameters: @@ -3661,7 +3661,7 @@ paths: get: tags: - V1Beta - summary: Get Dataset + summary: Get a dataset by its ID. description: Query endpoint for proper schema generation. operationId: get_dataset_v1beta_datasets__dataset_id__get parameters: @@ -3694,7 +3694,7 @@ paths: get: tags: - V1Alpha - summary: List Agents + summary: List all agents. description: Query endpoint for proper schema generation. operationId: list_agents_v1alpha_agents_get parameters: @@ -3732,7 +3732,7 @@ paths: post: tags: - V1Alpha - summary: Create Agent + summary: Create an agent with the given configuration. description: Typed endpoint for proper schema generation. operationId: create_agent_v1alpha_agents_post requestBody: @@ -3764,7 +3764,7 @@ paths: delete: tags: - V1Alpha - summary: Delete Agent + summary: Delete an agent by its ID and its associated sessions and turns. description: Generic endpoint - this would be replaced with actual implementation. 
operationId: delete_agent_v1alpha_agents__agent_id__delete parameters: @@ -3783,7 +3783,7 @@ paths: required: true schema: type: string - description: 'Path parameter: agent_id' + description: The ID of the agent to delete. responses: '200': description: Successful Response @@ -3805,7 +3805,7 @@ paths: get: tags: - V1Alpha - summary: Get Agent + summary: Describe an agent by its ID. description: Query endpoint for proper schema generation. operationId: get_agent_v1alpha_agents__agent_id__get parameters: @@ -3815,6 +3815,7 @@ paths: schema: type: string title: Agent Id + description: ID of the agent. responses: '200': description: An Agent of the agent. @@ -3838,7 +3839,7 @@ paths: post: tags: - V1Alpha - summary: Create Agent Session + summary: Create a new session for an agent. description: Typed endpoint for proper schema generation. operationId: create_agent_session_v1alpha_agents__agent_id__session_post requestBody: @@ -3872,12 +3873,12 @@ paths: required: true schema: type: string - description: 'Path parameter: agent_id' + description: The ID of the agent to create the session for. /v1alpha/agents/{agent_id}/session/{session_id}: delete: tags: - V1Alpha - summary: Delete Agents Session + summary: Delete an agent session by its ID and its associated turns. description: Generic endpoint - this would be replaced with actual implementation. operationId: delete_agents_session_v1alpha_agents__agent_id__session__session_id__delete parameters: @@ -3891,18 +3892,18 @@ paths: required: true schema: title: Kwargs - - name: agent_id - in: path - required: true - schema: - type: string - description: 'Path parameter: agent_id' - name: session_id in: path required: true schema: type: string - description: 'Path parameter: session_id' + description: The ID of the session to delete. + - name: agent_id + in: path + required: true + schema: + type: string + description: The ID of the agent to delete the session for. responses: '200': description: Successful Response @@ -3924,28 +3925,30 @@ paths: get: tags: - V1Alpha - summary: Get Agents Session + summary: Retrieve an agent session by its ID. description: Query endpoint for proper schema generation. operationId: get_agents_session_v1alpha_agents__agent_id__session__session_id__get parameters: - - name: agent_id - in: path - required: true - schema: - type: string - title: Agent Id - - name: session_id - in: path - required: true - schema: - type: string - title: Session Id - name: turn_ids in: query required: true schema: type: string title: Turn Ids + - name: session_id + in: path + required: true + schema: + type: string + title: Session Id + description: The ID of the session to get. + - name: agent_id + in: path + required: true + schema: + type: string + title: Agent Id + description: The ID of the agent to get the session for. responses: '200': description: A Session. @@ -3969,7 +3972,7 @@ paths: post: tags: - V1Alpha - summary: Create Agent Turn + summary: Create a new turn for an agent. description: Typed endpoint for proper schema generation. operationId: create_agent_turn_v1alpha_agents__agent_id__session__session_id__turn_post requestBody: @@ -4003,18 +4006,18 @@ paths: required: true schema: type: string - description: 'Path parameter: agent_id' + description: The ID of the agent to create the turn for. - name: session_id in: path required: true schema: type: string - description: 'Path parameter: session_id' + description: The ID of the session to create the turn for. 
/v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}: get: tags: - V1Alpha - summary: Get Agents Turn + summary: Retrieve an agent turn by its ID. description: Query endpoint for proper schema generation. operationId: get_agents_turn_v1alpha_agents__agent_id__session__session_id__turn__turn_id__get parameters: @@ -4024,18 +4027,21 @@ paths: schema: type: string title: Agent Id + description: The ID of the agent to get the turn for. - name: session_id in: path required: true schema: type: string title: Session Id + description: The ID of the session to get the turn for. - name: turn_id in: path required: true schema: type: string title: Turn Id + description: The ID of the turn to get. responses: '200': description: A Turn. @@ -4059,7 +4065,7 @@ paths: post: tags: - V1Alpha - summary: Resume Agent Turn + summary: Resume an agent turn with executed tool call responses. description: Typed endpoint for proper schema generation. operationId: resume_agent_turn_v1alpha_agents__agent_id__session__session_id__turn__turn_id__resume_post requestBody: @@ -4093,24 +4099,24 @@ paths: required: true schema: type: string - description: 'Path parameter: agent_id' + description: The ID of the agent to resume. - name: session_id in: path required: true schema: type: string - description: 'Path parameter: session_id' + description: The ID of the session to resume. - name: turn_id in: path required: true schema: type: string - description: 'Path parameter: turn_id' + description: The ID of the turn to resume. /v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}/step/{step_id}: get: tags: - V1Alpha - summary: Get Agents Step + summary: Retrieve an agent step by its ID. description: Query endpoint for proper schema generation. operationId: get_agents_step_v1alpha_agents__agent_id__session__session_id__turn__turn_id__step__step_id__get parameters: @@ -4120,24 +4126,28 @@ paths: schema: type: string title: Agent Id + description: The ID of the agent to get the step for. - name: session_id in: path required: true schema: type: string title: Session Id - - name: step_id - in: path - required: true - schema: - type: string - title: Step Id + description: The ID of the session to get the step for. - name: turn_id in: path required: true schema: type: string title: Turn Id + description: The ID of the turn to get the step for. + - name: step_id + in: path + required: true + schema: + type: string + title: Step Id + description: The ID of the step to get. responses: '200': description: An AgentStepResponse. @@ -4161,16 +4171,10 @@ paths: get: tags: - V1Alpha - summary: List Agent Sessions + summary: List all session(s) of a given agent. description: Query endpoint for proper schema generation. operationId: list_agent_sessions_v1alpha_agents__agent_id__sessions_get parameters: - - name: agent_id - in: path - required: true - schema: - type: string - title: Agent Id - name: limit in: query required: true @@ -4183,6 +4187,13 @@ paths: schema: type: integer title: Start Index + - name: agent_id + in: path + required: true + schema: + type: string + title: Agent Id + description: The ID of the agent to list sessions for. responses: '200': description: A PaginatedResponse. @@ -4206,7 +4217,7 @@ paths: get: tags: - V1Alpha - summary: List Benchmarks + summary: List all benchmarks. description: Response-only endpoint for proper schema generation. 
operationId: list_benchmarks_v1alpha_eval_benchmarks_get responses: @@ -4231,7 +4242,7 @@ paths: post: tags: - V1Alpha - summary: Register Benchmark + summary: Register a benchmark. description: Generic endpoint - this would be replaced with actual implementation. operationId: register_benchmark_v1alpha_eval_benchmarks_post parameters: @@ -4267,7 +4278,7 @@ paths: delete: tags: - V1Alpha - summary: Unregister Benchmark + summary: Unregister a benchmark. description: Generic endpoint - this would be replaced with actual implementation. operationId: unregister_benchmark_v1alpha_eval_benchmarks__benchmark_id__delete parameters: @@ -4286,7 +4297,7 @@ paths: required: true schema: type: string - description: 'Path parameter: benchmark_id' + description: The ID of the benchmark to unregister. responses: '200': description: Successful Response @@ -4308,7 +4319,7 @@ paths: get: tags: - V1Alpha - summary: Get Benchmark + summary: Get a benchmark by its ID. description: Query endpoint for proper schema generation. operationId: get_benchmark_v1alpha_eval_benchmarks__benchmark_id__get parameters: @@ -4318,6 +4329,7 @@ paths: schema: type: string title: Benchmark Id + description: The ID of the benchmark to get. responses: '200': description: A Benchmark. @@ -4341,7 +4353,7 @@ paths: post: tags: - V1Alpha - summary: Evaluate Rows + summary: Evaluate a list of rows on a benchmark. description: Typed endpoint for proper schema generation. operationId: evaluate_rows_v1alpha_eval_benchmarks__benchmark_id__evaluations_post requestBody: @@ -4375,12 +4387,12 @@ paths: required: true schema: type: string - description: 'Path parameter: benchmark_id' + description: The ID of the benchmark to run the evaluation on. /v1alpha/eval/benchmarks/{benchmark_id}/jobs: post: tags: - V1Alpha - summary: Run Eval + summary: Run an evaluation on a benchmark. description: Typed endpoint for proper schema generation. operationId: run_eval_v1alpha_eval_benchmarks__benchmark_id__jobs_post requestBody: @@ -4414,12 +4426,12 @@ paths: required: true schema: type: string - description: 'Path parameter: benchmark_id' + description: The ID of the benchmark to run the evaluation on. /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}: delete: tags: - V1Alpha - summary: Job Cancel + summary: Cancel a job. description: Generic endpoint - this would be replaced with actual implementation. operationId: job_cancel_v1alpha_eval_benchmarks__benchmark_id__jobs__job_id__delete parameters: @@ -4438,13 +4450,13 @@ paths: required: true schema: type: string - description: 'Path parameter: benchmark_id' + description: The ID of the benchmark to run the evaluation on. - name: job_id in: path required: true schema: type: string - description: 'Path parameter: job_id' + description: The ID of the job to cancel. responses: '200': description: Successful Response @@ -4466,7 +4478,7 @@ paths: get: tags: - V1Alpha - summary: Job Status + summary: Get the status of a job. description: Query endpoint for proper schema generation. operationId: job_status_v1alpha_eval_benchmarks__benchmark_id__jobs__job_id__get parameters: @@ -4476,12 +4488,14 @@ paths: schema: type: string title: Benchmark Id + description: The ID of the benchmark to run the evaluation on. - name: job_id in: path required: true schema: type: string title: Job Id + description: The ID of the job to get the status of. responses: '200': description: The status of the evaluation job. @@ -4505,7 +4519,7 @@ paths: get: tags: - V1Alpha - summary: Job Result + summary: Get the result of a job. 
description: Query endpoint for proper schema generation. operationId: job_result_v1alpha_eval_benchmarks__benchmark_id__jobs__job_id__result_get parameters: @@ -4515,12 +4529,14 @@ paths: schema: type: string title: Benchmark Id + description: The ID of the benchmark to run the evaluation on. - name: job_id in: path required: true schema: type: string title: Job Id + description: The ID of the job to get the result of. responses: '200': description: The result of the job. @@ -4544,7 +4560,7 @@ paths: post: tags: - V1Alpha - summary: Rerank + summary: Rerank a list of documents based on their relevance to a query. description: Typed endpoint for proper schema generation. operationId: rerank_v1alpha_inference_rerank_post requestBody: @@ -4576,7 +4592,7 @@ paths: get: tags: - V1Alpha - summary: Get Training Job Artifacts + summary: Get the artifacts of a training job. description: Query endpoint for proper schema generation. operationId: get_training_job_artifacts_v1alpha_post_training_job_artifacts_get parameters: @@ -4609,7 +4625,7 @@ paths: post: tags: - V1Alpha - summary: Cancel Training Job + summary: Cancel a training job. description: Generic endpoint - this would be replaced with actual implementation. operationId: cancel_training_job_v1alpha_post_training_job_cancel_post parameters: @@ -4645,7 +4661,7 @@ paths: get: tags: - V1Alpha - summary: Get Training Job Status + summary: Get the status of a training job. description: Query endpoint for proper schema generation. operationId: get_training_job_status_v1alpha_post_training_job_status_get parameters: @@ -4678,7 +4694,7 @@ paths: get: tags: - V1Alpha - summary: Get Training Jobs + summary: Get all training jobs. description: Response-only endpoint for proper schema generation. operationId: get_training_jobs_v1alpha_post_training_jobs_get responses: @@ -4704,7 +4720,7 @@ paths: post: tags: - V1Alpha - summary: Preference Optimize + summary: Run preference optimization of a model. description: Typed endpoint for proper schema generation. operationId: preference_optimize_v1alpha_post_training_preference_optimize_post requestBody: @@ -4736,7 +4752,7 @@ paths: post: tags: - V1Alpha - summary: Supervised Fine Tune + summary: Run supervised fine-tuning of a model. description: Typed endpoint for proper schema generation. operationId: supervised_fine_tune_v1alpha_post_training_supervised_fine_tune_post requestBody: @@ -4768,7 +4784,7 @@ paths: get: tags: - V1 - summary: List Batches + summary: List all batches for the current user. description: Query endpoint for proper schema generation. operationId: list_batches_v1_batches_get parameters: @@ -4807,7 +4823,7 @@ paths: post: tags: - V1 - summary: Create Batch + summary: Create a new batch for processing multiple API requests. description: Typed endpoint for proper schema generation. operationId: create_batch_v1_batches_post requestBody: @@ -4839,7 +4855,7 @@ paths: get: tags: - V1 - summary: Retrieve Batch + summary: Retrieve information about a specific batch. description: Query endpoint for proper schema generation. operationId: retrieve_batch_v1_batches__batch_id__get parameters: @@ -4849,6 +4865,7 @@ paths: schema: type: string title: Batch Id + description: The ID of the batch to retrieve. responses: '200': description: The batch object. @@ -4872,7 +4889,7 @@ paths: post: tags: - V1 - summary: Cancel Batch + summary: Cancel a batch that is in progress. description: Typed endpoint for proper schema generation. 
operationId: cancel_batch_v1_batches__batch_id__cancel_post requestBody: @@ -4906,12 +4923,12 @@ paths: required: true schema: type: string - description: 'Path parameter: batch_id' + description: The ID of the batch to cancel. /v1/chat/completions: get: tags: - V1 - summary: List Chat Completions + summary: List chat completions. description: Query endpoint for proper schema generation. operationId: list_chat_completions_v1_chat_completions_get parameters: @@ -4962,7 +4979,7 @@ paths: post: tags: - V1 - summary: Openai Chat Completion + summary: Create chat completions. description: Typed endpoint for proper schema generation. operationId: openai_chat_completion_v1_chat_completions_post requestBody: @@ -4994,7 +5011,7 @@ paths: get: tags: - V1 - summary: Get Chat Completion + summary: Get chat completion. description: Query endpoint for proper schema generation. operationId: get_chat_completion_v1_chat_completions__completion_id__get parameters: @@ -5004,6 +5021,7 @@ paths: schema: type: string title: Completion Id + description: ID of the chat completion. responses: '200': description: A OpenAICompletionWithInputMessages. @@ -5027,7 +5045,7 @@ paths: post: tags: - V1 - summary: Openai Completion + summary: Create completion. description: Typed endpoint for proper schema generation. operationId: openai_completion_v1_completions_post requestBody: @@ -5059,7 +5077,7 @@ paths: post: tags: - V1 - summary: Create Conversation + summary: Create a conversation. description: Typed endpoint for proper schema generation. operationId: create_conversation_v1_conversations_post requestBody: @@ -5091,7 +5109,7 @@ paths: delete: tags: - V1 - summary: Openai Delete Conversation + summary: Delete a conversation. description: Query endpoint for proper schema generation. operationId: openai_delete_conversation_v1_conversations__conversation_id__delete parameters: @@ -5101,6 +5119,7 @@ paths: schema: type: string title: Conversation Id + description: The conversation identifier. responses: '200': description: The deleted conversation resource. @@ -5123,7 +5142,7 @@ paths: get: tags: - V1 - summary: Get Conversation + summary: Retrieve a conversation. description: Query endpoint for proper schema generation. operationId: get_conversation_v1_conversations__conversation_id__get parameters: @@ -5133,6 +5152,7 @@ paths: schema: type: string title: Conversation Id + description: The conversation identifier. responses: '200': description: The conversation object. @@ -5155,7 +5175,7 @@ paths: post: tags: - V1 - summary: Update Conversation + summary: Update a conversation. description: Typed endpoint for proper schema generation. operationId: update_conversation_v1_conversations__conversation_id__post requestBody: @@ -5189,21 +5209,15 @@ paths: required: true schema: type: string - description: 'Path parameter: conversation_id' + description: The conversation identifier. /v1/conversations/{conversation_id}/items: get: tags: - V1 - summary: List Items + summary: List items. description: Query endpoint for proper schema generation. operationId: list_items_v1_conversations__conversation_id__items_get parameters: - - name: conversation_id - in: path - required: true - schema: - type: string - title: Conversation Id - name: after in: query required: true @@ -5227,6 +5241,13 @@ paths: schema: type: string title: Order + - name: conversation_id + in: path + required: true + schema: + type: string + title: Conversation Id + description: The conversation identifier. responses: '200': description: List of conversation items. 
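The conversation-items listing above keys on the conversation ID path parameter plus the after and order query parameters visible in this hunk; a minimal sketch with placeholder values and an assumed base URL:

import requests

BASE_URL = "http://localhost:8321"  # assumed server address
conversation_id = "conv_123"        # placeholder conversation identifier

# List items in a conversation
# (operationId: list_items_v1_conversations__conversation_id__items_get).
resp = requests.get(
    f"{BASE_URL}/v1/conversations/{conversation_id}/items",
    params={"after": "item_000", "order": "asc"},  # placeholder cursor and sort order
)
resp.raise_for_status()
print(resp.json())  # list of conversation items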
@@ -5249,7 +5270,7 @@ paths: post: tags: - V1 - summary: Add Items + summary: Create items. description: Typed endpoint for proper schema generation. operationId: add_items_v1_conversations__conversation_id__items_post requestBody: @@ -5283,12 +5304,12 @@ paths: required: true schema: type: string - description: 'Path parameter: conversation_id' + description: The conversation identifier. /v1/conversations/{conversation_id}/items/{item_id}: delete: tags: - V1 - summary: Openai Delete Conversation Item + summary: Delete an item. description: Query endpoint for proper schema generation. operationId: openai_delete_conversation_item_v1_conversations__conversation_id__items__item_id__delete parameters: @@ -5298,12 +5319,14 @@ paths: schema: type: string title: Conversation Id + description: The conversation identifier. - name: item_id in: path required: true schema: type: string title: Item Id + description: The item identifier. responses: '200': description: The deleted item resource. @@ -5326,7 +5349,7 @@ paths: get: tags: - V1 - summary: Retrieve + summary: Retrieve an item. description: Query endpoint for proper schema generation. operationId: retrieve_v1_conversations__conversation_id__items__item_id__get parameters: @@ -5336,12 +5359,14 @@ paths: schema: type: string title: Conversation Id + description: The conversation identifier. - name: item_id in: path required: true schema: type: string title: Item Id + description: The item identifier. responses: '200': description: The conversation item. @@ -5365,7 +5390,7 @@ paths: post: tags: - V1 - summary: Openai Embeddings + summary: Create embeddings. description: Typed endpoint for proper schema generation. operationId: openai_embeddings_v1_embeddings_post requestBody: @@ -5397,7 +5422,7 @@ paths: get: tags: - V1 - summary: Openai List Files + summary: List files. description: Query endpoint for proper schema generation. operationId: openai_list_files_v1_files_get parameters: @@ -5447,7 +5472,7 @@ paths: post: tags: - V1 - summary: Openai Upload File + summary: Upload file. description: Response-only endpoint for proper schema generation. operationId: openai_upload_file_v1_files_post responses: @@ -5473,7 +5498,7 @@ paths: delete: tags: - V1 - summary: Openai Delete File + summary: Delete file. description: Query endpoint for proper schema generation. operationId: openai_delete_file_v1_files__file_id__delete parameters: @@ -5483,6 +5508,7 @@ paths: schema: type: string title: File Id + description: The ID of the file to use for this request. responses: '200': description: An OpenAIFileDeleteResponse indicating successful deletion. @@ -5505,7 +5531,7 @@ paths: get: tags: - V1 - summary: Openai Retrieve File + summary: Retrieve file. description: Query endpoint for proper schema generation. operationId: openai_retrieve_file_v1_files__file_id__get parameters: @@ -5515,6 +5541,7 @@ paths: schema: type: string title: File Id + description: The ID of the file to use for this request. responses: '200': description: An OpenAIFileObject containing file information. @@ -5538,7 +5565,7 @@ paths: get: tags: - V1 - summary: Openai Retrieve File Content + summary: Retrieve file content. description: Generic endpoint - this would be replaced with actual implementation. operationId: openai_retrieve_file_content_v1_files__file_id__content_get parameters: @@ -5557,7 +5584,7 @@ paths: required: true schema: type: string - description: 'Path parameter: file_id' + description: The ID of the file to use for this request. 
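For the file endpoints above, both metadata retrieval and raw-content retrieval are GETs keyed by file_id; the /v1/files/{file_id} and /v1/files/{file_id}/content paths are inferred from the operation IDs, and the base URL and file ID below are placeholders:

import requests

BASE_URL = "http://localhost:8321"  # assumed server address
file_id = "file-abc123"             # placeholder file identifier

# Retrieve file metadata (operationId: openai_retrieve_file_v1_files__file_id__get).
meta = requests.get(f"{BASE_URL}/v1/files/{file_id}")
meta.raise_for_status()
print(meta.json())

# Retrieve the raw file content
# (operationId: openai_retrieve_file_content_v1_files__file_id__content_get).
content = requests.get(f"{BASE_URL}/v1/files/{file_id}/content")
content.raise_for_status()
print(len(content.content), "bytes of file content")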
responses: '200': description: The raw file content as a binary response. @@ -5580,7 +5607,7 @@ paths: get: tags: - V1 - summary: Health + summary: Get health status. description: Response-only endpoint for proper schema generation. operationId: health_v1_health_get responses: @@ -5606,7 +5633,7 @@ paths: get: tags: - V1 - summary: List Routes + summary: List routes. description: Response-only endpoint for proper schema generation. operationId: list_routes_v1_inspect_routes_get responses: @@ -5632,7 +5659,7 @@ paths: get: tags: - V1 - summary: List Models + summary: List all models. description: Response-only endpoint for proper schema generation. operationId: list_models_v1_models_get responses: @@ -5657,7 +5684,7 @@ paths: post: tags: - V1 - summary: Register Model + summary: Register model. description: Typed endpoint for proper schema generation. operationId: register_model_v1_models_post requestBody: @@ -5689,7 +5716,7 @@ paths: delete: tags: - V1 - summary: Unregister Model + summary: Unregister model. description: Generic endpoint - this would be replaced with actual implementation. operationId: unregister_model_v1_models__model_id__delete parameters: @@ -5730,7 +5757,7 @@ paths: get: tags: - V1 - summary: Get Model + summary: Get model. description: Query endpoint for proper schema generation. operationId: get_model_v1_models__model_id__get parameters: @@ -5763,7 +5790,7 @@ paths: post: tags: - V1 - summary: Run Moderation + summary: Create moderation. description: Typed endpoint for proper schema generation. operationId: run_moderation_v1_moderations_post requestBody: @@ -5795,7 +5822,7 @@ paths: get: tags: - V1 - summary: List Prompts + summary: List all prompts. description: Response-only endpoint for proper schema generation. operationId: list_prompts_v1_prompts_get responses: @@ -5820,7 +5847,7 @@ paths: post: tags: - V1 - summary: Create Prompt + summary: Create prompt. description: Typed endpoint for proper schema generation. operationId: create_prompt_v1_prompts_post requestBody: @@ -5852,7 +5879,7 @@ paths: delete: tags: - V1 - summary: Delete Prompt + summary: Delete prompt. description: Generic endpoint - this would be replaced with actual implementation. operationId: delete_prompt_v1_prompts__prompt_id__delete parameters: @@ -5866,13 +5893,12 @@ paths: required: true schema: title: Kwargs - - &id001 - name: prompt_id + - name: prompt_id in: path required: true schema: type: string - description: 'Path parameter: prompt_id' + description: The identifier of the prompt to delete. responses: '200': description: Successful Response @@ -5894,22 +5920,23 @@ paths: get: tags: - V1 - summary: Get Prompt + summary: Get prompt. description: Query endpoint for proper schema generation. operationId: get_prompt_v1_prompts__prompt_id__get parameters: - - name: prompt_id - in: path - required: true - schema: - type: string - title: Prompt Id - name: version in: query required: true schema: type: integer title: Version + - name: prompt_id + in: path + required: true + schema: + type: string + title: Prompt Id + description: The identifier of the prompt to get. responses: '200': description: A Prompt resource. @@ -5932,7 +5959,7 @@ paths: post: tags: - V1 - summary: Update Prompt + summary: Update prompt. description: Typed endpoint for proper schema generation. 
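The get-prompt operation above takes the prompt ID as a path parameter and an integer version query parameter; a minimal sketch (the /v1/prompts/{prompt_id} path is inferred from the operationId, and the base URL, prompt ID, and version are placeholders):

import requests

BASE_URL = "http://localhost:8321"  # assumed server address
prompt_id = "prompt_123"            # placeholder prompt identifier

# Get a specific version of a prompt
# (operationId: get_prompt_v1_prompts__prompt_id__get).
resp = requests.get(f"{BASE_URL}/v1/prompts/{prompt_id}", params={"version": 1})
resp.raise_for_status()
print(resp.json())  # a Prompt resource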
operationId: update_prompt_v1_prompts__prompt_id__post requestBody: @@ -5961,12 +5988,17 @@ paths: $ref: '#/components/responses/DefaultError' description: Default Response parameters: - - *id001 + - name: prompt_id + in: path + required: true + schema: + type: string + description: The identifier of the prompt to update. /v1/prompts/{prompt_id}/set-default-version: post: tags: - V1 - summary: Set Default Version + summary: Set prompt version. description: Typed endpoint for proper schema generation. operationId: set_default_version_v1_prompts__prompt_id__set_default_version_post requestBody: @@ -6000,12 +6032,12 @@ paths: required: true schema: type: string - description: 'Path parameter: prompt_id' + description: The identifier of the prompt. /v1/prompts/{prompt_id}/versions: get: tags: - V1 - summary: List Prompt Versions + summary: List prompt versions. description: Query endpoint for proper schema generation. operationId: list_prompt_versions_v1_prompts__prompt_id__versions_get parameters: @@ -6015,6 +6047,7 @@ paths: schema: type: string title: Prompt Id + description: The identifier of the prompt to list versions for. responses: '200': description: A ListPromptsResponse containing all versions of the prompt. @@ -6038,7 +6071,7 @@ paths: get: tags: - V1 - summary: List Providers + summary: List providers. description: Response-only endpoint for proper schema generation. operationId: list_providers_v1_providers_get responses: @@ -6064,7 +6097,7 @@ paths: get: tags: - V1 - summary: Inspect Provider + summary: Get provider. description: Query endpoint for proper schema generation. operationId: inspect_provider_v1_providers__provider_id__get parameters: @@ -6074,6 +6107,7 @@ paths: schema: type: string title: Provider Id + description: The ID of the provider to inspect. responses: '200': description: A ProviderInfo object containing the provider's details. @@ -6097,7 +6131,7 @@ paths: get: tags: - V1 - summary: List Openai Responses + summary: List all responses. description: Query endpoint for proper schema generation. operationId: list_openai_responses_v1_responses_get parameters: @@ -6148,7 +6182,7 @@ paths: post: tags: - V1 - summary: Create Openai Response + summary: Create a model response. description: Typed endpoint for proper schema generation. operationId: create_openai_response_v1_responses_post requestBody: @@ -6180,7 +6214,7 @@ paths: delete: tags: - V1 - summary: Delete Openai Response + summary: Delete a response. description: Query endpoint for proper schema generation. operationId: delete_openai_response_v1_responses__response_id__delete parameters: @@ -6190,6 +6224,7 @@ paths: schema: type: string title: Response Id + description: The ID of the OpenAI response to delete. responses: '200': description: An OpenAIDeleteResponseObject @@ -6212,7 +6247,7 @@ paths: get: tags: - V1 - summary: Get Openai Response + summary: Get a model response. description: Query endpoint for proper schema generation. operationId: get_openai_response_v1_responses__response_id__get parameters: @@ -6222,6 +6257,7 @@ paths: schema: type: string title: Response Id + description: The ID of the OpenAI response to retrieve. responses: '200': description: An OpenAIResponseObject. @@ -6245,16 +6281,10 @@ paths: get: tags: - V1 - summary: List Openai Response Input Items + summary: List input items. description: Query endpoint for proper schema generation. 
operationId: list_openai_response_input_items_v1_responses__response_id__input_items_get parameters: - - name: response_id - in: path - required: true - schema: - type: string - title: Response Id - name: after in: query required: true @@ -6286,6 +6316,13 @@ paths: schema: $ref: '#/components/schemas/Order' default: desc + - name: response_id + in: path + required: true + schema: + type: string + title: Response Id + description: The ID of the response to retrieve input items for. responses: '200': description: An ListOpenAIResponseInputItem. @@ -6309,7 +6346,7 @@ paths: post: tags: - V1 - summary: Run Shield + summary: Run shield. description: Typed endpoint for proper schema generation. operationId: run_shield_v1_safety_run_shield_post requestBody: @@ -6341,7 +6378,7 @@ paths: get: tags: - V1 - summary: List Scoring Functions + summary: List all scoring functions. description: Response-only endpoint for proper schema generation. operationId: list_scoring_functions_v1_scoring_functions_get responses: @@ -6366,7 +6403,7 @@ paths: post: tags: - V1 - summary: Register Scoring Function + summary: Register a scoring function. description: Generic endpoint - this would be replaced with actual implementation. operationId: register_scoring_function_v1_scoring_functions_post parameters: @@ -6402,7 +6439,7 @@ paths: delete: tags: - V1 - summary: Unregister Scoring Function + summary: Unregister a scoring function. description: Generic endpoint - this would be replaced with actual implementation. operationId: unregister_scoring_function_v1_scoring_functions__scoring_fn_id__delete parameters: @@ -6443,7 +6480,7 @@ paths: get: tags: - V1 - summary: Get Scoring Function + summary: Get a scoring function by its ID. description: Query endpoint for proper schema generation. operationId: get_scoring_function_v1_scoring_functions__scoring_fn_id__get parameters: @@ -6476,7 +6513,7 @@ paths: post: tags: - V1 - summary: Score + summary: Score a list of rows. description: Typed endpoint for proper schema generation. operationId: score_v1_scoring_score_post requestBody: @@ -6508,7 +6545,7 @@ paths: post: tags: - V1 - summary: Score Batch + summary: Score a batch of rows. description: Typed endpoint for proper schema generation. operationId: score_batch_v1_scoring_score_batch_post requestBody: @@ -6540,7 +6577,7 @@ paths: get: tags: - V1 - summary: List Shields + summary: List all shields. description: Response-only endpoint for proper schema generation. operationId: list_shields_v1_shields_get responses: @@ -6565,7 +6602,7 @@ paths: post: tags: - V1 - summary: Register Shield + summary: Register a shield. description: Typed endpoint for proper schema generation. operationId: register_shield_v1_shields_post requestBody: @@ -6597,7 +6634,7 @@ paths: delete: tags: - V1 - summary: Unregister Shield + summary: Unregister a shield. description: Generic endpoint - this would be replaced with actual implementation. operationId: unregister_shield_v1_shields__identifier__delete parameters: @@ -6638,7 +6675,7 @@ paths: get: tags: - V1 - summary: Get Shield + summary: Get a shield by its identifier. description: Query endpoint for proper schema generation. operationId: get_shield_v1_shields__identifier__get parameters: @@ -6671,7 +6708,7 @@ paths: post: tags: - V1 - summary: Invoke Tool + summary: Run a tool with the given arguments. description: Typed endpoint for proper schema generation. 
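The input-items listing above is a GET keyed on response_id with after and order query parameters; a sketch assuming the /v1/responses/{response_id}/input_items path implied by the operationId, with placeholder values:

import requests

BASE_URL = "http://localhost:8321"  # assumed server address
response_id = "resp_123"            # placeholder response identifier

# List input items for a response
# (operationId: list_openai_response_input_items_v1_responses__response_id__input_items_get).
resp = requests.get(
    f"{BASE_URL}/v1/responses/{response_id}/input_items",
    params={"after": "item_000", "order": "desc"},  # placeholder cursor; "desc" is the spec default
)
resp.raise_for_status()
print(resp.json())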
operationId: invoke_tool_v1_tool_runtime_invoke_post requestBody: @@ -6703,7 +6740,7 @@ paths: get: tags: - V1 - summary: List Runtime Tools + summary: List all tools in the runtime. description: Query endpoint for proper schema generation. operationId: list_runtime_tools_v1_tool_runtime_list_tools_get parameters: @@ -6742,7 +6779,7 @@ paths: post: tags: - V1 - summary: Rag Tool.Insert + summary: Index documents so they can be used by the RAG system. description: Generic endpoint - this would be replaced with actual implementation. operationId: rag_tool_insert_v1_tool_runtime_rag_tool_insert_post parameters: @@ -6778,7 +6815,7 @@ paths: post: tags: - V1 - summary: Rag Tool.Query + summary: Query the RAG system for context; typically invoked by the agent. description: Typed endpoint for proper schema generation. operationId: rag_tool_query_v1_tool_runtime_rag_tool_query_post requestBody: @@ -6810,7 +6847,7 @@ paths: get: tags: - V1 - summary: List Tool Groups + summary: List tool groups with optional provider. description: Response-only endpoint for proper schema generation. operationId: list_tool_groups_v1_toolgroups_get responses: @@ -6835,7 +6872,7 @@ paths: post: tags: - V1 - summary: Register Tool Group + summary: Register a tool group. description: Generic endpoint - this would be replaced with actual implementation. operationId: register_tool_group_v1_toolgroups_post parameters: @@ -6871,7 +6908,7 @@ paths: delete: tags: - V1 - summary: Unregister Toolgroup + summary: Unregister a tool group. description: Generic endpoint - this would be replaced with actual implementation. operationId: unregister_toolgroup_v1_toolgroups__toolgroup_id__delete parameters: @@ -6912,7 +6949,7 @@ paths: get: tags: - V1 - summary: Get Tool Group + summary: Get a tool group by its ID. description: Query endpoint for proper schema generation. operationId: get_tool_group_v1_toolgroups__toolgroup_id__get parameters: @@ -6945,7 +6982,7 @@ paths: get: tags: - V1 - summary: List Tools + summary: List tools with optional tool group. description: Query endpoint for proper schema generation. operationId: list_tools_v1_tools_get parameters: @@ -6978,7 +7015,7 @@ paths: get: tags: - V1 - summary: Get Tool + summary: Get a tool by its name. description: Query endpoint for proper schema generation. operationId: get_tool_v1_tools__tool_name__get parameters: @@ -7011,7 +7048,7 @@ paths: post: tags: - V1 - summary: Insert Chunks + summary: Insert chunks into a vector database. description: Generic endpoint - this would be replaced with actual implementation. operationId: insert_chunks_v1_vector_io_insert_post parameters: @@ -7047,7 +7084,7 @@ paths: post: tags: - V1 - summary: Query Chunks + summary: Query chunks from a vector database. description: Typed endpoint for proper schema generation. operationId: query_chunks_v1_vector_io_query_post requestBody: @@ -7079,7 +7116,7 @@ paths: get: tags: - V1 - summary: Openai List Vector Stores + summary: Returns a list of vector stores. description: Query endpoint for proper schema generation. operationId: openai_list_vector_stores_v1_vector_stores_get parameters: @@ -7131,7 +7168,7 @@ paths: post: tags: - V1 - summary: Openai Create Vector Store + summary: Creates a vector store. description: Typed endpoint for proper schema generation. operationId: openai_create_vector_store_v1_vector_stores_post requestBody: @@ -7163,7 +7200,7 @@ paths: delete: tags: - V1 - summary: Openai Delete Vector Store + summary: Delete a vector store. 
description: Query endpoint for proper schema generation. operationId: openai_delete_vector_store_v1_vector_stores__vector_store_id__delete parameters: @@ -7173,6 +7210,7 @@ paths: schema: type: string title: Vector Store Id + description: The ID of the vector store to delete. responses: '200': description: A VectorStoreDeleteResponse indicating the deletion status. @@ -7195,7 +7233,7 @@ paths: get: tags: - V1 - summary: Openai Retrieve Vector Store + summary: Retrieves a vector store. description: Query endpoint for proper schema generation. operationId: openai_retrieve_vector_store_v1_vector_stores__vector_store_id__get parameters: @@ -7205,6 +7243,7 @@ paths: schema: type: string title: Vector Store Id + description: The ID of the vector store to retrieve. responses: '200': description: A VectorStoreObject representing the vector store. @@ -7227,7 +7266,7 @@ paths: post: tags: - V1 - summary: Openai Update Vector Store + summary: Updates a vector store. description: Typed endpoint for proper schema generation. operationId: openai_update_vector_store_v1_vector_stores__vector_store_id__post requestBody: @@ -7261,12 +7300,12 @@ paths: required: true schema: type: string - description: 'Path parameter: vector_store_id' + description: The ID of the vector store to update. /v1/vector_stores/{vector_store_id}/file_batches: post: tags: - V1 - summary: Openai Create Vector Store File Batch + summary: Create a vector store file batch. description: Typed endpoint for proper schema generation. operationId: openai_create_vector_store_file_batch_v1_vector_stores__vector_store_id__file_batches_post requestBody: @@ -7300,12 +7339,12 @@ paths: required: true schema: type: string - description: 'Path parameter: vector_store_id' + description: The ID of the vector store to create the file batch for. /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}: get: tags: - V1 - summary: Openai Retrieve Vector Store File Batch + summary: Retrieve a vector store file batch. description: Query endpoint for proper schema generation. operationId: openai_retrieve_vector_store_file_batch_v1_vector_stores__vector_store_id__file_batches__batch_id__get parameters: @@ -7315,12 +7354,14 @@ paths: schema: type: string title: Batch Id + description: The ID of the file batch to retrieve. - name: vector_store_id in: path required: true schema: type: string title: Vector Store Id + description: The ID of the vector store containing the file batch. responses: '200': description: A VectorStoreFileBatchObject representing the file batch. @@ -7344,7 +7385,7 @@ paths: post: tags: - V1 - summary: Openai Cancel Vector Store File Batch + summary: Cancels a vector store file batch. description: Typed endpoint for proper schema generation. operationId: openai_cancel_vector_store_file_batch_v1_vector_stores__vector_store_id__file_batches__batch_id__cancel_post requestBody: @@ -7373,38 +7414,26 @@ paths: description: Default Response $ref: '#/components/responses/DefaultError' parameters: - - name: vector_store_id - in: path - required: true - schema: - type: string - description: 'Path parameter: vector_store_id' - name: batch_id in: path required: true schema: type: string - description: 'Path parameter: batch_id' + description: The ID of the file batch to cancel. + - name: vector_store_id + in: path + required: true + schema: + type: string + description: The ID of the vector store containing the file batch. 
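The file-batch retrieve and cancel operations above take batch_id and vector_store_id path parameters; a minimal sketch against the /v1/vector_stores/{vector_store_id}/file_batches/{batch_id} path shown in this hunk (the base URL and IDs are placeholders):

import requests

BASE_URL = "http://localhost:8321"  # assumed server address
vector_store_id = "vs_123"          # placeholder vector store identifier
batch_id = "vsfb_123"               # placeholder file batch identifier

batch_url = f"{BASE_URL}/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}"

# Retrieve a vector store file batch.
resp = requests.get(batch_url)
resp.raise_for_status()
print(resp.json())  # a VectorStoreFileBatchObject

# Cancel the file batch
# (operationId: openai_cancel_vector_store_file_batch_v1_vector_stores__vector_store_id__file_batches__batch_id__cancel_post).
cancel = requests.post(f"{batch_url}/cancel")
cancel.raise_for_status()
print(cancel.json())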
/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/files: get: tags: - V1 - summary: Openai List Files In Vector Store File Batch + summary: Returns a list of vector store files in a batch. description: Query endpoint for proper schema generation. operationId: openai_list_files_in_vector_store_file_batch_v1_vector_stores__vector_store_id__file_batches__batch_id__files_get parameters: - - name: batch_id - in: path - required: true - schema: - type: string - title: Batch Id - - name: vector_store_id - in: path - required: true - schema: - type: string - title: Vector Store Id - name: after in: query required: true @@ -7437,6 +7466,20 @@ paths: type: string default: desc title: Order + - name: batch_id + in: path + required: true + schema: + type: string + title: Batch Id + description: The ID of the file batch to list files from. + - name: vector_store_id + in: path + required: true + schema: + type: string + title: Vector Store Id + description: The ID of the vector store containing the file batch. responses: '200': description: A VectorStoreFilesListInBatchResponse containing the list of files in the batch. @@ -7460,16 +7503,10 @@ paths: get: tags: - V1 - summary: Openai List Files In Vector Store + summary: List files in a vector store. description: Query endpoint for proper schema generation. operationId: openai_list_files_in_vector_store_v1_vector_stores__vector_store_id__files_get parameters: - - name: vector_store_id - in: path - required: true - schema: - type: string - title: Vector Store Id - name: after in: query required: true @@ -7502,6 +7539,13 @@ paths: type: string default: desc title: Order + - name: vector_store_id + in: path + required: true + schema: + type: string + title: Vector Store Id + description: The ID of the vector store to list files from. responses: '200': description: A VectorStoreListFilesResponse containing the list of files. @@ -7524,7 +7568,7 @@ paths: post: tags: - V1 - summary: Openai Attach File To Vector Store + summary: Attach a file to a vector store. description: Typed endpoint for proper schema generation. operationId: openai_attach_file_to_vector_store_v1_vector_stores__vector_store_id__files_post requestBody: @@ -7558,27 +7602,29 @@ paths: required: true schema: type: string - description: 'Path parameter: vector_store_id' + description: The ID of the vector store to attach the file to. /v1/vector_stores/{vector_store_id}/files/{file_id}: delete: tags: - V1 - summary: Openai Delete Vector Store File + summary: Delete a vector store file. description: Query endpoint for proper schema generation. operationId: openai_delete_vector_store_file_v1_vector_stores__vector_store_id__files__file_id__delete parameters: - - name: file_id - in: path - required: true - schema: - type: string - title: File Id - name: vector_store_id in: path required: true schema: type: string title: Vector Store Id + description: The ID of the vector store containing the file to delete. + - name: file_id + in: path + required: true + schema: + type: string + title: File Id + description: The ID of the file to delete. responses: '200': description: A VectorStoreFileDeleteResponse indicating the deletion status. @@ -7601,22 +7647,24 @@ paths: get: tags: - V1 - summary: Openai Retrieve Vector Store File + summary: Retrieves a vector store file. description: Query endpoint for proper schema generation. 
operationId: openai_retrieve_vector_store_file_v1_vector_stores__vector_store_id__files__file_id__get parameters: - - name: file_id - in: path - required: true - schema: - type: string - title: File Id - name: vector_store_id in: path required: true schema: type: string title: Vector Store Id + description: The ID of the vector store containing the file to retrieve. + - name: file_id + in: path + required: true + schema: + type: string + title: File Id + description: The ID of the file to retrieve. responses: '200': description: A VectorStoreFileObject representing the file. @@ -7639,7 +7687,7 @@ paths: post: tags: - V1 - summary: Openai Update Vector Store File + summary: Updates a vector store file. description: Typed endpoint for proper schema generation. operationId: openai_update_vector_store_file_v1_vector_stores__vector_store_id__files__file_id__post requestBody: @@ -7673,33 +7721,35 @@ paths: required: true schema: type: string - description: 'Path parameter: vector_store_id' + description: The ID of the vector store containing the file to update. - name: file_id in: path required: true schema: type: string - description: 'Path parameter: file_id' + description: The ID of the file to update. /v1/vector_stores/{vector_store_id}/files/{file_id}/content: get: tags: - V1 - summary: Openai Retrieve Vector Store File Contents + summary: Retrieves the contents of a vector store file. description: Query endpoint for proper schema generation. operationId: openai_retrieve_vector_store_file_contents_v1_vector_stores__vector_store_id__files__file_id__content_get parameters: - - name: file_id - in: path - required: true - schema: - type: string - title: File Id - name: vector_store_id in: path required: true schema: type: string title: Vector Store Id + description: The ID of the vector store containing the file to retrieve. + - name: file_id + in: path + required: true + schema: + type: string + title: File Id + description: The ID of the file to retrieve. responses: '200': description: A list of InterleavedContent representing the file contents. @@ -7723,7 +7773,7 @@ paths: post: tags: - V1 - summary: Openai Search Vector Store + summary: Search for chunks in a vector store. description: Typed endpoint for proper schema generation. operationId: openai_search_vector_store_v1_vector_stores__vector_store_id__search_post requestBody: @@ -7757,12 +7807,12 @@ paths: required: true schema: type: string - description: 'Path parameter: vector_store_id' + description: The ID of the vector store to search. /v1/version: get: tags: - V1 - summary: Version + summary: Get version. description: Response-only endpoint for proper schema generation. operationId: version_v1_version_get responses: @@ -7799,7 +7849,7 @@ components: required: - config title: AgentCandidate - description: "An agent candidate for evaluation.\n\n:param config: The configuration for the agent candidate." + description: An agent candidate for evaluation. AgentConfig: properties: sampling_params: @@ -7866,7 +7916,7 @@ components: - model - instructions title: AgentConfig - description: "Configuration for an agent.\n\n:param model: The model identifier to use for the agent\n:param instructions: The system instructions for the agent\n:param name: Optional name for the agent, used in telemetry and identification\n:param enable_session_persistence: Optional flag indicating whether session data has to be persisted\n:param response_format: Optional response format configuration" + description: Configuration for an agent. 
AgentCreateResponse: properties: agent_id: @@ -7876,7 +7926,7 @@ components: required: - agent_id title: AgentCreateResponse - description: "Response returned when creating a new agent.\n\n:param agent_id: Unique identifier for the created agent" + description: Response returned when creating a new agent. AgentSessionCreateResponse: properties: session_id: @@ -7886,7 +7936,7 @@ components: required: - session_id title: AgentSessionCreateResponse - description: "Response returned when creating a new agent session.\n\n:param session_id: Unique identifier for the created session" + description: Response returned when creating a new agent session. AgentToolGroupWithArgs: properties: name: @@ -7910,7 +7960,7 @@ components: default: agent_turn_input type: object title: AgentTurnInputType - description: "Parameter type for agent turn input.\n\n:param type: Discriminator type. Always \"agent_turn_input\"" + description: Parameter type for agent turn input. AggregationFunctionType: type: string enum: @@ -7920,7 +7970,7 @@ components: - categorical_count - accuracy title: AggregationFunctionType - description: "Types of aggregation functions for scoring results.\n:cvar average: Calculate the arithmetic mean of scores\n:cvar weighted_average: Calculate a weighted average of scores\n:cvar median: Calculate the median value of scores\n:cvar categorical_count: Count occurrences of categorical values\n:cvar accuracy: Calculate accuracy as the proportion of correct answers" + description: Types of aggregation functions for scoring results. AllowedToolsFilter: properties: tool_names: @@ -7930,7 +7980,7 @@ components: type: array type: object title: AllowedToolsFilter - description: "Filter configuration for restricting which MCP tools can be used.\n\n:param tool_names: (Optional) List of specific tool names that are allowed" + description: Filter configuration for restricting which MCP tools can be used. ApprovalFilter: properties: always: @@ -7945,7 +7995,7 @@ components: type: array type: object title: ApprovalFilter - description: "Filter configuration for MCP tool approval requirements.\n\n:param always: (Optional) List of tool names that always require approval\n:param never: (Optional) List of tool names that never require approval" + description: Filter configuration for MCP tool approval requirements. ArrayType: properties: type: @@ -7955,7 +8005,7 @@ components: default: array type: object title: ArrayType - description: "Parameter type for array values.\n\n:param type: Discriminator type. Always \"array\"" + description: Parameter type for array values. Attachment-Output: properties: content: @@ -7989,7 +8039,7 @@ components: - content - mime_type title: Attachment - description: "An attachment to an agent turn.\n\n:param content: The content of the attachment.\n:param mime_type: The MIME type of the attachment." + description: An attachment to an agent turn. BasicScoringFnParams: properties: type: @@ -8005,7 +8055,7 @@ components: description: Aggregation functions to apply to the scores of each row type: object title: BasicScoringFnParams - description: "Parameters for basic scoring function configuration.\n:param type: The type of scoring function parameters, always basic\n:param aggregation_functions: Aggregation functions to apply to the scores of each row" + description: Parameters for basic scoring function configuration. 
Batch: properties: id: @@ -8192,7 +8242,7 @@ components: - dataset_id - scoring_functions title: Benchmark - description: "A benchmark resource for evaluating model performance.\n\n:param dataset_id: Identifier of the dataset to use for the benchmark evaluation\n:param scoring_functions: List of scoring function identifiers to apply during evaluation\n:param metadata: Metadata for this evaluation task\n:param type: The resource type, always benchmark" + description: A benchmark resource for evaluating model performance. BenchmarkConfig: properties: eval_candidate: @@ -8228,7 +8278,7 @@ components: required: - eval_candidate title: BenchmarkConfig - description: "A benchmark configuration for evaluation.\n\n:param eval_candidate: The candidate to evaluate.\n:param scoring_params: Map between scoring function id and parameters for each scoring function you want to run\n:param num_examples: (Optional) The number of examples to evaluate. If not provided, all examples in the dataset will be evaluated" + description: A benchmark configuration for evaluation. BooleanType: properties: type: @@ -8238,7 +8288,7 @@ components: default: boolean type: object title: BooleanType - description: "Parameter type for boolean values.\n\n:param type: Discriminator type. Always \"boolean\"" + description: Parameter type for boolean values. BuiltinTool: type: string enum: @@ -8256,7 +8306,7 @@ components: default: chat_completion_input type: object title: ChatCompletionInputType - description: "Parameter type for chat completion input.\n\n:param type: Discriminator type. Always \"chat_completion_input\"" + description: Parameter type for chat completion input. Chunk-Output: properties: content: @@ -8300,7 +8350,7 @@ components: - content - chunk_id title: Chunk - description: "A chunk of content that can be inserted into a vector database.\n:param content: The content of the chunk, which can be interleaved text, images, or other types.\n:param chunk_id: Unique identifier for the chunk. Must be provided explicitly.\n:param metadata: Metadata associated with the chunk that will be used in the model context during inference.\n:param embedding: Optional embedding for the chunk. If not provided, it will be computed later.\n:param chunk_metadata: Metadata for the chunk that will NOT be used in the context during inference.\n The `chunk_metadata` is required backend functionality." + description: A chunk of content that can be inserted into a vector database. ChunkMetadata: properties: chunk_id: @@ -8338,7 +8388,7 @@ components: type: integer type: object title: ChunkMetadata - description: "`ChunkMetadata` is backend metadata for a `Chunk` that is used to store additional information about the chunk that\n will not be used in the context during inference, but is required for backend functionality. The `ChunkMetadata`\n is set during chunk creation in `MemoryToolRuntimeImpl().insert()`and is not expected to change after.\n Use `Chunk.metadata` for metadata that will be used in the context during inference.\n:param chunk_id: The ID of the chunk. 
If not set, it will be generated based on the document ID and content.\n:param document_id: The ID of the document this chunk belongs to.\n:param source: The source of the content, such as a URL, file path, or other identifier.\n:param created_timestamp: An optional timestamp indicating when the chunk was created.\n:param updated_timestamp: An optional timestamp indicating when the chunk was last updated.\n:param chunk_window: The window of the chunk, which can be used to group related chunks together.\n:param chunk_tokenizer: The tokenizer used to create the chunk. Default is Tiktoken.\n:param chunk_embedding_model: The embedding model used to create the chunk's embedding.\n:param chunk_embedding_dimension: The dimension of the embedding vector for the chunk.\n:param content_token_count: The number of tokens in the content of the chunk.\n:param metadata_token_count: The number of tokens in the metadata of the chunk." + description: "`ChunkMetadata` is backend metadata for a `Chunk` that is used to store additional information about the chunk that\n will not be used in the context during inference, but is required for backend functionality. The `ChunkMetadata`\n is set during chunk creation in `MemoryToolRuntimeImpl().insert()`and is not expected to change after.\n Use `Chunk.metadata` for metadata that will be used in the context during inference." CompletionInputType: properties: type: @@ -8348,7 +8398,7 @@ components: default: completion_input type: object title: CompletionInputType - description: "Parameter type for completion input.\n\n:param type: Discriminator type. Always \"completion_input\"" + description: Parameter type for completion input. CompletionMessage-Output: properties: role: @@ -8390,7 +8440,7 @@ components: - content - stop_reason title: CompletionMessage - description: "A message containing the model's (assistant) response in a chat conversation.\n\n:param role: Must be \"assistant\" to identify this as the model's response\n:param content: The content of the model's response\n:param stop_reason: Reason why the model stopped generating. Options are:\n - `StopReason.end_of_turn`: The model finished generating the entire response.\n - `StopReason.end_of_message`: The model finished generating but generated a partial response -- usually, a tool call. The user may call the tool and continue the conversation with the tool's response.\n - `StopReason.out_of_tokens`: The model ran out of token budget.\n:param tool_calls: List of tool calls. Each tool call is a ToolCall object." + description: A message containing the model's (assistant) response in a chat conversation. Conversation: properties: id: @@ -8502,7 +8552,7 @@ components: required: - beta title: DPOAlignmentConfig - description: "Configuration for Direct Preference Optimization (DPO) alignment.\n\n:param beta: Temperature parameter for the DPO loss\n:param loss_type: The type of loss function to use for DPO" + description: Configuration for Direct Preference Optimization (DPO) alignment. 
DPOLossType: type: string enum: @@ -8542,7 +8592,7 @@ components: - shuffle - data_format title: DataConfig - description: "Configuration for training data and data loading.\n\n:param dataset_id: Unique identifier for the training dataset\n:param batch_size: Number of samples per training batch\n:param shuffle: Whether to shuffle the dataset during training\n:param data_format: Format of the dataset (instruct or dialog)\n:param validation_dataset_id: (Optional) Unique identifier for the validation dataset\n:param packed: (Optional) Whether to pack multiple samples into a single sequence for efficiency\n:param train_on_input: (Optional) Whether to compute loss on input tokens as well as output tokens" + description: Configuration for training data and data loading. Dataset: properties: identifier: @@ -8586,14 +8636,14 @@ components: - purpose - source title: Dataset - description: "Dataset resource for storing and accessing training or evaluation data.\n\n:param type: Type of resource, always 'dataset' for datasets" + description: Dataset resource for storing and accessing training or evaluation data. DatasetFormat: type: string enum: - instruct - dialog title: DatasetFormat - description: "Format of the training dataset.\n:cvar instruct: Instruction-following format with prompt and completion\n:cvar dialog: Multi-turn conversation format with messages" + description: Format of the training dataset. DatasetPurpose: type: string enum: @@ -8601,7 +8651,7 @@ components: - eval/question-answer - eval/messages-answer title: DatasetPurpose - description: "Purpose of the dataset. Each purpose has a required input data schema.\n\n:cvar post-training/messages: The dataset contains messages used for post-training.\n {\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Hello, world!\"},\n {\"role\": \"assistant\", \"content\": \"Hello, world!\"},\n ]\n }\n:cvar eval/question-answer: The dataset contains a question column and an answer column.\n {\n \"question\": \"What is the capital of France?\",\n \"answer\": \"Paris\"\n }\n:cvar eval/messages-answer: The dataset contains a messages column with list of messages and an answer column.\n {\n \"messages\": [\n {\"role\": \"user\", \"content\": \"Hello, my name is John Doe.\"},\n {\"role\": \"assistant\", \"content\": \"Hello, John Doe. How can I help you today?\"},\n {\"role\": \"user\", \"content\": \"What's my name?\"},\n ],\n \"answer\": \"John Doe\"\n }" + description: Purpose of the dataset. Each purpose has a required input data schema. DefaultRAGQueryGeneratorConfig: properties: type: @@ -8615,7 +8665,7 @@ components: default: ' ' type: object title: DefaultRAGQueryGeneratorConfig - description: "Configuration for the default RAG query generator.\n\n:param type: Type of query generator, always 'default'\n:param separator: String separator used to join query terms" + description: Configuration for the default RAG query generator. Document: properties: content: @@ -8649,7 +8699,7 @@ components: - content - mime_type title: Document - description: "A document to be used by an agent.\n\n:param content: The content of the document.\n:param mime_type: The MIME type of the document." + description: A document to be used by an agent. 
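The DatasetPurpose values above each imply a required row schema, spelled out in the docstring being removed in this hunk; small Python sketches of rows for two of those purposes, copied from that removed example text:

# Example row for a dataset with purpose "eval/messages-answer",
# mirroring the sample in the removed docstring above.
row = {
    "messages": [
        {"role": "user", "content": "Hello, my name is John Doe."},
        {"role": "assistant", "content": "Hello, John Doe. How can I help you today?"},
        {"role": "user", "content": "What's my name?"},
    ],
    "answer": "John Doe",
}

# Example row for purpose "eval/question-answer".
qa_row = {"question": "What is the capital of France?", "answer": "Paris"}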
EfficiencyConfig: properties: enable_activation_checkpointing: @@ -8670,7 +8720,7 @@ components: type: boolean type: object title: EfficiencyConfig - description: "Configuration for memory and compute efficiency optimizations.\n\n:param enable_activation_checkpointing: (Optional) Whether to use activation checkpointing to reduce memory usage\n:param enable_activation_offloading: (Optional) Whether to offload activations to CPU to save GPU memory\n:param memory_efficient_fsdp_wrap: (Optional) Whether to use memory-efficient FSDP wrapping\n:param fsdp_cpu_offload: (Optional) Whether to offload FSDP parameters to CPU" + description: Configuration for memory and compute efficiency optimizations. Errors: properties: data: @@ -8702,7 +8752,7 @@ components: - generations - scores title: EvaluateResponse - description: "The response from an evaluation.\n\n:param generations: The generations from the evaluation.\n:param scores: The scores from the evaluation." + description: The response from an evaluation. GrammarResponseFormat: properties: type: @@ -8718,7 +8768,7 @@ components: required: - bnf title: GrammarResponseFormat - description: "Configuration for grammar-guided response generation.\n\n:param type: Must be \"grammar\" to identify this format type\n:param bnf: The BNF grammar specification the response should conform to" + description: Configuration for grammar-guided response generation. GreedySamplingStrategy: properties: type: @@ -8728,7 +8778,7 @@ components: default: greedy type: object title: GreedySamplingStrategy - description: "Greedy sampling strategy that selects the highest probability token at each step.\n\n:param type: Must be \"greedy\" to identify this sampling strategy" + description: Greedy sampling strategy that selects the highest probability token at each step. HealthInfo: properties: status: @@ -8737,7 +8787,7 @@ components: required: - status title: HealthInfo - description: "Health status information for the service.\n\n:param status: Current health status of the service" + description: Health status information for the service. HealthStatus: type: string enum: @@ -8758,7 +8808,7 @@ components: required: - image title: ImageContentItem - description: "A image content item\n\n:param type: Discriminator type of the content item. Always \"image\"\n:param image: Image as a base64 encoded string or an URL" + description: An image content item ImageContentItem-Output: properties: type: @@ -8772,7 +8822,7 @@ components: required: - image title: ImageContentItem - description: "A image content item\n\n:param type: Discriminator type of the content item. Always \"image\"\n:param image: Image as a base64 encoded string or an URL" + description: An image content item InferenceStep-Output: properties: turn_id: @@ -8802,7 +8852,7 @@ components: - step_id - model_response title: InferenceStep - description: "An inference step in an agent turn.\n\n:param model_response: The response from the LLM." + description: An inference step in an agent turn. InputTokensDetails: properties: cached_tokens: @@ -8825,7 +8875,7 @@ components: - job_id - status title: Job - description: "A job execution instance with status tracking.\n\n:param job_id: Unique identifier for the job\n:param status: Current execution status of the job" + description: A job execution instance with status tracking.
JobStatus: type: string enum: @@ -8835,7 +8885,7 @@ components: - scheduled - cancelled title: JobStatus - description: "Status of a job execution.\n:cvar completed: Job has finished successfully\n:cvar in_progress: Job is currently running\n:cvar failed: Job has failed during execution\n:cvar scheduled: Job is scheduled but not yet started\n:cvar cancelled: Job was cancelled before completion" + description: Status of a job execution. JsonSchemaResponseFormat: properties: type: @@ -8851,7 +8901,7 @@ components: required: - json_schema title: JsonSchemaResponseFormat - description: "Configuration for JSON schema-guided response generation.\n\n:param type: Must be \"json_schema\" to identify this format type\n:param json_schema: The JSON schema the response should conform to. In a Python SDK, this is often a `pydantic` model." + description: Configuration for JSON schema-guided response generation. JsonType: properties: type: @@ -8861,7 +8911,7 @@ components: default: json type: object title: JsonType - description: "Parameter type for JSON values.\n\n:param type: Discriminator type. Always \"json\"" + description: Parameter type for JSON values. LLMAsJudgeScoringFnParams: properties: type: @@ -8891,7 +8941,7 @@ components: required: - judge_model title: LLMAsJudgeScoringFnParams - description: "Parameters for LLM-as-judge scoring function configuration.\n:param type: The type of scoring function parameters, always llm_as_judge\n:param judge_model: Identifier of the LLM model to use as a judge for scoring\n:param prompt_template: (Optional) Custom prompt template for the judge model\n:param judge_score_regexes: Regexes to extract the answer from generated response\n:param aggregation_functions: Aggregation functions to apply to the scores of each row" + description: Parameters for LLM-as-judge scoring function configuration. LLMRAGQueryGeneratorConfig: properties: type: @@ -8910,7 +8960,7 @@ components: - model - template title: LLMRAGQueryGeneratorConfig - description: "Configuration for the LLM-based RAG query generator.\n\n:param type: Type of query generator, always 'llm'\n:param model: Name of the language model to use for query generation\n:param template: Template string for formatting the query generation prompt" + description: Configuration for the LLM-based RAG query generator. ListBenchmarksResponse: properties: data: @@ -8933,7 +8983,7 @@ components: required: - data title: ListDatasetsResponse - description: "Response from listing datasets.\n\n:param data: List of datasets" + description: Response from listing datasets. ListModelsResponse: properties: data: @@ -8979,7 +9029,7 @@ components: required: - data title: ListProvidersResponse - description: "Response containing a list of all available providers.\n\n:param data: List of provider information objects" + description: Response containing a list of all available providers. ListRoutesResponse: properties: data: @@ -8991,7 +9041,7 @@ components: required: - data title: ListRoutesResponse - description: "Response containing a list of all available API routes.\n\n:param data: List of available route information objects" + description: Response containing a list of all available API routes. ListScoringFunctionsResponse: properties: data: @@ -9025,7 +9075,7 @@ components: required: - data title: ListToolGroupsResponse - description: "Response containing a list of tool groups.\n\n:param data: List of tool groups" + description: Response containing a list of tool groups. 
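As a sketch of how the LLMAsJudgeScoringFnParams schema above might be populated, using only the field names visible in this hunk (the judge model, template, and regex values are placeholders; the aggregation function value is one of the AggregationFunctionType enum members listed earlier):

# Illustrative LLM-as-judge scoring parameters; the schema marks only "judge_model" as required.
llm_as_judge_params = {
    "type": "llm_as_judge",
    "judge_model": "example-judge-model",            # placeholder model identifier
    "prompt_template": "Rate the answer: {answer}",  # placeholder prompt template
    "judge_score_regexes": [r"Score:\s*(\d+)"],      # placeholder regex for extracting the score
    "aggregation_functions": ["average"],            # one of the AggregationFunctionType values
}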
MCPListToolsTool: properties: input_schema: @@ -9043,7 +9093,7 @@ components: - input_schema - name title: MCPListToolsTool - description: "Tool definition returned by MCP list tools operation.\n\n:param input_schema: JSON schema defining the tool's input parameters\n:param name: Name of the tool\n:param description: (Optional) Description of what the tool does" + description: Tool definition returned by MCP list tools operation. MemoryRetrievalStep-Output: properties: turn_id: @@ -9097,7 +9147,7 @@ components: - vector_store_ids - inserted_context title: MemoryRetrievalStep - description: "A memory retrieval step in an agent turn.\n\n:param vector_store_ids: The IDs of the vector databases to retrieve context from.\n:param inserted_context: The context retrieved from the vector databases." + description: A memory retrieval step in an agent turn. Model: properties: identifier: @@ -9130,7 +9180,7 @@ components: - identifier - provider_id title: Model - description: "A model resource representing an AI model registered in Llama Stack.\n\n:param type: The resource type, always 'model' for model resources\n:param model_type: The type of model (LLM or embedding model)\n:param metadata: Any additional metadata for this model\n:param identifier: Unique identifier for this resource in llama stack\n:param provider_resource_id: Unique identifier for this resource in the provider\n:param provider_id: ID of the provider that owns this resource" + description: A model resource representing an AI model registered in Llama Stack. ModelCandidate: properties: type: @@ -9150,7 +9200,7 @@ components: - model - sampling_params title: ModelCandidate - description: "A model candidate for evaluation.\n\n:param model: The model ID to evaluate.\n:param sampling_params: The sampling parameters for the model.\n:param system_message: (Optional) The system message providing instructions or context to the model." + description: A model candidate for evaluation. ModelType: type: string enum: @@ -9158,7 +9208,7 @@ components: - embedding - rerank title: ModelType - description: "Enumeration of supported model types in Llama Stack.\n:cvar llm: Large language model for text generation and completion\n:cvar embedding: Embedding model for converting text to vector representations\n:cvar rerank: Reranking model for reordering documents based on their relevance to a query" + description: Enumeration of supported model types in Llama Stack. ModerationObject: properties: id: @@ -9178,7 +9228,7 @@ components: - model - results title: ModerationObject - description: "A moderation object.\n:param id: The unique identifier for the moderation request.\n:param model: The model used to generate the moderation results.\n:param results: A list of moderation objects" + description: A moderation object. ModerationObjectResults: properties: flagged: @@ -9212,7 +9262,7 @@ components: required: - flagged title: ModerationObjectResults - description: "A moderation object.\n:param flagged: Whether any of the below categories are flagged.\n:param categories: A list of the categories, and whether they are flagged or not.\n:param category_applied_input_types: A list of the categories along with the input type(s) that the score applies to.\n:param category_scores: A list of the categories along with their scores as predicted by model." + description: A moderation object. 
NumberType: properties: type: @@ -9222,7 +9272,7 @@ components: default: number type: object title: NumberType - description: "Parameter type for numeric values.\n\n:param type: Discriminator type. Always \"number\"" + description: Parameter type for numeric values. ObjectType: properties: type: @@ -9232,7 +9282,7 @@ components: default: object type: object title: ObjectType - description: "Parameter type for object values.\n\n:param type: Discriminator type. Always \"object\"" + description: Parameter type for object values. OpenAIAssistantMessageParam-Input: properties: role: @@ -9257,7 +9307,7 @@ components: type: array type: object title: OpenAIAssistantMessageParam - description: "A message containing the model's (assistant) response in an OpenAI-compatible chat completion request.\n\n:param role: Must be \"assistant\" to identify this as the model's response\n:param content: The content of the model's response\n:param name: (Optional) The name of the assistant message participant.\n:param tool_calls: List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object." + description: A message containing the model's (assistant) response in an OpenAI-compatible chat completion request. OpenAIAssistantMessageParam-Output: properties: role: @@ -9282,7 +9332,7 @@ components: type: array type: object title: OpenAIAssistantMessageParam - description: "A message containing the model's (assistant) response in an OpenAI-compatible chat completion request.\n\n:param role: Must be \"assistant\" to identify this as the model's response\n:param content: The content of the model's response\n:param name: (Optional) The name of the assistant message participant.\n:param tool_calls: List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object." + description: A message containing the model's (assistant) response in an OpenAI-compatible chat completion request. OpenAIChatCompletion: properties: id: @@ -9313,7 +9363,7 @@ components: - created - model title: OpenAIChatCompletion - description: "Response from an OpenAI-compatible chat completion request.\n\n:param id: The ID of the chat completion\n:param choices: List of choices\n:param object: The object type, which will be \"chat.completion\"\n:param created: The Unix timestamp in seconds when the chat completion was created\n:param model: The model that was used to generate the chat completion\n:param usage: Token usage information for the completion" + description: Response from an OpenAI-compatible chat completion request. OpenAIChatCompletionContentPartImageParam: properties: type: @@ -9327,7 +9377,7 @@ components: required: - image_url title: OpenAIChatCompletionContentPartImageParam - description: "Image content part for OpenAI-compatible chat completion messages.\n\n:param type: Must be \"image_url\" to identify this as image content\n:param image_url: Image URL specification and processing details" + description: Image content part for OpenAI-compatible chat completion messages. OpenAIChatCompletionContentPartTextParam: properties: type: @@ -9342,7 +9392,7 @@ components: required: - text title: OpenAIChatCompletionContentPartTextParam - description: "Text content part for OpenAI-compatible chat completion messages.\n\n:param type: Must be \"text\" to identify this as text content\n:param text: The text content of the message" + description: Text content part for OpenAI-compatible chat completion messages. 
OpenAIChatCompletionRequestWithExtraBody: properties: model: @@ -9464,7 +9514,7 @@ components: - model - messages title: OpenAIChatCompletionRequestWithExtraBody - description: "Request parameters for OpenAI-compatible chat completion endpoint.\n\n:param model: The identifier of the model to use. The model must be registered with Llama Stack and available via the /models endpoint.\n:param messages: List of messages in the conversation.\n:param frequency_penalty: (Optional) The penalty for repeated tokens.\n:param function_call: (Optional) The function call to use.\n:param functions: (Optional) List of functions to use.\n:param logit_bias: (Optional) The logit bias to use.\n:param logprobs: (Optional) The log probabilities to use.\n:param max_completion_tokens: (Optional) The maximum number of tokens to generate.\n:param max_tokens: (Optional) The maximum number of tokens to generate.\n:param n: (Optional) The number of completions to generate.\n:param parallel_tool_calls: (Optional) Whether to parallelize tool calls.\n:param presence_penalty: (Optional) The penalty for repeated tokens.\n:param response_format: (Optional) The response format to use.\n:param seed: (Optional) The seed to use.\n:param stop: (Optional) The stop tokens to use.\n:param stream: (Optional) Whether to stream the response.\n:param stream_options: (Optional) The stream options to use.\n:param temperature: (Optional) The temperature to use.\n:param tool_choice: (Optional) The tool choice to use.\n:param tools: (Optional) The tools to use.\n:param top_logprobs: (Optional) The top log probabilities to use.\n:param top_p: (Optional) The top p to use.\n:param user: (Optional) The user to use." + description: Request parameters for OpenAI-compatible chat completion endpoint. OpenAIChatCompletionToolCall: properties: index: @@ -9482,7 +9532,7 @@ components: $ref: '#/components/schemas/OpenAIChatCompletionToolCallFunction' type: object title: OpenAIChatCompletionToolCall - description: "Tool call specification for OpenAI-compatible chat completion responses.\n\n:param index: (Optional) Index of the tool call in the list\n:param id: (Optional) Unique identifier for the tool call\n:param type: Must be \"function\" to identify this as a function call\n:param function: (Optional) Function call details" + description: Tool call specification for OpenAI-compatible chat completion responses. OpenAIChatCompletionToolCallFunction: properties: name: @@ -9493,7 +9543,7 @@ components: type: string type: object title: OpenAIChatCompletionToolCallFunction - description: "Function call details for OpenAI-compatible tool calls.\n\n:param name: (Optional) Name of the function to call\n:param arguments: (Optional) Arguments to pass to the function as a JSON string" + description: Function call details for OpenAI-compatible tool calls. OpenAIChatCompletionUsage: properties: prompt_tokens: @@ -9515,7 +9565,7 @@ components: - completion_tokens - total_tokens title: OpenAIChatCompletionUsage - description: "Usage information for OpenAI chat completion.\n\n:param prompt_tokens: Number of tokens in the prompt\n:param completion_tokens: Number of tokens in the completion\n:param total_tokens: Total tokens used (prompt + completion)\n:param input_tokens_details: Detailed breakdown of input token usage\n:param output_tokens_details: Detailed breakdown of output token usage" + description: Usage information for OpenAI chat completion. 
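A hand-written example of the OpenAIChatCompletion / OpenAIChatCompletionUsage shape (not a real server reply; the model name and token counts are invented):

    completion = {
        "id": "chatcmpl-abc123",
        "object": "chat.completion",
        "created": 1730000000,
        "model": "llama-3.2-3b-instruct",
        "choices": [
            {
                "index": 0,
                "finish_reason": "stop",
                "message": {"role": "assistant", "content": "Hello!"},
            }
        ],
        "usage": {"prompt_tokens": 12, "completion_tokens": 3, "total_tokens": 15},
    }
    usage = completion["usage"]
    # total_tokens is the sum of prompt and completion tokens.
    assert usage["total_tokens"] == usage["prompt_tokens"] + usage["completion_tokens"]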
OpenAIChatCompletionUsageCompletionTokensDetails: properties: reasoning_tokens: @@ -9523,7 +9573,7 @@ components: type: integer type: object title: OpenAIChatCompletionUsageCompletionTokensDetails - description: "Token details for output tokens in OpenAI chat completion usage.\n\n:param reasoning_tokens: Number of tokens used for reasoning (o1/o3 models)" + description: Token details for output tokens in OpenAI chat completion usage. OpenAIChatCompletionUsagePromptTokensDetails: properties: cached_tokens: @@ -9531,7 +9581,7 @@ components: type: integer type: object title: OpenAIChatCompletionUsagePromptTokensDetails - description: "Token details for prompt tokens in OpenAI chat completion usage.\n\n:param cached_tokens: Number of tokens retrieved from cache" + description: Token details for prompt tokens in OpenAI chat completion usage. OpenAIChoice-Output: properties: message: @@ -9564,7 +9614,7 @@ components: - finish_reason - index title: OpenAIChoice - description: "A choice from an OpenAI-compatible chat completion response.\n\n:param message: The message from the model\n:param finish_reason: The reason the model stopped generating\n:param index: The index of the choice\n:param logprobs: (Optional) The log probabilities for the tokens in the message" + description: A choice from an OpenAI-compatible chat completion response. OpenAIChoiceLogprobs-Output: properties: content: @@ -9579,7 +9629,7 @@ components: type: array type: object title: OpenAIChoiceLogprobs - description: "The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response.\n\n:param content: (Optional) The log probabilities for the tokens in the message\n:param refusal: (Optional) The log probabilities for the tokens in the message" + description: The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response. OpenAICompletion: properties: id: @@ -9608,7 +9658,7 @@ components: - created - model title: OpenAICompletion - description: "Response from an OpenAI-compatible completion request.\n\n:id: The ID of the completion\n:choices: List of choices\n:created: The Unix timestamp in seconds when the completion was created\n:model: The model that was used to generate the completion\n:object: The object type, which will be \"text_completion\"" + description: Response from an OpenAI-compatible completion request. OpenAICompletionChoice-Output: properties: finish_reason: @@ -9628,7 +9678,7 @@ components: - text - index title: OpenAICompletionChoice - description: "A choice from an OpenAI-compatible completion response.\n\n:finish_reason: The reason the model stopped generating\n:text: The text of the choice\n:index: The index of the choice\n:logprobs: (Optional) The log probabilities for the tokens in the choice" + description: A choice from an OpenAI-compatible completion response. OpenAICompletionRequestWithExtraBody: properties: model: @@ -9710,7 +9760,7 @@ components: - model - prompt title: OpenAICompletionRequestWithExtraBody - description: "Request parameters for OpenAI-compatible completion endpoint.\n\n:param model: The identifier of the model to use. 
The model must be registered with Llama Stack and available via the /models endpoint.\n:param prompt: The prompt to generate a completion for.\n:param best_of: (Optional) The number of completions to generate.\n:param echo: (Optional) Whether to echo the prompt.\n:param frequency_penalty: (Optional) The penalty for repeated tokens.\n:param logit_bias: (Optional) The logit bias to use.\n:param logprobs: (Optional) The log probabilities to use.\n:param max_tokens: (Optional) The maximum number of tokens to generate.\n:param n: (Optional) The number of completions to generate.\n:param presence_penalty: (Optional) The penalty for repeated tokens.\n:param seed: (Optional) The seed to use.\n:param stop: (Optional) The stop tokens to use.\n:param stream: (Optional) Whether to stream the response.\n:param stream_options: (Optional) The stream options to use.\n:param temperature: (Optional) The temperature to use.\n:param top_p: (Optional) The top p to use.\n:param user: (Optional) The user to use.\n:param suffix: (Optional) The suffix that should be appended to the completion." + description: Request parameters for OpenAI-compatible completion endpoint. OpenAICreateVectorStoreFileBatchRequestWithExtraBody: properties: file_ids: @@ -9737,7 +9787,7 @@ components: required: - file_ids title: OpenAICreateVectorStoreFileBatchRequestWithExtraBody - description: "Request to create a vector store file batch with extra_body support.\n\n:param file_ids: A list of File IDs that the vector store should use\n:param attributes: (Optional) Key-value attributes to store with the files\n:param chunking_strategy: (Optional) The chunking strategy used to chunk the file(s). Defaults to auto" + description: Request to create a vector store file batch with extra_body support. OpenAICreateVectorStoreRequestWithExtraBody: properties: name: @@ -9763,7 +9813,7 @@ components: additionalProperties: true type: object title: OpenAICreateVectorStoreRequestWithExtraBody - description: "Request to create a vector store with extra_body support.\n\n:param name: (Optional) A name for the vector store\n:param file_ids: List of file IDs to include in the vector store\n:param expires_after: (Optional) Expiration policy for the vector store\n:param chunking_strategy: (Optional) Strategy for splitting files into chunks\n:param metadata: Set of key-value pairs that can be attached to the vector store" + description: Request to create a vector store with extra_body support. OpenAIDeveloperMessageParam: properties: role: @@ -9785,7 +9835,7 @@ components: required: - content title: OpenAIDeveloperMessageParam - description: "A message from the developer in an OpenAI-compatible chat completion request.\n\n:param role: Must be \"developer\" to identify this as a developer message\n:param content: The content of the developer message\n:param name: (Optional) The name of the developer message participant." + description: A message from the developer in an OpenAI-compatible chat completion request. 
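A minimal sketch of an OpenAICompletionRequestWithExtraBody payload for the legacy completions endpoint (the model id is an assumption; only model and prompt are required):

    import json

    completion_request = {
        "model": "llama-3.2-3b-instruct",
        "prompt": "Write a haiku about YAML:",
        "max_tokens": 64,
        "temperature": 0.7,
        "stream": False,
    }
    print(json.dumps(completion_request, indent=2))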
OpenAIEmbeddingData: properties: object: @@ -9808,7 +9858,7 @@ components: - embedding - index title: OpenAIEmbeddingData - description: "A single embedding data object from an OpenAI-compatible embeddings response.\n\n:param object: The object type, which will be \"embedding\"\n:param embedding: The embedding vector as a list of floats (when encoding_format=\"float\") or as a base64-encoded string (when encoding_format=\"base64\")\n:param index: The index of the embedding in the input list" + description: A single embedding data object from an OpenAI-compatible embeddings response. OpenAIEmbeddingUsage: properties: prompt_tokens: @@ -9822,7 +9872,7 @@ components: - prompt_tokens - total_tokens title: OpenAIEmbeddingUsage - description: "Usage information for an OpenAI-compatible embeddings response.\n\n:param prompt_tokens: The number of tokens in the input\n:param total_tokens: The total number of tokens used" + description: Usage information for an OpenAI-compatible embeddings response. OpenAIEmbeddingsRequestWithExtraBody: properties: model: @@ -9851,7 +9901,7 @@ components: - model - input title: OpenAIEmbeddingsRequestWithExtraBody - description: "Request parameters for OpenAI-compatible embeddings endpoint.\n\n:param model: The identifier of the model to use. The model must be an embedding model registered with Llama Stack and available via the /models endpoint.\n:param input: Input text to embed, encoded as a string or array of strings. To embed multiple inputs in a single request, pass an array of strings.\n:param encoding_format: (Optional) The format to return the embeddings in. Can be either \"float\" or \"base64\". Defaults to \"float\".\n:param dimensions: (Optional) The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models.\n:param user: (Optional) A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse." + description: Request parameters for OpenAI-compatible embeddings endpoint. OpenAIEmbeddingsResponse: properties: object: @@ -9875,7 +9925,7 @@ components: - model - usage title: OpenAIEmbeddingsResponse - description: "Response from an OpenAI-compatible embeddings request.\n\n:param object: The object type, which will be \"list\"\n:param data: List of embedding data objects\n:param model: The model that was used to generate the embeddings\n:param usage: Usage information" + description: Response from an OpenAI-compatible embeddings request. OpenAIFile: properties: type: @@ -9935,7 +9985,7 @@ components: - filename - purpose title: OpenAIFileObject - description: "OpenAI File object as defined in the OpenAI Files API.\n\n:param object: The object type, which is always \"file\"\n:param id: The file identifier, which can be referenced in the API endpoints\n:param bytes: The size of the file, in bytes\n:param created_at: The Unix timestamp (in seconds) for when the file was created\n:param expires_at: The Unix timestamp (in seconds) for when the file expires\n:param filename: The name of the file\n:param purpose: The intended purpose of the file" + description: OpenAI File object as defined in the OpenAI Files API. OpenAIFilePurpose: type: string enum: @@ -9955,7 +10005,7 @@ components: required: - url title: OpenAIImageURL - description: "Image URL specification for OpenAI-compatible chat completion messages.\n\n:param url: URL of the image to include in the message\n:param detail: (Optional) Level of detail for image processing. 
Can be \"low\", \"high\", or \"auto\"" + description: Image URL specification for OpenAI-compatible chat completion messages. OpenAIJSONSchema: properties: name: @@ -9973,7 +10023,7 @@ components: type: object type: object title: OpenAIJSONSchema - description: "JSON schema specification for OpenAI-compatible structured response format.\n\n:param name: Name of the schema\n:param description: (Optional) Description of the schema\n:param strict: (Optional) Whether to enforce strict adherence to the schema\n:param schema: (Optional) The JSON schema definition" + description: JSON schema specification for OpenAI-compatible structured response format. OpenAIResponseAnnotationCitation: properties: type: @@ -10000,7 +10050,7 @@ components: - title - url title: OpenAIResponseAnnotationCitation - description: "URL citation annotation for referencing external web resources.\n\n:param type: Annotation type identifier, always \"url_citation\"\n:param end_index: End position of the citation span in the content\n:param start_index: Start position of the citation span in the content\n:param title: Title of the referenced web resource\n:param url: URL of the referenced web resource" + description: URL citation annotation for referencing external web resources. OpenAIResponseAnnotationContainerFileCitation: properties: type: @@ -10053,7 +10103,7 @@ components: - filename - index title: OpenAIResponseAnnotationFileCitation - description: "File citation annotation for referencing specific files in response content.\n\n:param type: Annotation type identifier, always \"file_citation\"\n:param file_id: Unique identifier of the referenced file\n:param filename: Name of the referenced file\n:param index: Position index of the citation within the content" + description: File citation annotation for referencing specific files in response content. OpenAIResponseAnnotationFilePath: properties: type: @@ -10086,7 +10136,7 @@ components: required: - refusal title: OpenAIResponseContentPartRefusal - description: "Refusal content within a streamed response part.\n\n:param type: Content part type identifier, always \"refusal\"\n:param refusal: Refusal text supplied by the model" + description: Refusal content within a streamed response part. OpenAIResponseError: properties: code: @@ -10100,7 +10150,7 @@ components: - code - message title: OpenAIResponseError - description: "Error details for failed OpenAI response requests.\n\n:param code: Error code identifying the type of failure\n:param message: Human-readable error message describing the failure" + description: Error details for failed OpenAI response requests. OpenAIResponseFormatJSONObject: properties: type: @@ -10110,7 +10160,7 @@ components: default: json_object type: object title: OpenAIResponseFormatJSONObject - description: "JSON object response format for OpenAI-compatible chat completion requests.\n\n:param type: Must be \"json_object\" to indicate generic JSON object response format" + description: JSON object response format for OpenAI-compatible chat completion requests. OpenAIResponseFormatJSONSchema: properties: type: @@ -10124,7 +10174,7 @@ components: required: - json_schema title: OpenAIResponseFormatJSONSchema - description: "JSON schema response format for OpenAI-compatible chat completion requests.\n\n:param type: Must be \"json_schema\" to indicate structured JSON response format\n:param json_schema: The JSON schema specification for the response" + description: JSON schema response format for OpenAI-compatible chat completion requests. 
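A sketch of an OpenAIEmbeddingsRequestWithExtraBody payload and the matching OpenAIEmbeddingsResponse shape (the model name, vectors, and token counts are invented):

    embeddings_request = {
        "model": "all-minilm-l6-v2",
        "input": ["first document", "second document"],
        "encoding_format": "float",
    }
    embeddings_response = {
        "object": "list",
        "model": "all-minilm-l6-v2",
        "data": [
            {"object": "embedding", "index": 0, "embedding": [0.1, 0.2, 0.3]},
            {"object": "embedding", "index": 1, "embedding": [0.0, 0.5, 0.4]},
        ],
        "usage": {"prompt_tokens": 8, "total_tokens": 8},
    }
    # One embedding is returned per input string, in order.
    assert len(embeddings_response["data"]) == len(embeddings_request["input"])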
OpenAIResponseFormatText: properties: type: @@ -10134,7 +10184,7 @@ components: default: text type: object title: OpenAIResponseFormatText - description: "Text response format for OpenAI-compatible chat completion requests.\n\n:param type: Must be \"text\" to indicate plain text response format" + description: Text response format for OpenAI-compatible chat completion requests. OpenAIResponseInputFunctionToolCallOutput: properties: call_id: @@ -10181,7 +10231,7 @@ components: type: string type: object title: OpenAIResponseInputMessageContentFile - description: "File content for input messages in OpenAI response format.\n\n:param type: The type of the input item. Always `input_file`.\n:param file_data: The data of the file to be sent to the model.\n:param file_id: (Optional) The ID of the file to be sent to the model.\n:param file_url: The URL of the file to be sent to the model.\n:param filename: The name of the file to be sent to the model." + description: File content for input messages in OpenAI response format. OpenAIResponseInputMessageContentImage: properties: detail: @@ -10207,7 +10257,7 @@ components: type: string type: object title: OpenAIResponseInputMessageContentImage - description: "Image content for input messages in OpenAI response format.\n\n:param detail: Level of detail for image processing, can be \"low\", \"high\", or \"auto\"\n:param type: Content type identifier, always \"input_image\"\n:param file_id: (Optional) The ID of the file to be sent to the model.\n:param image_url: (Optional) URL of the image content" + description: Image content for input messages in OpenAI response format. OpenAIResponseInputMessageContentText: properties: text: @@ -10222,7 +10272,7 @@ components: required: - text title: OpenAIResponseInputMessageContentText - description: "Text content for input messages in OpenAI response format.\n\n:param text: The text content of the input message\n:param type: Content type identifier, always \"input_text\"" + description: Text content for input messages in OpenAI response format. OpenAIResponseInputToolFileSearch: properties: type: @@ -10251,7 +10301,7 @@ components: required: - vector_store_ids title: OpenAIResponseInputToolFileSearch - description: "File search tool configuration for OpenAI response inputs.\n\n:param type: Tool type identifier, always \"file_search\"\n:param vector_store_ids: List of vector store identifiers to search within\n:param filters: (Optional) Additional filters to apply to the search\n:param max_num_results: (Optional) Maximum number of search results to return (1-50)\n:param ranking_options: (Optional) Options for ranking and scoring search results" + description: File search tool configuration for OpenAI response inputs. OpenAIResponseInputToolFunction: properties: type: @@ -10277,7 +10327,7 @@ components: - name - parameters title: OpenAIResponseInputToolFunction - description: "Function tool configuration for OpenAI response inputs.\n\n:param type: Tool type identifier, always \"function\"\n:param name: Name of the function that can be called\n:param description: (Optional) Description of what the function does\n:param parameters: (Optional) JSON schema defining the function's parameters\n:param strict: (Optional) Whether to enforce strict parameter validation" + description: Function tool configuration for OpenAI response inputs. 
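Illustrating the two input-tool schemas above, a Responses-API tools array might combine a file_search tool and a function tool like this (the vector store id and the function itself are invented):

    tools = [
        {
            "type": "file_search",
            "vector_store_ids": ["vs_123"],
            "max_num_results": 5,
        },
        {
            "type": "function",
            "name": "get_weather",
            "description": "Look up the current weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    ]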
OpenAIResponseInputToolMCP: properties: type: @@ -10316,7 +10366,7 @@ components: - server_label - server_url title: OpenAIResponseInputToolMCP - description: "Model Context Protocol (MCP) tool configuration for OpenAI response inputs.\n\n:param type: Tool type identifier, always \"mcp\"\n:param server_label: Label to identify this MCP server\n:param server_url: URL endpoint of the MCP server\n:param headers: (Optional) HTTP headers to include when connecting to the server\n:param require_approval: Approval requirement for tool calls (\"always\", \"never\", or filter)\n:param allowed_tools: (Optional) Restriction on which tools can be used from this server" + description: Model Context Protocol (MCP) tool configuration for OpenAI response inputs. OpenAIResponseInputToolWebSearch: properties: type: @@ -10336,7 +10386,7 @@ components: pattern: ^low|medium|high$ type: object title: OpenAIResponseInputToolWebSearch - description: "Web search tool configuration for OpenAI response inputs.\n\n:param type: Web search tool type variant to use\n:param search_context_size: (Optional) Size of search context, must be \"low\", \"medium\", or \"high\"" + description: Web search tool configuration for OpenAI response inputs. OpenAIResponseMCPApprovalRequest: properties: arguments: @@ -10598,7 +10648,7 @@ components: - output - status title: OpenAIResponseObject - description: "Complete OpenAI response object containing generation results and metadata.\n\n:param created_at: Unix timestamp when the response was created\n:param error: (Optional) Error details if the response generation failed\n:param id: Unique identifier for this response\n:param model: Model identifier used for generation\n:param object: Object type identifier, always \"response\"\n:param output: List of generated output items (messages, tool calls, etc.)\n:param parallel_tool_calls: Whether tool calls can be executed in parallel\n:param previous_response_id: (Optional) ID of the previous response in a conversation\n:param prompt: (Optional) Reference to a prompt template and its variables.\n:param status: Current status of the response generation\n:param temperature: (Optional) Sampling temperature used for generation\n:param text: Text formatting configuration for the response\n:param top_p: (Optional) Nucleus sampling parameter used for generation\n:param tools: (Optional) An array of tools the model may call while generating a response.\n:param truncation: (Optional) Truncation strategy applied to the response\n:param usage: (Optional) Token usage information for the response\n:param instructions: (Optional) System message inserted into the model's context" + description: Complete OpenAI response object containing generation results and metadata. OpenAIResponseOutputMessageContentOutputText: properties: text: @@ -10658,7 +10708,7 @@ components: - queries - status title: OpenAIResponseOutputMessageFileSearchToolCall - description: "File search tool call output message for OpenAI responses.\n\n:param id: Unique identifier for this tool call\n:param queries: List of search queries executed\n:param status: Current status of the file search operation\n:param type: Tool call type identifier, always \"file_search_call\"\n:param results: (Optional) Search results returned by the file search operation" + description: File search tool call output message for OpenAI responses. 
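A sketch of an OpenAIResponseInputToolMCP entry alongside a web search tool (the server label, URL, header format, tool names, and the exact web_search type literal are assumptions):

    mcp_tool = {
        "type": "mcp",
        "server_label": "docs",
        "server_url": "https://mcp.example.com/sse",
        "require_approval": "never",
        "allowed_tools": ["search_docs"],
        "headers": {"Authorization": "Bearer <token>"},
    }
    web_search_tool = {"type": "web_search", "search_context_size": "medium"}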
OpenAIResponseOutputMessageFileSearchToolCallResults: properties: attributes: @@ -10685,7 +10735,7 @@ components: - score - text title: OpenAIResponseOutputMessageFileSearchToolCallResults - description: "Search results returned by the file search operation.\n\n:param attributes: (Optional) Key-value attributes associated with the file\n:param file_id: Unique identifier of the file containing the result\n:param filename: Name of the file containing the result\n:param score: Relevance score for this search result (between 0 and 1)\n:param text: Text content of the search result" + description: Search results returned by the file search operation. OpenAIResponseOutputMessageFunctionToolCall: properties: call_id: @@ -10714,7 +10764,7 @@ components: - name - arguments title: OpenAIResponseOutputMessageFunctionToolCall - description: "Function tool call output message for OpenAI responses.\n\n:param call_id: Unique identifier for the function call\n:param name: Name of the function being called\n:param arguments: JSON string containing the function arguments\n:param type: Tool call type identifier, always \"function_call\"\n:param id: (Optional) Additional identifier for the tool call\n:param status: (Optional) Current status of the function call execution" + description: Function tool call output message for OpenAI responses. OpenAIResponseOutputMessageMCPCall: properties: id: @@ -10747,7 +10797,7 @@ components: - name - server_label title: OpenAIResponseOutputMessageMCPCall - description: "Model Context Protocol (MCP) call output message for OpenAI responses.\n\n:param id: Unique identifier for this MCP call\n:param type: Tool call type identifier, always \"mcp_call\"\n:param arguments: JSON string containing the MCP call arguments\n:param name: Name of the MCP method being called\n:param server_label: Label identifying the MCP server handling the call\n:param error: (Optional) Error message if the MCP call failed\n:param output: (Optional) Output result from the successful MCP call" + description: Model Context Protocol (MCP) call output message for OpenAI responses. OpenAIResponseOutputMessageMCPListTools: properties: id: @@ -10772,7 +10822,7 @@ components: - server_label - tools title: OpenAIResponseOutputMessageMCPListTools - description: "MCP list tools output message containing available tools from an MCP server.\n\n:param id: Unique identifier for this MCP list tools operation\n:param type: Tool call type identifier, always \"mcp_list_tools\"\n:param server_label: Label identifying the MCP server providing the tools\n:param tools: List of available tools provided by the MCP server" + description: MCP list tools output message containing available tools from an MCP server. OpenAIResponseOutputMessageWebSearchToolCall: properties: id: @@ -10791,7 +10841,7 @@ components: - id - status title: OpenAIResponseOutputMessageWebSearchToolCall - description: "Web search tool call output message for OpenAI responses.\n\n:param id: Unique identifier for this tool call\n:param status: Current status of the web search operation\n:param type: Tool call type identifier, always \"web_search_call\"" + description: Web search tool call output message for OpenAI responses. 
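The output-message schemas above can be consumed by switching on their type field; a hand-written sketch (the items themselves are invented):

    output_items = [
        {
            "type": "function_call",
            "call_id": "call_1",
            "name": "get_weather",
            "arguments": '{"city": "Paris"}',
        },
        {
            "type": "mcp_list_tools",
            "id": "mcpl_1",
            "server_label": "docs",
            "tools": [{"name": "search_docs", "input_schema": {"type": "object"}}],
        },
        {"type": "web_search_call", "id": "ws_1", "status": "completed"},
    ]
    for item in output_items:
        if item["type"] == "function_call":
            print("model requested", item["name"], "with", item["arguments"])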
OpenAIResponsePrompt: properties: id: @@ -10818,14 +10868,14 @@ components: required: - id title: OpenAIResponsePrompt - description: "OpenAI compatible Prompt object that is used in OpenAI responses.\n\n:param id: Unique identifier of the prompt template\n:param variables: Dictionary of variable names to OpenAIResponseInputMessageContent structure for template substitution. The substitution values can either be strings, or other Response input types\nlike images or files.\n:param version: Version number of the prompt to use (defaults to latest if not specified)" + description: OpenAI compatible Prompt object that is used in OpenAI responses. OpenAIResponseText: properties: format: $ref: '#/components/schemas/OpenAIResponseTextFormat' type: object title: OpenAIResponseText - description: "Text response configuration for OpenAI responses.\n\n:param format: (Optional) Text format configuration specifying output format requirements" + description: Text response configuration for OpenAI responses. OpenAIResponseTextFormat: properties: type: @@ -10852,7 +10902,7 @@ components: type: boolean type: object title: OpenAIResponseTextFormat - description: "Configuration for Responses API text format.\n\n:param type: Must be \"text\", \"json_schema\", or \"json_object\" to identify the format type\n:param name: The name of the response format. Only used for json_schema.\n:param schema: The JSON schema the response should conform to. In a Python SDK, this is often a `pydantic` model. Only used for json_schema.\n:param description: (Optional) A description of the response format. Only used for json_schema.\n:param strict: (Optional) Whether to strictly enforce the JSON schema. If true, the response must match the schema exactly. Only used for json_schema." + description: Configuration for Responses API text format. OpenAIResponseToolMCP: properties: type: @@ -10874,7 +10924,7 @@ components: required: - server_label title: OpenAIResponseToolMCP - description: "Model Context Protocol (MCP) tool configuration for OpenAI response object.\n\n:param type: Tool type identifier, always \"mcp\"\n:param server_label: Label to identify this MCP server\n:param allowed_tools: (Optional) Restriction on which tools can be used from this server" + description: Model Context Protocol (MCP) tool configuration for OpenAI response object. OpenAIResponseUsage: properties: input_tokens: @@ -10896,7 +10946,7 @@ components: - output_tokens - total_tokens title: OpenAIResponseUsage - description: "Usage information for OpenAI response.\n\n:param input_tokens: Number of tokens in the input\n:param output_tokens: Number of tokens in the output\n:param total_tokens: Total tokens used (input + output)\n:param input_tokens_details: Detailed breakdown of input token usage\n:param output_tokens_details: Detailed breakdown of output token usage" + description: Usage information for OpenAI response. OpenAIResponseUsageInputTokensDetails: properties: cached_tokens: @@ -10904,7 +10954,7 @@ components: type: integer type: object title: OpenAIResponseUsageInputTokensDetails - description: "Token details for input tokens in OpenAI response usage.\n\n:param cached_tokens: Number of tokens retrieved from cache" + description: Token details for input tokens in OpenAI response usage. 
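A sketch of the OpenAIResponsePrompt and OpenAIResponseText shapes described above (the prompt id placeholder, variable names, and schema are illustrative; per the description, variable values may be plain strings or Response input content):

    prompt_ref = {
        "id": "pmpt_<48-digit-hash>",
        "variables": {
            "customer_name": "Alice",
            "greeting": {"type": "input_text", "text": "Welcome back!"},
        },
    }
    text_config = {
        "format": {
            "type": "json_schema",
            "name": "ticket",
            "schema": {"type": "object", "properties": {"summary": {"type": "string"}}},
            "strict": True,
        }
    }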
OpenAIResponseUsageOutputTokensDetails: properties: reasoning_tokens: @@ -10912,7 +10962,7 @@ components: type: integer type: object title: OpenAIResponseUsageOutputTokensDetails - description: "Token details for output tokens in OpenAI response usage.\n\n:param reasoning_tokens: Number of tokens used for reasoning (o1/o3 models)" + description: Token details for output tokens in OpenAI response usage. OpenAISystemMessageParam: properties: role: @@ -10934,7 +10984,7 @@ components: required: - content title: OpenAISystemMessageParam - description: "A system message providing instructions or context to the model.\n\n:param role: Must be \"system\" to identify this as a system message\n:param content: The content of the \"system prompt\". If multiple system messages are provided, they are concatenated. The underlying Llama Stack code may also add other system messages (for example, for formatting tool definitions).\n:param name: (Optional) The name of the system message participant." + description: A system message providing instructions or context to the model. OpenAITokenLogProb: properties: token: @@ -10959,7 +11009,7 @@ components: - logprob - top_logprobs title: OpenAITokenLogProb - description: "The log probability for a token from an OpenAI-compatible chat completion response.\n\n:token: The token\n:bytes: (Optional) The bytes for the token\n:logprob: The log probability of the token\n:top_logprobs: The top log probabilities for the token" + description: The log probability for a token from an OpenAI-compatible chat completion response. OpenAIToolMessageParam: properties: role: @@ -10982,7 +11032,7 @@ components: - tool_call_id - content title: OpenAIToolMessageParam - description: "A message representing the result of a tool invocation in an OpenAI-compatible chat completion request.\n\n:param role: Must be \"tool\" to identify this as a tool response\n:param tool_call_id: Unique identifier for the tool call this response is for\n:param content: The response content from the tool" + description: A message representing the result of a tool invocation in an OpenAI-compatible chat completion request. OpenAITopLogProb: properties: token: @@ -11001,7 +11051,7 @@ components: - token - logprob title: OpenAITopLogProb - description: "The top log probability for a token from an OpenAI-compatible chat completion response.\n\n:token: The token\n:bytes: (Optional) The bytes for the token\n:logprob: The log probability of the token" + description: The top log probability for a token from an OpenAI-compatible chat completion response. OpenAIUserMessageParam-Input: properties: role: @@ -11032,7 +11082,7 @@ components: required: - content title: OpenAIUserMessageParam - description: "A message from the user in an OpenAI-compatible chat completion request.\n\n:param role: Must be \"user\" to identify this as a user message\n:param content: The content of the message, which can include text and other media\n:param name: (Optional) The name of the user message participant." + description: A message from the user in an OpenAI-compatible chat completion request. OpenAIUserMessageParam-Output: properties: role: @@ -11063,7 +11113,7 @@ components: required: - content title: OpenAIUserMessageParam - description: "A message from the user in an OpenAI-compatible chat completion request.\n\n:param role: Must be \"user\" to identify this as a user message\n:param content: The content of the message, which can include text and other media\n:param name: (Optional) The name of the user message participant." 
+ description: A message from the user in an OpenAI-compatible chat completion request. OptimizerConfig: properties: optimizer_type: @@ -11084,7 +11134,7 @@ components: - weight_decay - num_warmup_steps title: OptimizerConfig - description: "Configuration parameters for the optimization algorithm.\n\n:param optimizer_type: Type of optimizer to use (adam, adamw, or sgd)\n:param lr: Learning rate for the optimizer\n:param weight_decay: Weight decay coefficient for regularization\n:param num_warmup_steps: Number of steps for learning rate warmup" + description: Configuration parameters for the optimization algorithm. OptimizerType: type: string enum: @@ -11092,14 +11142,14 @@ components: - adamw - sgd title: OptimizerType - description: "Available optimizer algorithms for training.\n:cvar adam: Adaptive Moment Estimation optimizer\n:cvar adamw: AdamW optimizer with weight decay\n:cvar sgd: Stochastic Gradient Descent optimizer" + description: Available optimizer algorithms for training. Order: type: string enum: - asc - desc title: Order - description: "Sort order for paginated responses.\n:cvar asc: Ascending order\n:cvar desc: Descending order" + description: Sort order for paginated responses. OutputTokensDetails: properties: reasoning_tokens: @@ -11150,7 +11200,7 @@ components: - version - prompt_id title: Prompt - description: "A prompt resource representing a stored OpenAI Compatible prompt template in Llama Stack.\n\n:param prompt: The system prompt text with variable placeholders. Variables are only supported when using the Responses API.\n:param version: Version (integer starting at 1, incremented on save)\n:param prompt_id: Unique identifier formatted as 'pmpt_<48-digit-hash>'\n:param variables: List of prompt variable names that can be used in the prompt template\n:param is_default: Boolean indicating whether this version is the default version for this prompt" + description: A prompt resource representing a stored OpenAI Compatible prompt template in Llama Stack. ProviderInfo: properties: api: @@ -11178,7 +11228,7 @@ components: - config - health title: ProviderInfo - description: "Information about a registered provider including its configuration and health status.\n\n:param api: The API name this provider implements\n:param provider_id: Unique identifier for the provider\n:param provider_type: The type of provider implementation\n:param config: Configuration parameters for the provider\n:param health: Current health status of the provider" + description: Information about a registered provider including its configuration and health status. QueryChunksResponse: properties: chunks: @@ -11196,7 +11246,7 @@ components: - chunks - scores title: QueryChunksResponse - description: "Response from querying chunks in a vector database.\n\n:param chunks: List of content chunks returned from the query\n:param scores: Relevance scores corresponding to each returned chunk" + description: Response from querying chunks in a vector database. 
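For the post-training schemas above, a minimal OptimizerConfig sketch (all four fields are required; the values are typical but arbitrary):

    optimizer_config = {
        "optimizer_type": "adamw",  # one of: adam, adamw, sgd
        "lr": 2e-5,
        "weight_decay": 0.01,
        "num_warmup_steps": 100,
    }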
RAGQueryConfig: properties: query_generator_config: @@ -11239,7 +11289,7 @@ components: weighted: '#/components/schemas/WeightedRanker' type: object title: RAGQueryConfig - description: "Configuration for the RAG query generation.\n\n:param query_generator_config: Configuration for the query generator.\n:param max_tokens_in_context: Maximum number of tokens in the context.\n:param max_chunks: Maximum number of chunks to retrieve.\n:param chunk_template: Template for formatting each retrieved chunk in the context.\n Available placeholders: {index} (1-based chunk ordinal), {chunk.content} (chunk content string), {metadata} (chunk metadata dict).\n Default: \"Result {index}\\nContent: {chunk.content}\\nMetadata: {metadata}\\n\"\n:param mode: Search mode for retrieval—either \"vector\", \"keyword\", or \"hybrid\". Default \"vector\".\n:param ranker: Configuration for the ranker to use in hybrid search. Defaults to RRF ranker." + description: Configuration for the RAG query generation. RAGQueryResult: properties: content: @@ -11270,7 +11320,7 @@ components: title: Metadata type: object title: RAGQueryResult - description: "Result of a RAG query containing retrieved content and metadata.\n\n:param content: (Optional) The retrieved content from the query\n:param metadata: Additional metadata about the query result" + description: Result of a RAG query containing retrieved content and metadata. RAGSearchMode: type: string enum: @@ -11293,7 +11343,7 @@ components: minimum: 0.0 type: object title: RRFRanker - description: "Reciprocal Rank Fusion (RRF) ranker configuration.\n\n:param type: The type of ranker, always \"rrf\"\n:param impact_factor: The impact factor for RRF scoring. Higher values give more weight to higher-ranked results.\n Must be greater than 0" + description: Reciprocal Rank Fusion (RRF) ranker configuration. RegexParserScoringFnParams: properties: type: @@ -11315,7 +11365,7 @@ components: description: Aggregation functions to apply to the scores of each row type: object title: RegexParserScoringFnParams - description: "Parameters for regex parser scoring function configuration.\n:param type: The type of scoring function parameters, always regex_parser\n:param parsing_regexes: Regex to extract the answer from generated response\n:param aggregation_functions: Aggregation functions to apply to the scores of each row" + description: Parameters for regex parser scoring function configuration. RerankData: properties: index: @@ -11329,7 +11379,7 @@ components: - index - relevance_score title: RerankData - description: "A single rerank result from a reranking response.\n\n:param index: The original index of the document in the input list\n:param relevance_score: The relevance score from the model output. Values are inverted when applicable so that higher scores indicate greater relevance." + description: A single rerank result from a reranking response. RerankResponse: properties: data: @@ -11341,7 +11391,7 @@ components: required: - data title: RerankResponse - description: "Response from a reranking request.\n\n:param data: List of rerank result objects, sorted by relevance score (descending)" + description: Response from a reranking request. 
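A sketch of a RAGQueryConfig using the documented chunk_template placeholders and an RRF ranker (the query generator settings and numeric values are assumptions):

    rag_query_config = {
        "query_generator_config": {"type": "default", "separator": " "},
        "max_tokens_in_context": 4096,
        "max_chunks": 5,
        "chunk_template": "Result {index}\nContent: {chunk.content}\nMetadata: {metadata}\n",
        "mode": "hybrid",
        "ranker": {"type": "rrf", "impact_factor": 60.0},
    }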
RouteInfo: properties: route: @@ -11361,7 +11411,7 @@ components: - method - provider_types title: RouteInfo - description: "Information about an API route including its path, method, and implementing providers.\n\n:param route: The API endpoint path\n:param method: HTTP method for the route\n:param provider_types: List of provider types that implement this route" + description: Information about an API route including its path, method, and implementing providers. RowsDataSource: properties: type: @@ -11379,14 +11429,14 @@ components: required: - rows title: RowsDataSource - description: "A dataset stored in rows.\n:param rows: The dataset is stored in rows. E.g.\n - [\n {\"messages\": [{\"role\": \"user\", \"content\": \"Hello, world!\"}, {\"role\": \"assistant\", \"content\": \"Hello, world!\"}]}\n ]" + description: A dataset stored in rows. RunShieldResponse: properties: violation: $ref: '#/components/schemas/SafetyViolation' type: object title: RunShieldResponse - description: "Response from running a safety shield.\n\n:param violation: (Optional) Safety violation detected by the shield, if any" + description: Response from running a safety shield. SafetyViolation: properties: violation_level: @@ -11402,7 +11452,7 @@ components: required: - violation_level title: SafetyViolation - description: "Details of a safety violation detected by content moderation.\n\n:param violation_level: Severity level of the violation\n:param user_message: (Optional) Message to convey to the user about the violation\n:param metadata: Additional metadata including specific violation codes for debugging and telemetry" + description: Details of a safety violation detected by content moderation. SamplingParams: properties: strategy: @@ -11431,7 +11481,7 @@ components: type: array type: object title: SamplingParams - description: "Sampling parameters.\n\n:param strategy: The sampling strategy.\n:param max_tokens: The maximum number of tokens that can be generated in the completion. The token count of\n your prompt plus max_tokens cannot exceed the model's context length.\n:param repetition_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens\n based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\n:param stop: Up to 4 sequences where the API will stop generating further tokens.\n The returned text will not contain the stop sequence." + description: Sampling parameters. ScoreBatchResponse: properties: dataset_id: @@ -11446,7 +11496,7 @@ components: required: - results title: ScoreBatchResponse - description: "Response from batch scoring operations on datasets.\n\n:param dataset_id: (Optional) The identifier of the dataset that was scored\n:param results: A map of scoring function name to ScoringResult" + description: Response from batch scoring operations on datasets. ScoreResponse: properties: results: @@ -11458,7 +11508,7 @@ components: required: - results title: ScoreResponse - description: "The response from scoring.\n\n:param results: A map of scoring function name to ScoringResult." + description: The response from scoring. ScoringFn-Output: properties: identifier: @@ -11532,7 +11582,7 @@ components: - provider_id - return_type title: ScoringFn - description: "A scoring function resource for evaluating model outputs.\n:param type: The resource type, always scoring_function" + description: A scoring function resource for evaluating model outputs. 
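Two small sketches for the schemas above: SamplingParams with a top_p strategy (defined later in this file), and a RowsDataSource using the example rows from the docstring:

    sampling_params = {
        "strategy": {"type": "top_p", "temperature": 0.7, "top_p": 0.9},
        "max_tokens": 512,
        "repetition_penalty": 1.1,
        "stop": ["</answer>"],
    }
    rows_data_source = {
        "type": "rows",
        "rows": [
            {
                "messages": [
                    {"role": "user", "content": "Hello, world!"},
                    {"role": "assistant", "content": "Hello, world!"},
                ]
            }
        ],
    }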
ScoringResult: properties: score_rows: @@ -11550,7 +11600,7 @@ components: - score_rows - aggregated_results title: ScoringResult - description: "A scoring result for a single row.\n\n:param score_rows: The scoring result for each row. Each row is a map of column name to value.\n:param aggregated_results: Map of metric name to aggregated value" + description: A scoring result for a single row. SearchRankingOptions: properties: ranker: @@ -11562,7 +11612,7 @@ components: type: number type: object title: SearchRankingOptions - description: "Options for ranking and filtering search results.\n\n:param ranker: (Optional) Name of the ranking algorithm to use\n:param score_threshold: (Optional) Minimum relevance score threshold for results" + description: Options for ranking and filtering search results. Shield: properties: identifier: @@ -11591,7 +11641,7 @@ components: - identifier - provider_id title: Shield - description: "A safety shield resource that can be used to check content.\n\n:param params: (Optional) Configuration parameters for the shield\n:param type: The resource type, always shield" + description: A safety shield resource that can be used to check content. ShieldCallStep-Output: properties: turn_id: @@ -11621,7 +11671,7 @@ components: - step_id - violation title: ShieldCallStep - description: "A shield call step in an agent turn.\n\n:param violation: The violation from the shield call." + description: A shield call step in an agent turn. StopReason: type: string enum: @@ -11638,7 +11688,7 @@ components: default: string type: object title: StringType - description: "Parameter type for string values.\n\n:param type: Discriminator type. Always \"string\"" + description: Parameter type for string values. SystemMessage: properties: role: @@ -11672,14 +11722,14 @@ components: required: - content title: SystemMessage - description: "A system message providing instructions or context to the model.\n\n:param role: Must be \"system\" to identify this as a system message\n:param content: The content of the \"system prompt\". If multiple system messages are provided, they are concatenated. The underlying Llama Stack code may also add other system messages (for example, for formatting tool definitions)." + description: A system message providing instructions or context to the model. SystemMessageBehavior: type: string enum: - append - replace title: SystemMessageBehavior - description: "Config for how to override the default system prompt.\n\n:cvar append: Appends the provided system message to the default system prompt:\n https://www.llama.com/docs/model-cards-and-prompt-formats/llama3_2/#-function-definitions-in-the-system-prompt-\n:cvar replace: Replaces the default system prompt with the provided system message. The system message can include the string\n '{{function_definitions}}' to indicate where the function definitions should be inserted." + description: Config for how to override the default system prompt. TextContentItem: properties: type: @@ -11694,7 +11744,7 @@ components: required: - text title: TextContentItem - description: "A text content item\n\n:param type: Discriminator type of the content item. Always \"text\"\n:param text: Text content" + description: A text content item ToolCall: properties: call_id: @@ -11721,7 +11771,7 @@ components: - required - none title: ToolChoice - description: "Whether tool use is required or automatic. This is a hint to the model which may not be followed. 
It depends on the Instruction Following capabilities of the model.\n\n:cvar auto: The model may use tools if it determines that is appropriate.\n:cvar required: The model must use tools.\n:cvar none: The model must not use tools." + description: Whether tool use is required or automatic. This is a hint to the model which may not be followed. It depends on the Instruction Following capabilities of the model. ToolConfig: properties: tool_choice: @@ -11737,7 +11787,7 @@ components: $ref: '#/components/schemas/SystemMessageBehavior' type: object title: ToolConfig - description: "Configuration for tool use.\n\n:param tool_choice: (Optional) Whether tool use is automatic, required, or none. Can also specify a tool name to use a specific tool. Defaults to ToolChoice.auto.\n:param tool_prompt_format: (Optional) Instructs the model how to format tool calls. By default, Llama Stack will attempt to use a format that is best adapted to the model.\n - `ToolPromptFormat.json`: The tool calls are formatted as a JSON object.\n - `ToolPromptFormat.function_tag`: The tool calls are enclosed in a tag.\n - `ToolPromptFormat.python_list`: The tool calls are output as Python syntax -- a list of function calls.\n:param system_message_behavior: (Optional) Config for how to override the default system prompt.\n - `SystemMessageBehavior.append`: Appends the provided system message to the default system prompt.\n - `SystemMessageBehavior.replace`: Replaces the default system prompt with the provided system message. The system message can include the string\n '{{function_definitions}}' to indicate where the function definitions should be inserted." + description: Configuration for tool use. ToolDef: properties: toolgroup_id: @@ -11765,7 +11815,7 @@ components: required: - name title: ToolDef - description: "Tool definition used in runtime contexts.\n\n:param name: Name of the tool\n:param description: (Optional) Human-readable description of what the tool does\n:param input_schema: (Optional) JSON Schema for tool inputs (MCP inputSchema)\n:param output_schema: (Optional) JSON Schema for tool outputs (MCP outputSchema)\n:param metadata: (Optional) Additional metadata about the tool\n:param toolgroup_id: (Optional) ID of the tool group this tool belongs to" + description: Tool definition used in runtime contexts. ToolExecutionStep-Output: properties: turn_id: @@ -11804,7 +11854,7 @@ components: - tool_calls - tool_responses title: ToolExecutionStep - description: "A tool execution step in an agent turn.\n\n:param tool_calls: The tool calls to execute.\n:param tool_responses: The tool responses from the tool calls." + description: A tool execution step in an agent turn. ToolGroup: properties: identifier: @@ -11835,7 +11885,7 @@ components: - identifier - provider_id title: ToolGroup - description: "A group of related tools managed together.\n\n:param type: Type of resource, always 'tool_group'\n:param mcp_endpoint: (Optional) Model Context Protocol endpoint for remote tools\n:param args: (Optional) Additional arguments for the tool group" + description: A group of related tools managed together. 
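A sketch of a ToolDef with a JSON-Schema input_schema, plus a matching ToolConfig (the tool, its group id, and the chosen prompt format are invented examples):

    tool_def = {
        "name": "get_weather",
        "description": "Look up the current weather for a city.",
        "input_schema": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
        "toolgroup_id": "example::weather",
    }
    tool_config = {"tool_choice": "auto", "tool_prompt_format": "json"}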
ToolInvocationResult: properties: content: @@ -11872,7 +11922,7 @@ components: type: object type: object title: ToolInvocationResult - description: "Result of a tool invocation.\n\n:param content: (Optional) The output content from the tool execution\n:param error_message: (Optional) Error message if the tool execution failed\n:param error_code: (Optional) Numeric error code if the tool execution failed\n:param metadata: (Optional) Additional metadata about the tool execution" + description: Result of a tool invocation. ToolPromptFormat: type: string enum: @@ -11880,7 +11930,7 @@ components: - function_tag - python_list title: ToolPromptFormat - description: "Prompt format for calling custom / zero shot tools.\n\n:cvar json: JSON format for calling tools. It takes the form:\n {\n \"type\": \"function\",\n \"function\" : {\n \"name\": \"function_name\",\n \"description\": \"function_description\",\n \"parameters\": {...}\n }\n }\n:cvar function_tag: Function tag format, pseudo-XML. This looks like:\n (parameters)\n\n:cvar python_list: Python list. The output is a valid Python expression that can be\n evaluated to a list. Each element in the list is a function call. Example:\n [\"function_name(param1, param2)\", \"function_name(param1, param2)\"]" + description: Prompt format for calling custom / zero shot tools. ToolResponse-Input: properties: call_id: @@ -11923,7 +11973,7 @@ components: - tool_name - content title: ToolResponse - description: "Response from a tool invocation.\n\n:param call_id: Unique identifier for the tool call this response is for\n:param tool_name: Name of the tool that was invoked\n:param content: The response content from the tool\n:param metadata: (Optional) Additional metadata about the tool response" + description: Response from a tool invocation. ToolResponse-Output: properties: call_id: @@ -11966,7 +12016,7 @@ components: - tool_name - content title: ToolResponse - description: "Response from a tool invocation.\n\n:param call_id: Unique identifier for the tool call this response is for\n:param tool_name: Name of the tool that was invoked\n:param content: The response content from the tool\n:param metadata: (Optional) Additional metadata about the tool response" + description: Response from a tool invocation. ToolResponseMessage-Output: properties: role: @@ -12004,7 +12054,7 @@ components: - call_id - content title: ToolResponseMessage - description: "A message representing the result of a tool invocation.\n\n:param role: Must be \"tool\" to identify this as a tool response\n:param call_id: Unique identifier for the tool call this response is for\n:param content: The response content from the tool" + description: A message representing the result of a tool invocation. TopKSamplingStrategy: properties: type: @@ -12020,7 +12070,7 @@ components: required: - top_k title: TopKSamplingStrategy - description: "Top-k sampling strategy that restricts sampling to the k most likely tokens.\n\n:param type: Must be \"top_k\" to identify this sampling strategy\n:param top_k: Number of top tokens to consider for sampling. Must be at least 1" + description: Top-k sampling strategy that restricts sampling to the k most likely tokens. 
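A sketch of a ToolResponse paired with a top_k sampling strategy (the call id, tool output, and k value are placeholders):

    tool_response = {
        "call_id": "call_1",
        "tool_name": "get_weather",
        "content": "Sunny, 22 degrees",
        "metadata": {"source": "example"},
    }
    top_k_strategy = {"type": "top_k", "top_k": 40}  # top_k must be at least 1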
TopPSamplingStrategy: properties: type: @@ -12040,7 +12090,7 @@ components: required: - temperature title: TopPSamplingStrategy - description: "Top-p (nucleus) sampling strategy that samples from the smallest set of tokens with cumulative probability >= p.\n\n:param type: Must be \"top_p\" to identify this sampling strategy\n:param temperature: Controls randomness in sampling. Higher values increase randomness\n:param top_p: Cumulative probability threshold for nucleus sampling. Defaults to 0.95" + description: Top-p (nucleus) sampling strategy that samples from the smallest set of tokens with cumulative probability >= p. TrainingConfig: properties: n_epochs: @@ -12072,7 +12122,7 @@ components: required: - n_epochs title: TrainingConfig - description: "Comprehensive configuration for the training process.\n\n:param n_epochs: Number of training epochs to run\n:param max_steps_per_epoch: Maximum number of steps to run per epoch\n:param gradient_accumulation_steps: Number of steps to accumulate gradients before updating\n:param max_validation_steps: (Optional) Maximum number of validation steps per epoch\n:param data_config: (Optional) Configuration for data loading and formatting\n:param optimizer_config: (Optional) Configuration for the optimization algorithm\n:param efficiency_config: (Optional) Configuration for memory and compute optimizations\n:param dtype: (Optional) Data type for model parameters (bf16, fp16, fp32)" + description: Comprehensive configuration for the training process. Turn: properties: turn_id: @@ -12128,7 +12178,7 @@ components: - output_message - started_at title: Turn - description: "A single turn in an interaction with an Agentic System.\n\n:param turn_id: Unique identifier for the turn within a session\n:param session_id: Unique identifier for the conversation session\n:param input_messages: List of messages that initiated this turn\n:param steps: Ordered list of processing steps executed during this turn\n:param output_message: The model's generated response containing content and metadata\n:param output_attachments: (Optional) Files or media attached to the agent's response\n:param started_at: Timestamp when the turn began\n:param completed_at: (Optional) Timestamp when the turn finished, if completed" + description: A single turn in an interaction with an Agentic System. URIDataSource: properties: type: @@ -12143,7 +12193,7 @@ components: required: - uri title: URIDataSource - description: "A dataset that can be obtained from a URI.\n:param uri: The dataset can be obtained from a URI. E.g.\n - \"https://mywebsite.com/mydata.jsonl\"\n - \"lsfs://mydata.jsonl\"\n - \"data:csv;base64,{base64_content}\"" + description: A dataset that can be obtained from a URI. URL: properties: uri: @@ -12153,7 +12203,7 @@ components: required: - uri title: URL - description: "A URL reference to external content.\n\n:param uri: The URL string pointing to the resource" + description: A URL reference to external content. UnionType: properties: type: @@ -12163,7 +12213,7 @@ components: default: union type: object title: UnionType - description: "Parameter type for union values.\n\n:param type: Discriminator type. Always \"union\"" + description: Parameter type for union values. 
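A sketch of a TrainingConfig that reuses the optimizer sketch from earlier and a URIDataSource built from the docstring's example URI (epoch and step counts are arbitrary):

    training_config = {
        "n_epochs": 3,
        "max_steps_per_epoch": 100,
        "gradient_accumulation_steps": 4,
        "optimizer_config": {
            "optimizer_type": "adamw",
            "lr": 2e-5,
            "weight_decay": 0.01,
            "num_warmup_steps": 100,
        },
        "dtype": "bf16",
    }
    uri_data_source = {"type": "uri", "uri": "https://mywebsite.com/mydata.jsonl"}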
UserMessage-Input: properties: role: @@ -12219,7 +12269,7 @@ components: required: - content title: UserMessage - description: "A message from the user in a chat conversation.\n\n:param role: Must be \"user\" to identify this as a user message\n:param content: The content of the message, which can include text and other media\n:param context: (Optional) This field is used internally by Llama Stack to pass RAG context. This field may be removed in the API in the future." + description: A message from the user in a chat conversation. UserMessage-Output: properties: role: @@ -12275,7 +12325,7 @@ components: required: - content title: UserMessage - description: "A message from the user in a chat conversation.\n\n:param role: Must be \"user\" to identify this as a user message\n:param content: The content of the message, which can include text and other media\n:param context: (Optional) This field is used internally by Llama Stack to pass RAG context. This field may be removed in the API in the future." + description: A message from the user in a chat conversation. VectorStoreChunkingStrategyAuto: properties: type: @@ -12285,7 +12335,7 @@ components: default: auto type: object title: VectorStoreChunkingStrategyAuto - description: "Automatic chunking strategy for vector store files.\n\n:param type: Strategy type, always \"auto\" for automatic chunking" + description: Automatic chunking strategy for vector store files. VectorStoreChunkingStrategyStatic: properties: type: @@ -12299,7 +12349,7 @@ components: required: - static title: VectorStoreChunkingStrategyStatic - description: "Static chunking strategy with configurable parameters.\n\n:param type: Strategy type, always \"static\" for static chunking\n:param static: Configuration parameters for the static chunking strategy" + description: Static chunking strategy with configurable parameters. VectorStoreChunkingStrategyStaticConfig: properties: chunk_overlap_tokens: @@ -12314,7 +12364,7 @@ components: default: 800 type: object title: VectorStoreChunkingStrategyStaticConfig - description: "Configuration for static chunking strategy.\n\n:param chunk_overlap_tokens: Number of tokens to overlap between adjacent chunks\n:param max_chunk_size_tokens: Maximum number of tokens per chunk, must be between 100 and 4096" + description: Configuration for static chunking strategy. VectorStoreContent: properties: type: @@ -12329,7 +12379,7 @@ components: - type - text title: VectorStoreContent - description: "Content item from a vector store file or search result.\n\n:param type: Content type, currently only \"text\" is supported\n:param text: The actual text content" + description: Content item from a vector store file or search result. VectorStoreFileBatchObject: properties: id: @@ -12366,7 +12416,7 @@ components: - status - file_counts title: VectorStoreFileBatchObject - description: "OpenAI Vector Store File Batch object.\n\n:param id: Unique identifier for the file batch\n:param object: Object type identifier, always \"vector_store.file_batch\"\n:param created_at: Timestamp when the file batch was created\n:param vector_store_id: ID of the vector store containing the file batch\n:param status: Current processing status of the file batch\n:param file_counts: File processing status counts for the batch" + description: OpenAI Vector Store File Batch object. 
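A sketch of the two chunking-strategy shapes above, as they would appear when creating a vector store or file (token counts are illustrative; max_chunk_size_tokens must be between 100 and 4096):

    auto_strategy = {"type": "auto"}
    static_strategy = {
        "type": "static",
        "static": {"max_chunk_size_tokens": 800, "chunk_overlap_tokens": 400},
    }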
VectorStoreFileCounts: properties: completed: @@ -12392,7 +12442,7 @@ components: - in_progress - total title: VectorStoreFileCounts - description: "File processing status counts for a vector store.\n\n:param completed: Number of files that have been successfully processed\n:param cancelled: Number of files that had their processing cancelled\n:param failed: Number of files that failed to process\n:param in_progress: Number of files currently being processed\n:param total: Total number of files in the vector store" + description: File processing status counts for a vector store. VectorStoreFileLastError: properties: code: @@ -12410,7 +12460,7 @@ components: - code - message title: VectorStoreFileLastError - description: "Error information for failed vector store file processing.\n\n:param code: Error code indicating the type of failure\n:param message: Human-readable error message describing the failure" + description: Error information for failed vector store file processing. VectorStoreFileObject: properties: id: @@ -12465,7 +12515,7 @@ components: - status - vector_store_id title: VectorStoreFileObject - description: "OpenAI Vector Store File object.\n\n:param id: Unique identifier for the file\n:param object: Object type identifier, always \"vector_store.file\"\n:param attributes: Key-value attributes associated with the file\n:param chunking_strategy: Strategy used for splitting the file into chunks\n:param created_at: Timestamp when the file was added to the vector store\n:param last_error: (Optional) Error information if file processing failed\n:param status: Current processing status of the file\n:param usage_bytes: Storage space used by this file in bytes\n:param vector_store_id: ID of the vector store containing this file" + description: OpenAI Vector Store File object. VectorStoreObject: properties: id: @@ -12511,7 +12561,7 @@ components: - created_at - file_counts title: VectorStoreObject - description: "OpenAI Vector Store object.\n\n:param id: Unique identifier for the vector store\n:param object: Object type identifier, always \"vector_store\"\n:param created_at: Timestamp when the vector store was created\n:param name: (Optional) Name of the vector store\n:param usage_bytes: Storage space used by the vector store in bytes\n:param file_counts: File processing status counts for the vector store\n:param status: Current status of the vector store\n:param expires_after: (Optional) Expiration policy for the vector store\n:param expires_at: (Optional) Timestamp when the vector store will expire\n:param last_active_at: (Optional) Timestamp of last activity on the vector store\n:param metadata: Set of key-value pairs that can be attached to the vector store" + description: OpenAI Vector Store object. VectorStoreSearchResponse: properties: file_id: @@ -12543,7 +12593,7 @@ components: - score - content title: VectorStoreSearchResponse - description: "Response from searching a vector store.\n\n:param file_id: Unique identifier of the file containing the result\n:param filename: Name of the file containing the result\n:param score: Relevance score for this search result\n:param attributes: (Optional) Key-value attributes associated with the file\n:param content: List of content items matching the search query" + description: Response from searching a vector store. 
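Likewise for search results: `VectorStoreSearchResponse` lists its fields (file_id, filename, score, optional attributes, content) only in the removed docstring text. A hypothetical single result, with invented values and `content` items shaped like `VectorStoreContent` from the previous chunk, looks roughly like this:

# Hypothetical search result shaped like VectorStoreSearchResponse;
# every value here is invented for illustration.
search_result = {
    "file_id": "file_abc",
    "filename": "notes.txt",
    "score": 0.87,
    "attributes": {"source": "demo"},  # optional key-value attributes
    "content": [
        {"type": "text", "text": "…matching passage…"},  # VectorStoreContent
    ],
}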
VectorStoreSearchResponsePage: properties: object: @@ -12570,7 +12620,7 @@ components: - search_query - data title: VectorStoreSearchResponsePage - description: "Paginated response from searching a vector store.\n\n:param object: Object type identifier for the search results page\n:param search_query: The original search query that was executed\n:param data: List of search result objects\n:param has_more: Whether there are more results available beyond this page\n:param next_page: (Optional) Token for retrieving the next page of results" + description: Paginated response from searching a vector store. VersionInfo: properties: version: @@ -12580,7 +12630,7 @@ components: required: - version title: VersionInfo - description: "Version information for the service.\n\n:param version: Version number of the service" + description: Version information for the service. ViolationLevel: type: string enum: @@ -12588,7 +12638,7 @@ components: - warn - error title: ViolationLevel - description: "Severity level of a safety violation.\n\n:cvar INFO: Informational level violation that does not require action\n:cvar WARN: Warning level violation that suggests caution but allows continuation\n:cvar ERROR: Error level violation that requires blocking or intervention" + description: Severity level of a safety violation. WeightedRanker: properties: type: @@ -12605,7 +12655,7 @@ components: default: 0.5 type: object title: WeightedRanker - description: "Weighted ranker configuration that combines vector and keyword scores.\n\n:param type: The type of ranker, always \"weighted\"\n:param alpha: Weight factor between 0 and 1.\n 0 means only use keyword scores,\n 1 means only use vector scores,\n values in between blend both scores." + description: Weighted ranker configuration that combines vector and keyword scores. _URLOrData: properties: url: @@ -12616,7 +12666,7 @@ components: type: string type: object title: _URLOrData - description: "A URL or a base64 encoded string\n\n:param url: A URL of the image or data URL in the format of data:image/{type};base64,{data}. Note that URL could have length limits.\n:param data: base64 encoded image data as string" + description: A URL or a base64 encoded string __main_____agents_agent_id_session_Request: properties: agent_id: @@ -13186,7 +13236,7 @@ components: - ranking_options title: _vector_stores_vector_store_id_search_Request Error: - description: "Error response from the API. Roughly follows RFC 7807.\n\n:param status: HTTP status code\n:param title: Error title, a short summary of the error which is invariant for an error type\n:param detail: Error detail, a longer human-readable description of the error\n:param instance: (Optional) A URL which can be used to retrieve more information about the specific occurrence of the error" + description: Error response from the API. Roughly follows RFC 7807. properties: status: title: Status @@ -13208,7 +13258,7 @@ components: title: Error type: object Agent: - description: "An agent instance with configuration and metadata.\n\n:param agent_id: Unique identifier for the agent\n:param agent_config: Configuration settings for the agent\n:param created_at: Timestamp when the agent was created" + description: An agent instance with configuration and metadata. 
properties: agent_id: title: Agent Id @@ -13226,7 +13276,7 @@ components: title: Agent type: object AgentStepResponse: - description: "Response containing details of a specific agent step.\n\n:param step: The complete step data and execution details" + description: Response containing details of a specific agent step. properties: step: discriminator: @@ -13247,7 +13297,7 @@ components: title: AgentStepResponse type: object CompletionMessage: - description: "A message containing the model's (assistant) response in a chat conversation.\n\n:param role: Must be \"assistant\" to identify this as the model's response\n:param content: The content of the model's response\n:param stop_reason: Reason why the model stopped generating. Options are:\n - `StopReason.end_of_turn`: The model finished generating the entire response.\n - `StopReason.end_of_message`: The model finished generating but generated a partial response -- usually, a tool call. The user may call the tool and continue the conversation with the tool's response.\n - `StopReason.out_of_tokens`: The model ran out of token budget.\n:param tool_calls: List of tool calls. Each tool call is a ToolCall object." + description: A message containing the model's (assistant) response in a chat conversation. properties: role: const: assistant @@ -13289,7 +13339,7 @@ components: title: CompletionMessage type: object InferenceStep: - description: "An inference step in an agent turn.\n\n:param model_response: The response from the LLM." + description: An inference step in an agent turn. properties: turn_id: title: Turn Id @@ -13321,7 +13371,7 @@ components: title: InferenceStep type: object ListOpenAIResponseInputItem: - description: "List container for OpenAI response input items.\n\n:param data: List of input items\n:param object: Object type identifier, always \"list\"" + description: List container for OpenAI response input items. properties: data: items: @@ -13359,7 +13409,7 @@ components: title: ListOpenAIResponseInputItem type: object ListOpenAIResponseObject: - description: "Paginated list of OpenAI response objects with navigation metadata.\n\n:param data: List of response objects with their input context\n:param has_more: Whether there are more results available beyond this page\n:param first_id: Identifier of the first item in this page\n:param last_id: Identifier of the last item in this page\n:param object: Object type identifier, always \"list\"" + description: Paginated list of OpenAI response objects with navigation metadata. properties: data: items: @@ -13388,7 +13438,7 @@ components: title: ListOpenAIResponseObject type: object MemoryRetrievalStep: - description: "A memory retrieval step in an agent turn.\n\n:param vector_store_ids: The IDs of the vector databases to retrieve context from.\n:param inserted_context: The context retrieved from the vector databases." + description: A memory retrieval step in an agent turn. properties: turn_id: title: Turn Id @@ -13444,7 +13494,7 @@ components: title: MemoryRetrievalStep type: object OpenAIDeleteResponseObject: - description: "Response object confirming deletion of an OpenAI response.\n\n:param id: Unique identifier of the deleted response\n:param object: Object type identifier, always \"response\"\n:param deleted: Deletion confirmation flag, always True" + description: Response object confirming deletion of an OpenAI response. 
properties: id: title: Id @@ -13463,7 +13513,7 @@ components: title: OpenAIDeleteResponseObject type: object PaginatedResponse: - description: "A generic paginated response that follows a simple format.\n\n:param data: The list of items for the current page\n:param has_more: Whether there are more items available after this set\n:param url: The URL for accessing this list" + description: A generic paginated response that follows a simple format. properties: data: items: @@ -13484,7 +13534,7 @@ components: title: PaginatedResponse type: object Session: - description: "A single session of an interaction with an Agentic System.\n\n:param session_id: Unique identifier for the conversation session\n:param session_name: Human-readable name for the session\n:param turns: List of all turns that have occurred in this session\n:param started_at: Timestamp when the session was created" + description: A single session of an interaction with an Agentic System. properties: session_id: title: Session Id @@ -13509,7 +13559,7 @@ components: title: Session type: object ShieldCallStep: - description: "A shield call step in an agent turn.\n\n:param violation: The violation from the shield call." + description: A shield call step in an agent turn. properties: turn_id: title: Turn Id @@ -13541,7 +13591,7 @@ components: title: ShieldCallStep type: object ToolExecutionStep: - description: "A tool execution step in an agent turn.\n\n:param tool_calls: The tool calls to execute.\n:param tool_responses: The tool responses from the tool calls." + description: A tool execution step in an agent turn. properties: turn_id: title: Turn Id @@ -13582,7 +13632,7 @@ components: title: ToolExecutionStep type: object ToolResponse: - description: "Response from a tool invocation.\n\n:param call_id: Unique identifier for the tool call this response is for\n:param tool_name: Name of the tool that was invoked\n:param content: The response content from the tool\n:param metadata: (Optional) Additional metadata about the tool response" + description: Response from a tool invocation. properties: call_id: title: Call Id @@ -13701,7 +13751,7 @@ components: title: ConversationItemDeletedResource type: object ListOpenAIFileResponse: - description: "Response for listing files in OpenAI Files API.\n\n:param data: List of file objects\n:param has_more: Whether there are more files available beyond this page\n:param first_id: ID of the first file in the list for pagination\n:param last_id: ID of the last file in the list for pagination\n:param object: The object type, which is always \"list\"" + description: Response for listing files in OpenAI Files API. properties: data: items: @@ -13730,7 +13780,7 @@ components: title: ListOpenAIFileResponse type: object OpenAIFileDeleteResponse: - description: "Response for deleting a file in OpenAI Files API.\n\n:param id: The file identifier that was deleted\n:param object: The object type, which is always \"file\"\n:param deleted: Whether the file was successfully deleted" + description: Response for deleting a file in OpenAI Files API. 
properties: id: title: Id @@ -13749,7 +13799,7 @@ components: title: OpenAIFileDeleteResponse type: object ListOpenAIChatCompletionResponse: - description: "Response from listing OpenAI-compatible chat completions.\n\n:param data: List of chat completion objects with their input messages\n:param has_more: Whether there are more completions available beyond this list\n:param first_id: ID of the first completion in this list\n:param last_id: ID of the last completion in this list\n:param object: Must be \"list\" to identify this as a list response" + description: Response from listing OpenAI-compatible chat completions. properties: data: items: @@ -15440,7 +15490,7 @@ components: title: ListOpenAIChatCompletionResponse type: object OpenAIAssistantMessageParam: - description: "A message containing the model's (assistant) response in an OpenAI-compatible chat completion request.\n\n:param role: Must be \"assistant\" to identify this as the model's response\n:param content: The content of the model's response\n:param name: (Optional) The name of the assistant message participant.\n:param tool_calls: List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object." + description: A message containing the model's (assistant) response in an OpenAI-compatible chat completion request. properties: role: const: assistant @@ -15468,7 +15518,7 @@ components: title: OpenAIAssistantMessageParam type: object OpenAIChoice: - description: "A choice from an OpenAI-compatible chat completion response.\n\n:param message: The message from the model\n:param finish_reason: The reason the model stopped generating\n:param index: The index of the choice\n:param logprobs: (Optional) The log probabilities for the tokens in the message" + description: A choice from an OpenAI-compatible chat completion response. properties: message: discriminator: @@ -15502,7 +15552,7 @@ components: title: OpenAIChoice type: object OpenAIChoiceLogprobs: - description: "The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response.\n\n:param content: (Optional) The log probabilities for the tokens in the message\n:param refusal: (Optional) The log probabilities for the tokens in the message" + description: The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response. properties: content: title: Content @@ -15569,7 +15619,7 @@ components: title: OpenAICompletionWithInputMessages type: object OpenAIUserMessageParam: - description: "A message from the user in an OpenAI-compatible chat completion request.\n\n:param role: Must be \"user\" to identify this as a user message\n:param content: The content of the message, which can include text and other media\n:param name: (Optional) The name of the user message participant." + description: A message from the user in an OpenAI-compatible chat completion request. properties: role: const: user @@ -15601,7 +15651,7 @@ components: title: OpenAIUserMessageParam type: object Checkpoint: - description: "Checkpoint created during training runs.\n\n:param identifier: Unique identifier for the checkpoint\n:param created_at: Timestamp when the checkpoint was created\n:param epoch: Training epoch when the checkpoint was saved\n:param post_training_job_id: Identifier of the training job that created this checkpoint\n:param path: File system path where the checkpoint is stored\n:param training_metrics: (Optional) Training metrics associated with this checkpoint" + description: Checkpoint created during training runs. 
properties: identifier: title: Identifier @@ -15631,7 +15681,7 @@ components: title: Checkpoint type: object PostTrainingJobArtifactsResponse: - description: "Artifacts of a finetuning job.\n\n:param job_uuid: Unique identifier for the training job\n:param checkpoints: List of model checkpoints created during training" + description: Artifacts of a finetuning job. properties: job_uuid: title: Job Uuid @@ -15646,7 +15696,7 @@ components: title: PostTrainingJobArtifactsResponse type: object PostTrainingJobStatusResponse: - description: "Status of a finetuning job.\n\n:param job_uuid: Unique identifier for the training job\n:param status: Current status of the training job\n:param scheduled_at: (Optional) Timestamp when the job was scheduled\n:param started_at: (Optional) Timestamp when the job execution began\n:param completed_at: (Optional) Timestamp when the job finished, if completed\n:param resources_allocated: (Optional) Information about computational resources allocated to the job\n:param checkpoints: List of model checkpoints created during training" + description: Status of a finetuning job. properties: job_uuid: title: Job Uuid @@ -15684,7 +15734,7 @@ components: title: PostTrainingJobStatusResponse type: object ScoringFn: - description: "A scoring function resource for evaluating model outputs.\n:param type: The resource type, always scoring_function" + description: A scoring function resource for evaluating model outputs. properties: identifier: description: Unique identifier for this resource in llama stack @@ -16484,7 +16534,7 @@ components: title: URL type: object ListToolDefsResponse: - description: "Response containing a list of tool definitions.\n\n:param data: List of tool definitions" + description: Response containing a list of tool definitions. properties: data: items: @@ -16496,7 +16546,7 @@ components: title: ListToolDefsResponse type: object VectorStoreDeleteResponse: - description: "Response from deleting a vector store.\n\n:param id: Unique identifier of the deleted vector store\n:param object: Object type identifier for the deletion response\n:param deleted: Whether the deletion operation was successful" + description: Response from deleting a vector store. properties: id: title: Id @@ -16514,7 +16564,7 @@ components: title: VectorStoreDeleteResponse type: object VectorStoreFileContentsResponse: - description: "Response from retrieving the contents of a vector store file.\n\n:param file_id: Unique identifier for the file\n:param filename: Name of the file\n:param attributes: Key-value attributes associated with the file\n:param content: List of content items from the file" + description: Response from retrieving the contents of a vector store file. properties: file_id: title: File Id @@ -16539,7 +16589,7 @@ components: title: VectorStoreFileContentsResponse type: object VectorStoreFileDeleteResponse: - description: "Response from deleting a vector store file.\n\n:param id: Unique identifier of the deleted file\n:param object: Object type identifier for the deletion response\n:param deleted: Whether the deletion operation was successful" + description: Response from deleting a vector store file. 
properties: id: title: Id @@ -16557,7 +16607,7 @@ components: title: VectorStoreFileDeleteResponse type: object VectorStoreFilesListInBatchResponse: - description: "Response from listing files in a vector store file batch.\n\n:param object: Object type identifier, always \"list\"\n:param data: List of vector store file objects in the batch\n:param first_id: (Optional) ID of the first file in the list for pagination\n:param last_id: (Optional) ID of the last file in the list for pagination\n:param has_more: Whether there are more files available beyond this page" + description: Response from listing files in a vector store file batch. properties: object: default: list @@ -16585,7 +16635,7 @@ components: title: VectorStoreFilesListInBatchResponse type: object VectorStoreListFilesResponse: - description: "Response from listing files in a vector store.\n\n:param object: Object type identifier, always \"list\"\n:param data: List of vector store file objects\n:param first_id: (Optional) ID of the first file in the list for pagination\n:param last_id: (Optional) ID of the last file in the list for pagination\n:param has_more: Whether there are more files available beyond this page" + description: Response from listing files in a vector store. properties: object: default: list @@ -16613,7 +16663,7 @@ components: title: VectorStoreListFilesResponse type: object VectorStoreListResponse: - description: "Response from listing vector stores.\n\n:param object: Object type identifier, always \"list\"\n:param data: List of vector store objects\n:param first_id: (Optional) ID of the first vector store in the list for pagination\n:param last_id: (Optional) ID of the last vector store in the list for pagination\n:param has_more: Whether there are more vector stores available beyond this page" + description: Response from listing vector stores. properties: object: default: list @@ -16699,7 +16749,7 @@ components: title: OpenAIResponseMessage type: object OpenAIResponseObjectWithInput: - description: "OpenAI response object extended with input context information.\n\n:param input: List of input items that led to this response" + description: OpenAI response object extended with input context information. properties: created_at: title: Created At @@ -18473,7 +18523,7 @@ components: title: OpenAIResponseObjectWithInput type: object ImageContentItem: - description: "A image content item\n\n:param type: Discriminator type of the content item. Always \"image\"\n:param image: Image as a base64 encoded string or an URL" + description: A image content item properties: type: const: image @@ -18688,9 +18738,9 @@ components: example: status: 500 title: Internal Server Error - detail: An unexpected error occurred + detail: An unexpected error occurred. Our team has been notified. 
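The InternalServerError500 example above now ends with "Our team has been notified."; combined with the `Error` component earlier in this diff (status, title, detail, optional instance, roughly RFC 7807), a failing call would surface a body along these lines (the `instance` value below is invented):

# Illustrative 500 response body per the Error schema and the updated example;
# "instance" is optional and the URL shown here is hypothetical.
error_body = {
    "status": 500,
    "title": "Internal Server Error",
    "detail": "An unexpected error occurred. Our team has been notified.",
    "instance": "/errors/occurrences/12345",
}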
DefaultError: - description: An error occurred + description: An unexpected error occurred content: application/json: schema: diff --git a/scripts/fastapi_generator.py b/scripts/fastapi_generator.py index 08b80a0fb..7a8b87afe 100755 --- a/scripts/fastapi_generator.py +++ b/scripts/fastapi_generator.py @@ -29,6 +29,9 @@ from llama_stack.core.server.routes import get_all_api_routes # Global list to store dynamic models created during endpoint generation _dynamic_models = [] +# Global mapping from (path, method) to webmethod for parameter description extraction +_path_webmethod_map: dict[tuple[str, str], Any] = {} + def _get_all_api_routes_with_functions(): """ @@ -107,18 +110,24 @@ def create_llama_stack_app() -> FastAPI: # Create FastAPI routes from the discovered routes for _, routes in api_routes.items(): for route, webmethod in routes: + # Store mapping for later use in parameter description extraction + for method in route.methods: + _path_webmethod_map[(route.path, method.lower())] = webmethod # Convert the route to a FastAPI endpoint _create_fastapi_endpoint(app, route, webmethod) return app -def _extract_path_parameters(path: str) -> list[dict[str, Any]]: +def _extract_path_parameters(path: str, webmethod=None) -> list[dict[str, Any]]: """ Extract path parameters from a URL path and return them as OpenAPI parameter definitions. + Parameters are returned in the order they appear in the docstring if available, + otherwise in the order they appear in the path. Args: path: URL path with parameters like /v1/batches/{batch_id}/cancel + webmethod: Optional webmethod to extract parameter descriptions from docstring Returns: List of parameter definitions for OpenAPI @@ -127,19 +136,62 @@ def _extract_path_parameters(path: str) -> list[dict[str, Any]]: # Find all path parameters in the format {param} or {param:type} param_pattern = r"\{([^}:]+)(?::[^}]+)?\}" - matches = re.findall(param_pattern, path) + path_params = set(re.findall(param_pattern, path)) + # Extract parameter descriptions and order from docstring if available + param_descriptions = {} + docstring_param_order = [] + if webmethod: + func = getattr(webmethod, "func", None) + if func and func.__doc__: + docstring = func.__doc__ + lines = docstring.split("\n") + for line in lines: + line = line.strip() + if line.startswith(":param "): + # Extract parameter name and description + # Format: :param param_name: description + parts = line[7:].split(":", 1) + if len(parts) == 2: + param_name = parts[0].strip() + description = parts[1].strip() + # Only track path parameters that exist in the path + if param_name in path_params: + if description: + param_descriptions[param_name] = description + if param_name not in docstring_param_order: + docstring_param_order.append(param_name) + + # Build parameters list preserving docstring order for path parameters found in docstring, + # then add any remaining path parameters in path order parameters = [] - for param_name in matches: - parameters.append( - { - "name": param_name, - "in": "path", - "required": True, - "schema": {"type": "string"}, - "description": f"Path parameter: {param_name}", - } - ) + # First add parameters in docstring order + for param_name in docstring_param_order: + if param_name in path_params: + description = param_descriptions.get(param_name, f"Path parameter: {param_name}") + parameters.append( + { + "name": param_name, + "in": "path", + "required": True, + "schema": {"type": "string"}, + "description": description, + } + ) + # Then add any path parameters not in 
docstring, in path order + path_param_list = re.findall(param_pattern, path) + for param_name in path_param_list: + if param_name not in docstring_param_order: + description = param_descriptions.get(param_name, f"Path parameter: {param_name}") + parameters.append( + { + "name": param_name, + "in": "path", + "required": True, + "schema": {"type": "string"}, + "description": description, + } + ) return parameters @@ -166,7 +218,8 @@ def _create_fastapi_endpoint(app: FastAPI, route, webmethod): f"Debug: {webmethod.route} - request_model: {request_model}, response_model: {response_model}, query_parameters: {query_parameters}" ) - # Extract response description from webmethod docstring (always try this first) + # Extract summary and response description from webmethod docstring + summary = _extract_summary_from_docstring(webmethod) response_description = _extract_response_description_from_docstring(webmethod, response_model) # Create endpoint function with proper typing @@ -316,6 +369,9 @@ def _create_fastapi_endpoint(app: FastAPI, route, webmethod): }, } + if summary: + route_kwargs["summary"] = summary + for method in methods: if method.upper() == "GET": app.get(fastapi_path, **route_kwargs)(endpoint_func) @@ -329,32 +385,51 @@ def _create_fastapi_endpoint(app: FastAPI, route, webmethod): app.patch(fastapi_path, **route_kwargs)(endpoint_func) +def _extract_summary_from_docstring(webmethod) -> str | None: + """ + Extract summary from the actual function docstring. + The summary is typically the first non-empty line of the docstring, + before any :param:, :returns:, or other docstring field markers. + """ + func = getattr(webmethod, "func", None) + if not func: + return None + + docstring = func.__doc__ or "" + if not docstring: + return None + + lines = docstring.split("\n") + for line in lines: + line = line.strip() + if not line: + continue + if line.startswith(":param:") or line.startswith(":returns:") or line.startswith(":raises:"): + break + return line + + return None + + def _extract_response_description_from_docstring(webmethod, response_model) -> str: """ Extract response description from the actual function docstring. Looks for :returns: in the docstring and uses that as the description. """ - # Try to get the actual function from the webmethod - # The webmethod should have a reference to the original function func = getattr(webmethod, "func", None) if not func: - # If we can't get the function, return a generic description return "Successful Response" - # Get the function's docstring docstring = func.__doc__ or "" - # Look for :returns: line in the docstring lines = docstring.split("\n") for line in lines: line = line.strip() if line.startswith(":returns:"): - # Extract the description after :returns: - description = line[9:].strip() # Remove ':returns:' prefix + description = line[9:].strip() if description: return description - # If no :returns: found, return a generic description return "Successful Response" @@ -842,7 +917,11 @@ def _add_error_responses(openapi_schema: dict[str, Any]) -> dict[str, Any]: 500: { "name": "InternalServerError500", "description": "The server encountered an unexpected error", - "example": {"status": 500, "title": "Internal Server Error", "detail": "An unexpected error occurred"}, + "example": { + "status": 500, + "title": "Internal Server Error", + "detail": "An unexpected error occurred. 
Our team has been notified.", + }, }, } @@ -858,7 +937,7 @@ def _add_error_responses(openapi_schema: dict[str, Any]) -> dict[str, Any]: # Add a default error response openapi_schema["components"]["responses"]["DefaultError"] = { - "description": "An error occurred", + "description": "An unexpected error occurred", "content": {"application/json": {"schema": {"$ref": "#/components/schemas/Error"}}}, } @@ -868,29 +947,112 @@ def _add_error_responses(openapi_schema: dict[str, Any]) -> dict[str, Any]: def _fix_path_parameters(openapi_schema: dict[str, Any]) -> dict[str, Any]: """ Fix path parameter resolution issues by adding explicit parameter definitions. + Uses docstring descriptions if available. """ + global _path_webmethod_map + if "paths" not in openapi_schema: return openapi_schema for path, path_item in openapi_schema["paths"].items(): - # Extract path parameters from the URL - path_params = _extract_path_parameters(path) - - if not path_params: - continue - # Add parameters to each operation in this path for method in ["get", "post", "put", "delete", "patch", "head", "options"]: if method in path_item and isinstance(path_item[method], dict): operation = path_item[method] + + # Get webmethod for this path/method to extract parameter descriptions + webmethod = _path_webmethod_map.get((path, method)) + + # Extract path parameters from the URL with descriptions from docstring + path_params = _extract_path_parameters(path, webmethod) + + if not path_params: + continue + if "parameters" not in operation: operation["parameters"] = [] - # Add path parameters that aren't already defined - existing_param_names = {p.get("name") for p in operation["parameters"] if p.get("in") == "path"} + # Separate path and non-path parameters + existing_params = operation["parameters"] + non_path_params = [p for p in existing_params if p.get("in") != "path"] + existing_path_params = {p.get("name"): p for p in existing_params if p.get("in") == "path"} + + # Build new parameters list: non-path params first, then path params in docstring order + new_params = non_path_params.copy() + + # Add path parameters in docstring order for param in path_params: - if param["name"] not in existing_param_names: - operation["parameters"].append(param) + param_name = param["name"] + if param_name in existing_path_params: + # Update existing parameter description if we have a better one + existing_param = existing_path_params[param_name] + if param["description"] != f"Path parameter: {param_name}": + existing_param["description"] = param["description"] + new_params.append(existing_param) + else: + # Add new path parameter + new_params.append(param) + + operation["parameters"] = new_params + + return openapi_schema + + +def _extract_first_line_from_description(description: str) -> str: + """ + Extract all lines from a description string that don't start with docstring keywords. + Stops at the first line that starts with :param:, :returns:, :raises:, etc. + Preserves multiple lines and formatting. 
+ """ + if not description: + return description + + lines = description.split("\n") + description_lines = [] + + for line in lines: + stripped = line.strip() + if not stripped: + # Keep empty lines in the description to preserve formatting + description_lines.append(line) + continue + if ( + stripped.startswith(":param") + or stripped.startswith(":returns") + or stripped.startswith(":raises") + or (stripped.startswith(":") and len(stripped) > 1 and stripped[1].isalpha()) + ): + break + description_lines.append(line) + + # Join lines and strip trailing whitespace/newlines + result = "\n".join(description_lines).rstrip() + return result if result else description + + +def _fix_component_descriptions(openapi_schema: dict[str, Any]) -> dict[str, Any]: + """ + Fix component descriptions to only include the first line (summary), + removing :param:, :returns:, and other docstring directives. + """ + if "components" not in openapi_schema or "schemas" not in openapi_schema["components"]: + return openapi_schema + + schemas = openapi_schema["components"]["schemas"] + + def fix_description_in_schema(schema_def: dict[str, Any]) -> None: + if isinstance(schema_def, dict): + if "description" in schema_def and isinstance(schema_def["description"], str): + schema_def["description"] = _extract_first_line_from_description(schema_def["description"]) + + for value in schema_def.values(): + fix_description_in_schema(value) + elif isinstance(schema_def, list): + for item in schema_def: + fix_description_in_schema(item) + + for _, schema_def in schemas.items(): + fix_description_in_schema(schema_def) return openapi_schema @@ -1409,6 +1571,9 @@ def generate_openapi_spec(output_dir: str, format: str = "yaml", include_example # Eliminate $defs section entirely for oasdiff compatibility openapi_schema = _eliminate_defs_section(openapi_schema) + # Fix component descriptions to only include first line (summary) + openapi_schema = _fix_component_descriptions(openapi_schema) + # Debug: Check if there's a root-level $defs after flattening if "$defs" in openapi_schema: print(f"After flattening: root-level $defs with {len(openapi_schema['$defs'])} items") @@ -1485,7 +1650,7 @@ def generate_openapi_spec(output_dir: str, format: str = "yaml", include_example if format in ["yaml", "both"]: yaml_path = output_path / "llama-stack-spec.yaml" - # Use ruamel.yaml for better control over YAML serialization + # Use ruamel.yaml for better YAML formatting try: from ruamel.yaml import YAML @@ -1497,46 +1662,52 @@ def generate_openapi_spec(output_dir: str, format: str = "yaml", include_example with open(yaml_path, "w") as f: yaml_writer.dump(stable_schema, f) + + # Post-process the YAML file to remove $defs section and fix references + # Re-read and re-write with ruamel.yaml + with open(yaml_path) as f: + yaml_content = f.read() + + if " $defs:" in yaml_content or "#/$defs/" in yaml_content: + print("Post-processing YAML to remove $defs section") + + # Use string replacement to fix references directly + if "#/$defs/" in yaml_content: + refs_fixed = yaml_content.count("#/$defs/") + yaml_content = yaml_content.replace("#/$defs/", "#/components/schemas/") + print(f"Fixed {refs_fixed} $ref references using string replacement") + + # Parse using PyYAML safe_load first to avoid issues with custom types + # This handles block scalars better during post-processing + import yaml as pyyaml + + with open(yaml_path) as f: + yaml_data = pyyaml.safe_load(f) + + # Move $defs to components/schemas if it exists + if "$defs" in yaml_data: + 
print(f"Found $defs section with {len(yaml_data['$defs'])} items") + if "components" not in yaml_data: + yaml_data["components"] = {} + if "schemas" not in yaml_data["components"]: + yaml_data["components"]["schemas"] = {} + + # Move all $defs to components/schemas + for def_name, def_schema in yaml_data["$defs"].items(): + yaml_data["components"]["schemas"][def_name] = def_schema + + # Remove the $defs section + del yaml_data["$defs"] + print("Moved $defs to components/schemas") + + # Write the modified YAML back with ruamel.yaml + with open(yaml_path, "w") as f: + yaml_writer.dump(yaml_data, f) + print("Updated YAML file") except ImportError: # Fallback to standard yaml if ruamel.yaml is not available with open(yaml_path, "w") as f: yaml.dump(stable_schema, f, default_flow_style=False, sort_keys=False) - # Post-process the YAML file to remove $defs section and fix references - with open(yaml_path) as f: - yaml_content = f.read() - - if " $defs:" in yaml_content or "#/$defs/" in yaml_content: - print("Post-processing YAML to remove $defs section") - - # Use string replacement to fix references directly - if "#/$defs/" in yaml_content: - refs_fixed = yaml_content.count("#/$defs/") - yaml_content = yaml_content.replace("#/$defs/", "#/components/schemas/") - print(f"Fixed {refs_fixed} $ref references using string replacement") - - # Parse the YAML content - yaml_data = yaml.safe_load(yaml_content) - - # Move $defs to components/schemas if it exists - if "$defs" in yaml_data: - print(f"Found $defs section with {len(yaml_data['$defs'])} items") - if "components" not in yaml_data: - yaml_data["components"] = {} - if "schemas" not in yaml_data["components"]: - yaml_data["components"]["schemas"] = {} - - # Move all $defs to components/schemas - for def_name, def_schema in yaml_data["$defs"].items(): - yaml_data["components"]["schemas"][def_name] = def_schema - - # Remove the $defs section - del yaml_data["$defs"] - print("Moved $defs to components/schemas") - - # Write the modified YAML back - with open(yaml_path, "w") as f: - yaml.dump(yaml_data, f, default_flow_style=False, sort_keys=False) - print("Updated YAML file") print(f"✅ Generated YAML (stable): {yaml_path}") @@ -1643,7 +1814,7 @@ def main(): parser = argparse.ArgumentParser(description="Generate OpenAPI specification using FastAPI") parser.add_argument("output_dir", help="Output directory for generated files") - parser.add_argument("--format", choices=["yaml", "json", "both"], default="yaml", help="Output format") + parser.add_argument("--format", choices=["yaml", "json", "both"], default="both", help="Output format") parser.add_argument("--no-examples", action="store_true", help="Exclude examples from the specification") parser.add_argument( "--validate-only", action="store_true", help="Only validate existing schema files, don't generate new ones"