mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-03 09:53:45 +00:00)

Introduce extract_type_annotation method

Signed-off-by: thepetk <thepetk@gmail.com>

parent 63887f2a21
commit 58e81839a4

44 changed files with 87 additions and 71 deletions
@@ -14,7 +14,7 @@ Meta's reference implementation of an agent system that can use tools, access ve

 | Field | Type | Required | Default | Description |
 |-------|------|----------|---------|-------------|
-| `persistence` | `inline.agents.meta_reference.config.AgentPersistenceConfig` | No | | |
+| `persistence` | `AgentPersistenceConfig` | No | | |

 ## Sample Configuration
@@ -14,7 +14,7 @@ Reference implementation of batches API with KVStore persistence.

 | Field | Type | Required | Default | Description |
 |-------|------|----------|---------|-------------|
-| `kvstore` | `llama_stack.core.storage.datatypes.KVStoreReference` | No | | Configuration for the key-value store backend. |
+| `kvstore` | `KVStoreReference` | No | | Configuration for the key-value store backend. |
 | `max_concurrent_batches` | `int` | No | 1 | Maximum number of concurrent batches to process simultaneously. |
 | `max_concurrent_requests_per_batch` | `int` | No | 10 | Maximum number of concurrent requests to process per batch. |
@@ -14,7 +14,7 @@ Local filesystem-based dataset I/O provider for reading and writing datasets to

 | Field | Type | Required | Default | Description |
 |-------|------|----------|---------|-------------|
-| `kvstore` | `llama_stack.core.storage.datatypes.KVStoreReference` | No | | |
+| `kvstore` | `KVStoreReference` | No | | |

 ## Sample Configuration
@@ -14,7 +14,7 @@ HuggingFace datasets provider for accessing and managing datasets from the Huggi

 | Field | Type | Required | Default | Description |
 |-------|------|----------|---------|-------------|
-| `kvstore` | `llama_stack.core.storage.datatypes.KVStoreReference` | No | | |
+| `kvstore` | `KVStoreReference` | No | | |

 ## Sample Configuration
@@ -14,7 +14,7 @@ Meta's reference implementation of evaluation tasks with support for multiple la

 | Field | Type | Required | Default | Description |
 |-------|------|----------|---------|-------------|
-| `kvstore` | `llama_stack.core.storage.datatypes.KVStoreReference` | No | | |
+| `kvstore` | `KVStoreReference` | No | | |

 ## Sample Configuration
@@ -15,7 +15,7 @@ Local filesystem-based file storage provider for managing files and documents lo
 | Field | Type | Required | Default | Description |
 |-------|------|----------|---------|-------------|
 | `storage_dir` | `str` | No | | Directory to store uploaded files |
-| `metadata_store` | `llama_stack.core.storage.datatypes.SqlStoreReference` | No | | SQL store configuration for file metadata |
+| `metadata_store` | `SqlStoreReference` | No | | SQL store configuration for file metadata |
 | `ttl_secs` | `int` | No | 31536000 | |

 ## Sample Configuration
@@ -15,7 +15,7 @@ OpenAI Files API provider for managing files through OpenAI's native file storag
 | Field | Type | Required | Default | Description |
 |-------|------|----------|---------|-------------|
 | `api_key` | `str` | No | | OpenAI API key for authentication |
-| `metadata_store` | `llama_stack.core.storage.datatypes.SqlStoreReference` | No | | SQL store configuration for file metadata |
+| `metadata_store` | `SqlStoreReference` | No | | SQL store configuration for file metadata |

 ## Sample Configuration
@@ -20,7 +20,7 @@ AWS S3-based file storage provider for scalable cloud file management with metad
 | `aws_secret_access_key` | `str \| None` | No | | AWS secret access key (optional if using IAM roles) |
 | `endpoint_url` | `str \| None` | No | | Custom S3 endpoint URL (for MinIO, LocalStack, etc.) |
 | `auto_create_bucket` | `bool` | No | False | Automatically create the S3 bucket if it doesn't exist |
-| `metadata_store` | `llama_stack.core.storage.datatypes.SqlStoreReference` | No | | SQL store configuration for file metadata |
+| `metadata_store` | `SqlStoreReference` | No | | SQL store configuration for file metadata |

 ## Sample Configuration
@@ -21,7 +21,7 @@ Meta's reference implementation of inference with support for various model form
 | `model_parallel_size` | `int \| None` | No | | |
 | `create_distributed_process_group` | `bool` | No | True | |
 | `checkpoint_dir` | `str \| None` | No | | |
-| `quantization` | `Bf16QuantizationConfig \| Fp8QuantizationConfig \| Int4QuantizationConfig` | No | | |
+| `quantization` | `Bf16QuantizationConfig \| Fp8QuantizationConfig \| Int4QuantizationConfig \| None` | No | | |

 ## Sample Configuration
@@ -16,7 +16,7 @@ Anthropic inference provider for accessing Claude models and Anthropic's AI serv
 |-------|------|----------|---------|-------------|
 | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
 | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |

 ## Sample Configuration
@@ -23,8 +23,8 @@ https://learn.microsoft.com/en-us/azure/ai-foundry/openai/overview
 |-------|------|----------|---------|-------------|
 | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
 | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
-| `api_base` | `pydantic.networks.HttpUrl` | No | | Azure API base for Azure (e.g., https://your-resource-name.openai.azure.com) |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
+| `api_base` | `HttpUrl` | No | | Azure API base for Azure (e.g., https://your-resource-name.openai.azure.com) |
 | `api_version` | `str \| None` | No | | Azure API version for Azure (e.g., 2024-12-01-preview) |
 | `api_type` | `str \| None` | No | azure | Azure API type for Azure (e.g., azure) |
@@ -16,7 +16,7 @@ Cerebras inference provider for running models on Cerebras Cloud platform.
 |-------|------|----------|---------|-------------|
 | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
 | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
 | `base_url` | `str` | No | https://api.cerebras.ai | Base URL for the Cerebras API |

 ## Sample Configuration
@@ -16,7 +16,7 @@ Databricks inference provider for running models on Databricks' unified analytic
 |-------|------|----------|---------|-------------|
 | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
 | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
-| `api_token` | `pydantic.types.SecretStr \| None` | No | | The Databricks API token |
+| `api_token` | `SecretStr \| None` | No | | The Databricks API token |
 | `url` | `str \| None` | No | | The URL for the Databricks model serving endpoint |

 ## Sample Configuration
@@ -16,7 +16,7 @@ Fireworks AI inference provider for Llama models and other AI models on the Fire
 |-------|------|----------|---------|-------------|
 | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
 | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
 | `url` | `str` | No | https://api.fireworks.ai/inference/v1 | The URL for the Fireworks server |

 ## Sample Configuration
@@ -16,7 +16,7 @@ Google Gemini inference provider for accessing Gemini models and Google's AI ser
 |-------|------|----------|---------|-------------|
 | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
 | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |

 ## Sample Configuration
@@ -16,7 +16,7 @@ Groq inference provider for ultra-fast inference using Groq's LPU technology.
 |-------|------|----------|---------|-------------|
 | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
 | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
 | `url` | `str` | No | https://api.groq.com | The URL for the Groq AI server |

 ## Sample Configuration
@@ -15,7 +15,7 @@ HuggingFace Inference Endpoints provider for dedicated model serving.
 | Field | Type | Required | Default | Description |
 |-------|------|----------|---------|-------------|
 | `endpoint_name` | `str` | No | | The name of the Hugging Face Inference Endpoint in the format of '{namespace}/{endpoint_name}' (e.g. 'my-cool-org/meta-llama-3-1-8b-instruct-rce'). Namespace is optional and will default to the user account if not provided. |
-| `api_token` | `pydantic.types.SecretStr \| None` | No | | Your Hugging Face user access token (will default to locally saved token if not provided) |
+| `api_token` | `SecretStr \| None` | No | | Your Hugging Face user access token (will default to locally saved token if not provided) |

 ## Sample Configuration
@@ -15,7 +15,7 @@ HuggingFace Inference API serverless provider for on-demand model inference.
 | Field | Type | Required | Default | Description |
 |-------|------|----------|---------|-------------|
 | `huggingface_repo` | `str` | No | | The model ID of the model on the Hugging Face Hub (e.g. 'meta-llama/Meta-Llama-3.1-70B-Instruct') |
-| `api_token` | `pydantic.types.SecretStr \| None` | No | | Your Hugging Face user access token (will default to locally saved token if not provided) |
+| `api_token` | `SecretStr \| None` | No | | Your Hugging Face user access token (will default to locally saved token if not provided) |

 ## Sample Configuration
@@ -16,7 +16,7 @@ Llama OpenAI-compatible provider for using Llama models with OpenAI API format.
 |-------|------|----------|---------|-------------|
 | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
 | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
 | `openai_compat_api_base` | `str` | No | https://api.llama.com/compat/v1/ | The URL for the Llama API server |

 ## Sample Configuration
@@ -16,7 +16,7 @@ NVIDIA inference provider for accessing NVIDIA NIM models and AI services.
 |-------|------|----------|---------|-------------|
 | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
 | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
 | `url` | `str` | No | https://integrate.api.nvidia.com | A base url for accessing the NVIDIA NIM |
 | `timeout` | `int` | No | 60 | Timeout for the HTTP requests |
 | `append_api_version` | `bool` | No | True | When set to false, the API version will not be appended to the base_url. By default, it is true. |
@@ -16,7 +16,7 @@ OpenAI inference provider for accessing GPT models and other OpenAI services.
 |-------|------|----------|---------|-------------|
 | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
 | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
 | `base_url` | `str` | No | https://api.openai.com/v1 | Base URL for OpenAI API |

 ## Sample Configuration
@@ -14,10 +14,10 @@ Passthrough inference provider for connecting to any external inference service

 | Field | Type | Required | Default | Description |
 |-------|------|----------|---------|-------------|
-| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
-| `refresh_models` | `<class 'bool'>` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
-| `url` | `<class 'str'>` | No | | The URL for the passthrough endpoint |
+| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
+| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
+| `api_key` | `SecretStr \| None` | No | | API Key for the passthrough endpoint |
+| `url` | `str` | No | | The URL for the passthrough endpoint |

 ## Sample Configuration
@@ -16,7 +16,7 @@ RunPod inference provider for running models on RunPod's cloud GPU platform.
 |-------|------|----------|---------|-------------|
 | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
 | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
-| `api_token` | `pydantic.types.SecretStr \| None` | No | | The API token |
+| `api_token` | `SecretStr \| None` | No | | The API token |
 | `url` | `str \| None` | No | | The URL for the Runpod model serving endpoint |

 ## Sample Configuration
@@ -16,7 +16,7 @@ SambaNova inference provider for running models on SambaNova's dataflow architec
 |-------|------|----------|---------|-------------|
 | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
 | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
 | `url` | `str` | No | https://api.sambanova.ai/v1 | The URL for the SambaNova AI server |

 ## Sample Configuration
@@ -16,7 +16,7 @@ Together AI inference provider for open-source models and collaborative AI devel
 |-------|------|----------|---------|-------------|
 | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
 | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
 | `url` | `str` | No | https://api.together.xyz/v1 | The URL for the Together AI server |

 ## Sample Configuration
@@ -16,7 +16,7 @@ Remote vLLM inference provider for connecting to vLLM servers.
 |-------|------|----------|---------|-------------|
 | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
 | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
-| `api_token` | `pydantic.types.SecretStr \| None` | No | | The API token |
+| `api_token` | `SecretStr \| None` | No | | The API token |
 | `url` | `str \| None` | No | | The URL for the vLLM model serving endpoint |
 | `max_tokens` | `int` | No | 4096 | Maximum number of tokens to generate. |
 | `tls_verify` | `bool \| str` | No | True | Whether to verify TLS certificates. Can be a boolean or a path to a CA certificate file. |
@@ -16,7 +16,7 @@ IBM WatsonX inference provider for accessing AI models on IBM's WatsonX platform
 |-------|------|----------|---------|-------------|
 | `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
 | `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
+| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
 | `url` | `str` | No | https://us-south.ml.cloud.ibm.com | A base url for accessing the watsonx.ai |
 | `project_id` | `str \| None` | No | | The watsonx.ai project ID |
 | `timeout` | `int` | No | 60 | Timeout for the HTTP requests |
@@ -15,8 +15,8 @@ HuggingFace-based post-training provider for fine-tuning models using the Huggin
 | Field | Type | Required | Default | Description |
 |-------|------|----------|---------|-------------|
 | `device` | `str` | No | cuda | |
-| `distributed_backend` | `Literal['fsdp', 'deepspeed' \| None]` | No | | |
-| `checkpoint_format` | `Literal['full_state', 'huggingface' \| None]` | No | huggingface | |
+| `distributed_backend` | `Literal[fsdp, deepspeed] \| None` | No | | |
+| `checkpoint_format` | `Literal[full_state, huggingface] \| None` | No | huggingface | |
 | `chat_template` | `str` | No | `<|user|>`<br/>`{input}`<br/>`<|assistant|>`<br/>`{output}` | |
 | `model_specific_config` | `dict` | No | `{'trust_remote_code': True, 'attn_implementation': 'sdpa'}` | |
 | `max_seq_length` | `int` | No | 2048 | |
@@ -29,7 +29,7 @@ HuggingFace-based post-training provider for fine-tuning models using the Huggin
 | `dataloader_pin_memory` | `bool` | No | True | |
 | `dpo_beta` | `float` | No | 0.1 | |
 | `use_reference_model` | `bool` | No | True | |
-| `dpo_loss_type` | `Literal['sigmoid', 'hinge', 'ipo', 'kto_pair']` | No | sigmoid | |
+| `dpo_loss_type` | `Literal[sigmoid, hinge, ipo, kto_pair]` | No | sigmoid | |
 | `dpo_output_dir` | `str` | No | | |

 ## Sample Configuration
@@ -15,7 +15,7 @@ TorchTune-based post-training provider for fine-tuning and optimizing models usi
 | Field | Type | Required | Default | Description |
 |-------|------|----------|---------|-------------|
 | `torch_seed` | `int \| None` | No | | |
-| `checkpoint_format` | `Literal['meta', 'huggingface' \| None]` | No | meta | |
+| `checkpoint_format` | `Literal[meta, huggingface] \| None` | No | meta | |

 ## Sample Configuration
@@ -15,7 +15,7 @@ TorchTune-based post-training provider for fine-tuning and optimizing models usi
 | Field | Type | Required | Default | Description |
 |-------|------|----------|---------|-------------|
 | `torch_seed` | `int \| None` | No | | |
-| `checkpoint_format` | `Literal['meta', 'huggingface' \| None]` | No | meta | |
+| `checkpoint_format` | `Literal[meta, huggingface] \| None` | No | meta | |

 ## Sample Configuration
@@ -15,7 +15,7 @@ SambaNova's safety provider for content moderation and safety filtering.
 | Field | Type | Required | Default | Description |
 |-------|------|----------|---------|-------------|
 | `url` | `str` | No | https://api.sambanova.ai/v1 | The URL for the SambaNova AI server |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | The SambaNova cloud API Key |
+| `api_key` | `SecretStr \| None` | No | | The SambaNova cloud API Key |

 ## Sample Configuration
@@ -79,7 +79,7 @@ See [Chroma's documentation](https://docs.trychroma.com/docs/overview/introducti
 | Field | Type | Required | Default | Description |
 |-------|------|----------|---------|-------------|
 | `db_path` | `str` | No | | |
-| `persistence` | `llama_stack.core.storage.datatypes.KVStoreReference` | No | | Config for KV store backend |
+| `persistence` | `KVStoreReference` | No | | Config for KV store backend |

 ## Sample Configuration
@@ -95,7 +95,7 @@ more details about Faiss in general.

 | Field | Type | Required | Default | Description |
 |-------|------|----------|---------|-------------|
-| `persistence` | `llama_stack.core.storage.datatypes.KVStoreReference` | No | | |
+| `persistence` | `KVStoreReference` | No | | |

 ## Sample Configuration
@@ -14,7 +14,7 @@ Meta's reference implementation of a vector database.

 | Field | Type | Required | Default | Description |
 |-------|------|----------|---------|-------------|
-| `persistence` | `llama_stack.core.storage.datatypes.KVStoreReference` | No | | |
+| `persistence` | `KVStoreReference` | No | | |

 ## Sample Configuration
@@ -17,7 +17,7 @@ Please refer to the remote provider documentation.
 | Field | Type | Required | Default | Description |
 |-------|------|----------|---------|-------------|
 | `db_path` | `str` | No | | |
-| `persistence` | `llama_stack.core.storage.datatypes.KVStoreReference` | No | | Config for KV store backend (SQLite only for now) |
+| `persistence` | `KVStoreReference` | No | | Config for KV store backend (SQLite only for now) |
 | `consistency_level` | `str` | No | Strong | The consistency level of the Milvus server |

 ## Sample Configuration
@@ -98,7 +98,7 @@ See the [Qdrant documentation](https://qdrant.tech/documentation/) for more deta
 | Field | Type | Required | Default | Description |
 |-------|------|----------|---------|-------------|
 | `path` | `str` | No | | |
-| `persistence` | `llama_stack.core.storage.datatypes.KVStoreReference` | No | | |
+| `persistence` | `KVStoreReference` | No | | |

 ## Sample Configuration
@@ -408,7 +408,7 @@ See [sqlite-vec's GitHub repo](https://github.com/asg017/sqlite-vec/tree/main) f
 | Field | Type | Required | Default | Description |
 |-------|------|----------|---------|-------------|
 | `db_path` | `str` | No | | Path to the SQLite database file |
-| `persistence` | `llama_stack.core.storage.datatypes.KVStoreReference` | No | | Config for KV store backend (SQLite only for now) |
+| `persistence` | `KVStoreReference` | No | | Config for KV store backend (SQLite only for now) |

 ## Sample Configuration
@@ -17,7 +17,7 @@ Please refer to the sqlite-vec provider documentation.
 | Field | Type | Required | Default | Description |
 |-------|------|----------|---------|-------------|
 | `db_path` | `str` | No | | Path to the SQLite database file |
-| `persistence` | `llama_stack.core.storage.datatypes.KVStoreReference` | No | | Config for KV store backend (SQLite only for now) |
+| `persistence` | `KVStoreReference` | No | | Config for KV store backend (SQLite only for now) |

 ## Sample Configuration
@@ -78,7 +78,7 @@ See [Chroma's documentation](https://docs.trychroma.com/docs/overview/introducti
 | Field | Type | Required | Default | Description |
 |-------|------|----------|---------|-------------|
 | `url` | `str \| None` | No | | |
-| `persistence` | `llama_stack.core.storage.datatypes.KVStoreReference` | No | | Config for KV store backend |
+| `persistence` | `KVStoreReference` | No | | Config for KV store backend |

 ## Sample Configuration
@@ -408,7 +408,7 @@ For more details on TLS configuration, refer to the [TLS setup guide](https://mi
 | `uri` | `str` | No | | The URI of the Milvus server |
 | `token` | `str \| None` | No | | The token of the Milvus server |
 | `consistency_level` | `str` | No | Strong | The consistency level of the Milvus server |
-| `persistence` | `llama_stack.core.storage.datatypes.KVStoreReference` | No | | Config for KV store backend |
+| `persistence` | `KVStoreReference` | No | | Config for KV store backend |
 | `config` | `dict` | No | `{}` | This configuration allows additional fields to be passed through to the underlying Milvus client. See the [Milvus](https://milvus.io/docs/install-overview.md) documentation for more details about Milvus in general. |

 :::note
@@ -218,7 +218,7 @@ See [PGVector's documentation](https://github.com/pgvector/pgvector) for more de
 | `db` | `str \| None` | No | postgres | |
 | `user` | `str \| None` | No | postgres | |
 | `password` | `str \| None` | No | mysecretpassword | |
-| `persistence` | `llama_stack.core.storage.datatypes.KVStoreReference \| None` | No | | Config for KV store backend (SQLite only for now) |
+| `persistence` | `KVStoreReference \| None` | No | | Config for KV store backend (SQLite only for now) |

 ## Sample Configuration
@@ -26,7 +26,7 @@ Please refer to the inline provider documentation.
 | `prefix` | `str \| None` | No | | |
 | `timeout` | `int \| None` | No | | |
 | `host` | `str \| None` | No | | |
-| `persistence` | `llama_stack.core.storage.datatypes.KVStoreReference` | No | | |
+| `persistence` | `KVStoreReference` | No | | |

 ## Sample Configuration
@@ -75,7 +75,7 @@ See [Weaviate's documentation](https://weaviate.io/developers/weaviate) for more
 |-------|------|----------|---------|-------------|
 | `weaviate_api_key` | `str \| None` | No | | The API key for the Weaviate instance |
 | `weaviate_cluster_url` | `str \| None` | No | localhost:8080 | The URL of the Weaviate cluster |
-| `persistence` | `llama_stack.core.storage.datatypes.KVStoreReference \| None` | No | | Config for KV store backend (SQLite only for now) |
+| `persistence` | `KVStoreReference \| None` | No | | Config for KV store backend (SQLite only for now) |

 ## Sample Configuration
@@ -5,11 +5,11 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.

-import re
 import subprocess
 import sys
 from pathlib import Path
-from typing import Any
+from types import UnionType
+from typing import Annotated, Any, Union, get_args, get_origin

 from pydantic_core import PydanticUndefined
 from rich.progress import Progress, SpinnerColumn, TextColumn
@@ -54,6 +54,41 @@ class ChangedPathTracker:
         return self._changed_paths


+def extract_type_annotation(annotation: Any) -> str:
+    """extract a type annotation into a clean string representation."""
+    if annotation is None:
+        return "Any"
+
+    if annotation is type(None):
+        return "None"
+
+    origin = get_origin(annotation)
+    args = get_args(annotation)
+
+    # recursive workaround for Annotated types to ignore FieldInfo part
+    if origin is Annotated and args:
+        return extract_type_annotation(args[0])
+
+    if origin in [Union, UnionType]:
+        non_none_args = [arg for arg in args if arg is not type(None)]
+        has_none = len(non_none_args) < len(args)
+
+        if len(non_none_args) == 1:
+            formatted = extract_type_annotation(non_none_args[0])
+            return f"{formatted} | None" if has_none else formatted
+        else:
+            formatted_args = [extract_type_annotation(arg) for arg in non_none_args]
+            result = " | ".join(formatted_args)
+            return f"{result} | None" if has_none else result
+
+    if origin is not None and args:
+        origin_name = getattr(origin, "__name__", str(origin))
+        formatted_args = [extract_type_annotation(arg) for arg in args]
+        return f"{origin_name}[{', '.join(formatted_args)}]"
+
+    return annotation.__name__ if hasattr(annotation, "__name__") else str(annotation)
+
+
 def get_config_class_info(config_class_path: str) -> dict[str, Any]:
     """Extract configuration information from a config class."""
     try:
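For context, a minimal sketch of what the new helper produces for a few annotation shapes, compared with the raw `str()` the generator used before. It assumes `extract_type_annotation` as defined above is in scope (for example, pasted into a REPL) and that pydantic is installed; the expected outputs in the comments mirror the table changes earlier in this diff.

```python
from typing import Annotated, Literal

from pydantic import Field, SecretStr

# extract_type_annotation is assumed to be in scope (same module or REPL paste).

# What the generator relied on before this commit: raw str() of the annotation.
print(str(SecretStr | None))  # pydantic.types.SecretStr | None
print(str(bool))              # <class 'bool'>

# The new helper strips module paths, <class '...'> wrappers and Annotated metadata.
print(extract_type_annotation(SecretStr | None))                        # SecretStr | None
print(extract_type_annotation(bool))                                    # bool
print(extract_type_annotation(Annotated[str, Field(description="x")]))  # str
print(extract_type_annotation(Literal["meta", "huggingface"] | None))   # Literal[meta, huggingface] | None
```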
@@ -84,27 +119,8 @@ def get_config_class_info(config_class_path: str) -> dict[str, Any]:
         for field_name, field in config_class.model_fields.items():
             if getattr(field, "exclude", False):
                 continue
-            field_type = str(field.annotation) if field.annotation else "Any"
+            field_type = extract_type_annotation(field.annotation)

-            # this string replace is ridiculous
-            field_type = (
-                field_type.replace("typing.", "")
-                .replace("Optional[", "")
-                .replace("]", "")
-            )
-            field_type = (
-                field_type.replace("Annotated[", "")
-                .replace("FieldInfo(", "")
-                .replace(")", "")
-            )
-            field_type = field_type.replace("llama_stack_api.inference.", "")
-            field_type = field_type.replace("llama_stack.providers.", "")
-            field_type = field_type.replace("llama_stack_api.datatypes.", "")
-
-            field_type = re.sub(r"Optional\[([^\]]+)\]", r"\1 | None", field_type)
-            field_type = re.sub(r"Annotated\[([^,]+),.*?\]", r"\1", field_type)
-            field_type = re.sub(r"FieldInfo\([^)]*\)", "", field_type)
-            field_type = re.sub(r"<class '([^']+)'>", r"\1", field_type)

             default_value = field.default
             if field.default_factory is not None:
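A hypothetical pytest sketch (not part of this commit) that pins down the renderings the regenerated tables above rely on. The expected strings are taken from the documentation changes in this diff; `extract_type_annotation` is assumed to be importable from the docs-generation script, whose path is not shown in this view.

```python
from typing import Literal

import pytest
from pydantic import SecretStr

# extract_type_annotation is assumed to be in scope (imported from the docs script).


@pytest.mark.parametrize(
    ("annotation", "expected"),
    [
        (bool, "bool"),
        (str | None, "str | None"),
        (list[str] | None, "list[str] | None"),
        (SecretStr | None, "SecretStr | None"),
        (Literal["sigmoid", "hinge", "ipo", "kto_pair"], "Literal[sigmoid, hinge, ipo, kto_pair]"),
    ],
)
def test_extract_type_annotation(annotation, expected):
    assert extract_type_annotation(annotation) == expected
```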