diff --git a/docs/docs/providers/files/remote_s3.mdx b/docs/docs/providers/files/remote_s3.mdx index a065da5f8..adf4bced0 100644 --- a/docs/docs/providers/files/remote_s3.mdx +++ b/docs/docs/providers/files/remote_s3.mdx @@ -17,7 +17,7 @@ AWS S3-based file storage provider for scalable cloud file management with metad | `bucket_name` | `` | No | | S3 bucket name to store files | | `region` | `` | No | us-east-1 | AWS region where the bucket is located | | `aws_access_key_id` | `str \| None` | No | | AWS access key ID (optional if using IAM roles) | -| `aws_secret_access_key` | `pydantic.types.SecretStr \| None` | No | | AWS secret access key (optional if using IAM roles) | +| `aws_secret_access_key` | `` | No | | AWS secret access key (optional if using IAM roles) | | `endpoint_url` | `str \| None` | No | | Custom S3 endpoint URL (for MinIO, LocalStack, etc.) | | `auto_create_bucket` | `` | No | False | Automatically create the S3 bucket if it doesn't exist | | `metadata_store` | `utils.sqlstore.sqlstore.SqliteSqlStoreConfig \| utils.sqlstore.sqlstore.PostgresSqlStoreConfig` | No | sqlite | SQL store configuration for file metadata | diff --git a/docs/docs/providers/inference/remote_anthropic.mdx b/docs/docs/providers/inference/remote_anthropic.mdx index c8115c7a8..f795ad3f1 100644 --- a/docs/docs/providers/inference/remote_anthropic.mdx +++ b/docs/docs/providers/inference/remote_anthropic.mdx @@ -14,7 +14,7 @@ Anthropic inference provider for accessing Claude models and Anthropic's AI serv | Field | Type | Required | Default | Description | |-------|------|----------|---------|-------------| -| `api_key` | `pydantic.types.SecretStr \| None` | No | | API key for Anthropic models | +| `api_key` | `` | No | | API key for Anthropic models | ## Sample Configuration diff --git a/docs/docs/providers/inference/remote_bedrock.mdx b/docs/docs/providers/inference/remote_bedrock.mdx index 3a4c296cf..ff1ed5ad8 100644 --- a/docs/docs/providers/inference/remote_bedrock.mdx +++ 
b/docs/docs/providers/inference/remote_bedrock.mdx @@ -15,8 +15,8 @@ AWS Bedrock inference provider for accessing various AI models through AWS's man | Field | Type | Required | Default | Description | |-------|------|----------|---------|-------------| | `aws_access_key_id` | `str \| None` | No | | The AWS access key to use. Default use environment variable: AWS_ACCESS_KEY_ID | -| `aws_secret_access_key` | `pydantic.types.SecretStr \| None` | No | | The AWS secret access key to use. Default use environment variable: AWS_SECRET_ACCESS_KEY | -| `aws_session_token` | `pydantic.types.SecretStr \| None` | No | | The AWS session token to use. Default use environment variable: AWS_SESSION_TOKEN | +| `aws_secret_access_key` | `` | No | | The AWS secret access key to use. Default use environment variable: AWS_SECRET_ACCESS_KEY | +| `aws_session_token` | `` | No | | The AWS session token to use. Default use environment variable: AWS_SESSION_TOKEN | | `region_name` | `str \| None` | No | | The default AWS Region to use, for example, us-west-1 or us-west-2.Default use environment variable: AWS_DEFAULT_REGION | | `profile_name` | `str \| None` | No | | The profile name that contains credentials to use.Default use environment variable: AWS_PROFILE | | `total_max_attempts` | `int \| None` | No | | An integer representing the maximum number of attempts that will be made for a single request, including the initial attempt. 
Default use environment variable: AWS_MAX_ATTEMPTS | diff --git a/docs/docs/providers/inference/remote_fireworks.mdx b/docs/docs/providers/inference/remote_fireworks.mdx index d2c3a664e..0f37ccbc2 100644 --- a/docs/docs/providers/inference/remote_fireworks.mdx +++ b/docs/docs/providers/inference/remote_fireworks.mdx @@ -16,7 +16,7 @@ Fireworks AI inference provider for Llama models and other AI models on the Fire |-------|------|----------|---------|-------------| | `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. | | `url` | `` | No | https://api.fireworks.ai/inference/v1 | The URL for the Fireworks server | -| `api_key` | `pydantic.types.SecretStr \| None` | No | | The Fireworks.ai API Key | +| `api_key` | `` | No | | The Fireworks.ai API Key | ## Sample Configuration diff --git a/docs/docs/providers/inference/remote_gemini.mdx b/docs/docs/providers/inference/remote_gemini.mdx index ce7c797c4..d9a2f3e8d 100644 --- a/docs/docs/providers/inference/remote_gemini.mdx +++ b/docs/docs/providers/inference/remote_gemini.mdx @@ -14,7 +14,7 @@ Google Gemini inference provider for accessing Gemini models and Google's AI ser | Field | Type | Required | Default | Description | |-------|------|----------|---------|-------------| -| `api_key` | `pydantic.types.SecretStr \| None` | No | | API key for Gemini models | +| `api_key` | `` | No | | API key for Gemini models | ## Sample Configuration diff --git a/docs/docs/providers/inference/remote_groq.mdx b/docs/docs/providers/inference/remote_groq.mdx index c2745ed98..b6d29496e 100644 --- a/docs/docs/providers/inference/remote_groq.mdx +++ b/docs/docs/providers/inference/remote_groq.mdx @@ -14,7 +14,7 @@ Groq inference provider for ultra-fast inference using Groq's LPU technology. 
| Field | Type | Required | Default | Description | |-------|------|----------|---------|-------------| -| `api_key` | `pydantic.types.SecretStr \| None` | No | | The Groq API key | +| `api_key` | `` | No | | The Groq API key | | `url` | `` | No | https://api.groq.com | The URL for the Groq AI server | ## Sample Configuration diff --git a/docs/docs/providers/inference/remote_llama-openai-compat.mdx b/docs/docs/providers/inference/remote_llama-openai-compat.mdx index b7b0204e6..491774844 100644 --- a/docs/docs/providers/inference/remote_llama-openai-compat.mdx +++ b/docs/docs/providers/inference/remote_llama-openai-compat.mdx @@ -14,7 +14,7 @@ Llama OpenAI-compatible provider for using Llama models with OpenAI API format. | Field | Type | Required | Default | Description | |-------|------|----------|---------|-------------| -| `api_key` | `pydantic.types.SecretStr \| None` | No | | The Llama API key | +| `api_key` | `` | No | | The Llama API key | | `openai_compat_api_base` | `` | No | https://api.llama.com/compat/v1/ | The URL for the Llama API server | ## Sample Configuration diff --git a/docs/docs/providers/inference/remote_nvidia.mdx b/docs/docs/providers/inference/remote_nvidia.mdx index 4a8be5d03..9aee22e9a 100644 --- a/docs/docs/providers/inference/remote_nvidia.mdx +++ b/docs/docs/providers/inference/remote_nvidia.mdx @@ -15,7 +15,7 @@ NVIDIA inference provider for accessing NVIDIA NIM models and AI services. 
| Field | Type | Required | Default | Description | |-------|------|----------|---------|-------------| | `url` | `` | No | https://integrate.api.nvidia.com | A base url for accessing the NVIDIA NIM | -| `api_key` | `pydantic.types.SecretStr \| None` | No | | The NVIDIA API key, only needed of using the hosted service | +| `api_key` | `` | No | | The NVIDIA API key, only needed of using the hosted service | | `timeout` | `` | No | 60 | Timeout for the HTTP requests | | `append_api_version` | `` | No | True | When set to false, the API version will not be appended to the base_url. By default, it is true. | diff --git a/docs/docs/providers/inference/remote_openai.mdx b/docs/docs/providers/inference/remote_openai.mdx index 2c7650bb5..f82bea154 100644 --- a/docs/docs/providers/inference/remote_openai.mdx +++ b/docs/docs/providers/inference/remote_openai.mdx @@ -14,7 +14,7 @@ OpenAI inference provider for accessing GPT models and other OpenAI services. | Field | Type | Required | Default | Description | |-------|------|----------|---------|-------------| -| `api_key` | `pydantic.types.SecretStr \| None` | No | | API key for OpenAI models | +| `api_key` | `` | No | | API key for OpenAI models | | `base_url` | `` | No | https://api.openai.com/v1 | Base URL for OpenAI API | ## Sample Configuration diff --git a/docs/docs/providers/inference/remote_passthrough.mdx b/docs/docs/providers/inference/remote_passthrough.mdx index 972cc2a08..efaa01d04 100644 --- a/docs/docs/providers/inference/remote_passthrough.mdx +++ b/docs/docs/providers/inference/remote_passthrough.mdx @@ -15,7 +15,7 @@ Passthrough inference provider for connecting to any external inference service | Field | Type | Required | Default | Description | |-------|------|----------|---------|-------------| | `url` | `` | No | | The URL for the passthrough endpoint | -| `api_key` | `pydantic.types.SecretStr \| None` | No | | API Key for the passthrouth endpoint | +| `api_key` | `` | No | | API Key for the passthrouth 
endpoint | ## Sample Configuration diff --git a/docs/docs/providers/inference/remote_runpod.mdx b/docs/docs/providers/inference/remote_runpod.mdx index 5d3655474..1def462f6 100644 --- a/docs/docs/providers/inference/remote_runpod.mdx +++ b/docs/docs/providers/inference/remote_runpod.mdx @@ -15,7 +15,7 @@ RunPod inference provider for running models on RunPod's cloud GPU platform. | Field | Type | Required | Default | Description | |-------|------|----------|---------|-------------| | `url` | `str \| None` | No | | The URL for the Runpod model serving endpoint | -| `api_token` | `pydantic.types.SecretStr \| None` | No | | The API token | +| `api_token` | `` | No | | The API token | ## Sample Configuration diff --git a/docs/docs/providers/inference/remote_sambanova.mdx b/docs/docs/providers/inference/remote_sambanova.mdx index 6ee28b400..b5d64e6d9 100644 --- a/docs/docs/providers/inference/remote_sambanova.mdx +++ b/docs/docs/providers/inference/remote_sambanova.mdx @@ -15,7 +15,7 @@ SambaNova inference provider for running models on SambaNova's dataflow architec | Field | Type | Required | Default | Description | |-------|------|----------|---------|-------------| | `url` | `` | No | https://api.sambanova.ai/v1 | The URL for the SambaNova AI server | -| `api_key` | `pydantic.types.SecretStr \| None` | No | | The SambaNova cloud API Key | +| `api_key` | `` | No | | The SambaNova cloud API Key | ## Sample Configuration diff --git a/docs/docs/providers/inference/remote_together.mdx b/docs/docs/providers/inference/remote_together.mdx index da232a45b..0669a1112 100644 --- a/docs/docs/providers/inference/remote_together.mdx +++ b/docs/docs/providers/inference/remote_together.mdx @@ -16,7 +16,7 @@ Together AI inference provider for open-source models and collaborative AI devel |-------|------|----------|---------|-------------| | `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. 
| | `url` | `` | No | https://api.together.xyz/v1 | The URL for the Together AI server | -| `api_key` | `pydantic.types.SecretStr \| None` | No | | The Together AI API Key | +| `api_key` | `` | No | | The Together AI API Key | ## Sample Configuration diff --git a/docs/docs/providers/inference/remote_vllm.mdx b/docs/docs/providers/inference/remote_vllm.mdx index 1faf131b9..5f9af4db4 100644 --- a/docs/docs/providers/inference/remote_vllm.mdx +++ b/docs/docs/providers/inference/remote_vllm.mdx @@ -16,7 +16,7 @@ Remote vLLM inference provider for connecting to vLLM servers. |-------|------|----------|---------|-------------| | `url` | `str \| None` | No | | The URL for the vLLM model serving endpoint | | `max_tokens` | `` | No | 4096 | Maximum number of tokens to generate. | -| `api_token` | `pydantic.types.SecretStr \| None` | No | ********** | The API token | +| `api_token` | `` | No | | The API token | | `tls_verify` | `bool \| str` | No | True | Whether to verify TLS certificates. Can be a boolean or a path to a CA certificate file. 
| | `refresh_models` | `` | No | False | Whether to refresh models periodically | diff --git a/docs/docs/providers/inference/remote_watsonx.mdx b/docs/docs/providers/inference/remote_watsonx.mdx index 1ceccc3ed..c1ab57a7b 100644 --- a/docs/docs/providers/inference/remote_watsonx.mdx +++ b/docs/docs/providers/inference/remote_watsonx.mdx @@ -15,7 +15,7 @@ IBM WatsonX inference provider for accessing AI models on IBM's WatsonX platform | Field | Type | Required | Default | Description | |-------|------|----------|---------|-------------| | `url` | `` | No | https://us-south.ml.cloud.ibm.com | A base url for accessing the watsonx.ai | -| `api_key` | `pydantic.types.SecretStr \| None` | No | | The watsonx API key | +| `api_key` | `` | No | | The watsonx API key | | `project_id` | `str \| None` | No | | The Project ID key | | `timeout` | `` | No | 60 | Timeout for the HTTP requests | diff --git a/docs/docs/providers/safety/remote_bedrock.mdx b/docs/docs/providers/safety/remote_bedrock.mdx index 85bc662e8..e068f1fed 100644 --- a/docs/docs/providers/safety/remote_bedrock.mdx +++ b/docs/docs/providers/safety/remote_bedrock.mdx @@ -15,8 +15,8 @@ AWS Bedrock safety provider for content moderation using AWS's safety services. | Field | Type | Required | Default | Description | |-------|------|----------|---------|-------------| | `aws_access_key_id` | `str \| None` | No | | The AWS access key to use. Default use environment variable: AWS_ACCESS_KEY_ID | -| `aws_secret_access_key` | `pydantic.types.SecretStr \| None` | No | | The AWS secret access key to use. Default use environment variable: AWS_SECRET_ACCESS_KEY | -| `aws_session_token` | `pydantic.types.SecretStr \| None` | No | | The AWS session token to use. Default use environment variable: AWS_SESSION_TOKEN | +| `aws_secret_access_key` | `` | No | | The AWS secret access key to use. Default use environment variable: AWS_SECRET_ACCESS_KEY | +| `aws_session_token` | `` | No | | The AWS session token to use. 
Default use environment variable: AWS_SESSION_TOKEN | | `region_name` | `str \| None` | No | | The default AWS Region to use, for example, us-west-1 or us-west-2.Default use environment variable: AWS_DEFAULT_REGION | | `profile_name` | `str \| None` | No | | The profile name that contains credentials to use.Default use environment variable: AWS_PROFILE | | `total_max_attempts` | `int \| None` | No | | An integer representing the maximum number of attempts that will be made for a single request, including the initial attempt. Default use environment variable: AWS_MAX_ATTEMPTS | diff --git a/docs/docs/providers/safety/remote_sambanova.mdx b/docs/docs/providers/safety/remote_sambanova.mdx index da70fce6c..3a5a0db7d 100644 --- a/docs/docs/providers/safety/remote_sambanova.mdx +++ b/docs/docs/providers/safety/remote_sambanova.mdx @@ -15,7 +15,7 @@ SambaNova's safety provider for content moderation and safety filtering. | Field | Type | Required | Default | Description | |-------|------|----------|---------|-------------| | `url` | `` | No | https://api.sambanova.ai/v1 | The URL for the SambaNova AI server | -| `api_key` | `pydantic.types.SecretStr \| None` | No | | The SambaNova cloud API Key | +| `api_key` | `` | No | | The SambaNova cloud API Key | ## Sample Configuration diff --git a/docs/docs/providers/scoring/inline_braintrust.mdx b/docs/docs/providers/scoring/inline_braintrust.mdx index 67de0667a..93779b7ac 100644 --- a/docs/docs/providers/scoring/inline_braintrust.mdx +++ b/docs/docs/providers/scoring/inline_braintrust.mdx @@ -14,7 +14,7 @@ Braintrust scoring provider for evaluation and scoring using the Braintrust plat | Field | Type | Required | Default | Description | |-------|------|----------|---------|-------------| -| `openai_api_key` | `pydantic.types.SecretStr \| None` | No | | The OpenAI API Key | +| `openai_api_key` | `` | No | | The OpenAI API Key | ## Sample Configuration diff --git a/docs/docs/providers/tool_runtime/remote_bing-search.mdx 
b/docs/docs/providers/tool_runtime/remote_bing-search.mdx index 3085ab9ec..b7dabdd47 100644 --- a/docs/docs/providers/tool_runtime/remote_bing-search.mdx +++ b/docs/docs/providers/tool_runtime/remote_bing-search.mdx @@ -14,7 +14,7 @@ Bing Search tool for web search capabilities using Microsoft's search engine. | Field | Type | Required | Default | Description | |-------|------|----------|---------|-------------| -| `api_key` | `pydantic.types.SecretStr \| None` | No | | | +| `api_key` | `` | No | | The Bing API key | | `top_k` | `` | No | 3 | | ## Sample Configuration diff --git a/docs/docs/providers/tool_runtime/remote_brave-search.mdx b/docs/docs/providers/tool_runtime/remote_brave-search.mdx index c71ac3d7a..084ae20f5 100644 --- a/docs/docs/providers/tool_runtime/remote_brave-search.mdx +++ b/docs/docs/providers/tool_runtime/remote_brave-search.mdx @@ -14,7 +14,7 @@ Brave Search tool for web search capabilities with privacy-focused results. | Field | Type | Required | Default | Description | |-------|------|----------|---------|-------------| -| `api_key` | `pydantic.types.SecretStr \| None` | No | | The Brave Search API Key | +| `api_key` | `` | No | | The Brave Search API Key | | `max_results` | `` | No | 3 | The maximum number of results to return | ## Sample Configuration diff --git a/docs/docs/providers/tool_runtime/remote_tavily-search.mdx b/docs/docs/providers/tool_runtime/remote_tavily-search.mdx index 0fe263a5b..1c3429983 100644 --- a/docs/docs/providers/tool_runtime/remote_tavily-search.mdx +++ b/docs/docs/providers/tool_runtime/remote_tavily-search.mdx @@ -14,7 +14,7 @@ Tavily Search tool for AI-optimized web search with structured results. 
| Field | Type | Required | Default | Description | |-------|------|----------|---------|-------------| -| `api_key` | `pydantic.types.SecretStr \| None` | No | | The Tavily Search API Key | +| `api_key` | `` | No | | The Tavily Search API Key | | `max_results` | `` | No | 3 | The maximum number of results to return | ## Sample Configuration diff --git a/docs/docs/providers/vector_io/remote_pgvector.mdx b/docs/docs/providers/vector_io/remote_pgvector.mdx index cf0c28f12..6d3157753 100644 --- a/docs/docs/providers/vector_io/remote_pgvector.mdx +++ b/docs/docs/providers/vector_io/remote_pgvector.mdx @@ -217,7 +217,7 @@ See [PGVector's documentation](https://github.com/pgvector/pgvector) for more de | `port` | `int \| None` | No | 5432 | | | `db` | `str \| None` | No | postgres | | | `user` | `str \| None` | No | postgres | | -| `password` | `pydantic.types.SecretStr \| None` | No | mysecretpassword | | +| `password` | `` | No | ********** | | | `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig, annotation=NoneType, required=False, default='sqlite', discriminator='type'` | No | | Config for KV store backend (SQLite only for now) | ## Sample Configuration diff --git a/llama_stack/providers/inline/scoring/braintrust/config.py b/llama_stack/providers/inline/scoring/braintrust/config.py index 088674afd..a2a52d610 100644 --- a/llama_stack/providers/inline/scoring/braintrust/config.py +++ b/llama_stack/providers/inline/scoring/braintrust/config.py @@ -9,8 +9,8 @@ from pydantic import BaseModel, Field, SecretStr class BraintrustScoringConfig(BaseModel): - openai_api_key: SecretStr | None = Field( - default=None, + openai_api_key: SecretStr = Field( + default=SecretStr(""), description="The OpenAI API Key", ) diff --git a/llama_stack/providers/remote/files/s3/config.py b/llama_stack/providers/remote/files/s3/config.py index 
5cddd2b38..b7935902d 100644 --- a/llama_stack/providers/remote/files/s3/config.py +++ b/llama_stack/providers/remote/files/s3/config.py @@ -17,8 +17,8 @@ class S3FilesImplConfig(BaseModel): bucket_name: str = Field(description="S3 bucket name to store files") region: str = Field(default="us-east-1", description="AWS region where the bucket is located") aws_access_key_id: str | None = Field(default=None, description="AWS access key ID (optional if using IAM roles)") - aws_secret_access_key: SecretStr | None = Field( - default=None, description="AWS secret access key (optional if using IAM roles)" + aws_secret_access_key: SecretStr = Field( + default=SecretStr(""), description="AWS secret access key (optional if using IAM roles)" ) endpoint_url: str | None = Field(default=None, description="Custom S3 endpoint URL (for MinIO, LocalStack, etc.)") auto_create_bucket: bool = Field( diff --git a/llama_stack/providers/remote/inference/anthropic/config.py b/llama_stack/providers/remote/inference/anthropic/config.py index ae3034219..c28f05d24 100644 --- a/llama_stack/providers/remote/inference/anthropic/config.py +++ b/llama_stack/providers/remote/inference/anthropic/config.py @@ -12,16 +12,16 @@ from llama_stack.schema_utils import json_schema_type class AnthropicProviderDataValidator(BaseModel): - anthropic_api_key: SecretStr | None = Field( - default=None, + anthropic_api_key: SecretStr = Field( + default=SecretStr(""), description="API key for Anthropic models", ) @json_schema_type class AnthropicConfig(BaseModel): - api_key: SecretStr | None = Field( - default=None, + api_key: SecretStr = Field( + default=SecretStr(""), description="API key for Anthropic models", ) diff --git a/llama_stack/providers/remote/inference/azure/azure.py b/llama_stack/providers/remote/inference/azure/azure.py index a2c69b69c..5d650cc29 100644 --- a/llama_stack/providers/remote/inference/azure/azure.py +++ b/llama_stack/providers/remote/inference/azure/azure.py @@ -21,7 +21,7 @@ class 
AzureInferenceAdapter(OpenAIMixin, LiteLLMOpenAIMixin): LiteLLMOpenAIMixin.__init__( self, litellm_provider_name="azure", - api_key_from_config=config.api_key.get_secret_value(), + api_key_from_config=config.api_key, provider_data_api_key_field="azure_api_key", openai_compat_api_base=str(config.api_base), ) diff --git a/llama_stack/providers/remote/inference/cerebras/config.py b/llama_stack/providers/remote/inference/cerebras/config.py index 519bd9119..0b737ea6c 100644 --- a/llama_stack/providers/remote/inference/cerebras/config.py +++ b/llama_stack/providers/remote/inference/cerebras/config.py @@ -21,7 +21,7 @@ class CerebrasImplConfig(BaseModel): description="Base URL for the Cerebras API", ) api_key: SecretStr = Field( - default=SecretStr(os.environ.get("CEREBRAS_API_KEY")), + default=SecretStr(os.environ.get("CEREBRAS_API_KEY", "")), description="Cerebras API Key", ) diff --git a/llama_stack/providers/remote/inference/fireworks/config.py b/llama_stack/providers/remote/inference/fireworks/config.py index cd28096a5..5bf0fcd88 100644 --- a/llama_stack/providers/remote/inference/fireworks/config.py +++ b/llama_stack/providers/remote/inference/fireworks/config.py @@ -18,8 +18,8 @@ class FireworksImplConfig(RemoteInferenceProviderConfig): default="https://api.fireworks.ai/inference/v1", description="The URL for the Fireworks server", ) - api_key: SecretStr | None = Field( - default=None, + api_key: SecretStr = Field( + default=SecretStr(""), description="The Fireworks.ai API Key", ) diff --git a/llama_stack/providers/remote/inference/gemini/config.py b/llama_stack/providers/remote/inference/gemini/config.py index 27897965c..4a69bd064 100644 --- a/llama_stack/providers/remote/inference/gemini/config.py +++ b/llama_stack/providers/remote/inference/gemini/config.py @@ -12,16 +12,16 @@ from llama_stack.schema_utils import json_schema_type class GeminiProviderDataValidator(BaseModel): - 
gemini_api_key: SecretStr | None = Field( - default=None, + gemini_api_key: SecretStr = Field( + default=SecretStr(""), description="API key for Gemini models", ) @json_schema_type class GeminiConfig(BaseModel): - api_key: SecretStr | None = Field( - default=None, + api_key: SecretStr = Field( + default=SecretStr(""), description="API key for Gemini models", ) diff --git a/llama_stack/providers/remote/inference/groq/config.py b/llama_stack/providers/remote/inference/groq/config.py index 1011e8da9..efc1f437b 100644 --- a/llama_stack/providers/remote/inference/groq/config.py +++ b/llama_stack/providers/remote/inference/groq/config.py @@ -12,17 +12,17 @@ from llama_stack.schema_utils import json_schema_type class GroqProviderDataValidator(BaseModel): - groq_api_key: SecretStr | None = Field( - default=None, + groq_api_key: SecretStr = Field( + default=SecretStr(""), description="API key for Groq models", ) @json_schema_type class GroqConfig(BaseModel): - api_key: SecretStr | None = Field( + api_key: SecretStr = Field( # The Groq client library loads the GROQ_API_KEY environment variable by default - default=None, + default=SecretStr(""), description="The Groq API key", ) diff --git a/llama_stack/providers/remote/inference/llama_openai_compat/config.py b/llama_stack/providers/remote/inference/llama_openai_compat/config.py index 064cc860d..d20fed8e0 100644 --- a/llama_stack/providers/remote/inference/llama_openai_compat/config.py +++ b/llama_stack/providers/remote/inference/llama_openai_compat/config.py @@ -12,16 +12,16 @@ from llama_stack.schema_utils import json_schema_type class LlamaProviderDataValidator(BaseModel): - llama_api_key: SecretStr | None = Field( - default=None, + llama_api_key: SecretStr = Field( + default=SecretStr(""), description="API key for api.llama models", ) @json_schema_type class LlamaCompatConfig(BaseModel): - api_key: SecretStr | None = Field( - default=None, + api_key: SecretStr = Field( + default=SecretStr(""), description="The Llama API 
key", ) diff --git a/llama_stack/providers/remote/inference/nvidia/config.py b/llama_stack/providers/remote/inference/nvidia/config.py index e1b791719..9ca2ffdd6 100644 --- a/llama_stack/providers/remote/inference/nvidia/config.py +++ b/llama_stack/providers/remote/inference/nvidia/config.py @@ -39,8 +39,8 @@ class NVIDIAConfig(BaseModel): default_factory=lambda: os.getenv("NVIDIA_BASE_URL", "https://integrate.api.nvidia.com"), description="A base url for accessing the NVIDIA NIM", ) - api_key: SecretStr | None = Field( - default_factory=lambda: SecretStr(os.getenv("NVIDIA_API_KEY")), + api_key: SecretStr = Field( + default_factory=lambda: SecretStr(os.getenv("NVIDIA_API_KEY", "")), description="The NVIDIA API key, only needed of using the hosted service", ) timeout: int = Field( diff --git a/llama_stack/providers/remote/inference/openai/config.py b/llama_stack/providers/remote/inference/openai/config.py index 7de7bcc32..b6f5f1a55 100644 --- a/llama_stack/providers/remote/inference/openai/config.py +++ b/llama_stack/providers/remote/inference/openai/config.py @@ -12,16 +12,16 @@ from llama_stack.schema_utils import json_schema_type class OpenAIProviderDataValidator(BaseModel): - openai_api_key: SecretStr | None = Field( - default=None, + openai_api_key: SecretStr = Field( + default=SecretStr(""), description="API key for OpenAI models", ) @json_schema_type class OpenAIConfig(BaseModel): - api_key: SecretStr | None = Field( - default=None, + api_key: SecretStr = Field( + default=SecretStr(""), description="API key for OpenAI models", ) base_url: str = Field( diff --git a/llama_stack/providers/remote/inference/passthrough/config.py b/llama_stack/providers/remote/inference/passthrough/config.py index 647b2db46..f57ce0e8a 100644 --- a/llama_stack/providers/remote/inference/passthrough/config.py +++ b/llama_stack/providers/remote/inference/passthrough/config.py @@ -18,8 +18,8 @@ class PassthroughImplConfig(BaseModel): description="The URL for the passthrough endpoint", 
) - api_key: SecretStr | None = Field( - default=None, + api_key: SecretStr = Field( + default=SecretStr(""), description="API Key for the passthrouth endpoint", ) diff --git a/llama_stack/providers/remote/inference/runpod/config.py b/llama_stack/providers/remote/inference/runpod/config.py index 60086acef..3c6c5afe9 100644 --- a/llama_stack/providers/remote/inference/runpod/config.py +++ b/llama_stack/providers/remote/inference/runpod/config.py @@ -17,8 +17,8 @@ class RunpodImplConfig(BaseModel): default=None, description="The URL for the Runpod model serving endpoint", ) - api_token: SecretStr | None = Field( - default=None, + api_token: SecretStr = Field( + default=SecretStr(""), description="The API token", ) diff --git a/llama_stack/providers/remote/inference/sambanova/config.py b/llama_stack/providers/remote/inference/sambanova/config.py index 50ad53d06..4637cb49e 100644 --- a/llama_stack/providers/remote/inference/sambanova/config.py +++ b/llama_stack/providers/remote/inference/sambanova/config.py @@ -12,8 +12,8 @@ from llama_stack.schema_utils import json_schema_type class SambaNovaProviderDataValidator(BaseModel): - sambanova_api_key: str | None = Field( - default=None, + sambanova_api_key: SecretStr = Field( + default=SecretStr(""), description="Sambanova Cloud API key", ) @@ -24,8 +24,8 @@ class SambaNovaImplConfig(BaseModel): default="https://api.sambanova.ai/v1", description="The URL for the SambaNova AI server", ) - api_key: SecretStr | None = Field( - default=None, + api_key: SecretStr = Field( + default=SecretStr(""), description="The SambaNova cloud API Key", ) diff --git a/llama_stack/providers/remote/inference/together/config.py b/llama_stack/providers/remote/inference/together/config.py index f6725333c..c15d42140 100644 --- a/llama_stack/providers/remote/inference/together/config.py +++ b/llama_stack/providers/remote/inference/together/config.py @@ -18,8 +18,8 @@ class TogetherImplConfig(RemoteInferenceProviderConfig): 
default="https://api.together.xyz/v1", description="The URL for the Together AI server", ) - api_key: SecretStr | None = Field( - default=None, + api_key: SecretStr = Field( + default=SecretStr(""), description="The Together AI API Key", ) diff --git a/llama_stack/providers/remote/inference/vertexai/vertexai.py b/llama_stack/providers/remote/inference/vertexai/vertexai.py index 733f2ab4a..581a00f29 100644 --- a/llama_stack/providers/remote/inference/vertexai/vertexai.py +++ b/llama_stack/providers/remote/inference/vertexai/vertexai.py @@ -24,12 +24,12 @@ class VertexAIInferenceAdapter(OpenAIMixin, LiteLLMOpenAIMixin): LiteLLMOpenAIMixin.__init__( self, litellm_provider_name="vertex_ai", - api_key_from_config=None, # Vertex AI uses ADC, not API keys + api_key_from_config=SecretStr(""), # Vertex AI uses ADC, not API keys provider_data_api_key_field="vertex_project", # Use project for validation ) self.config = config - def get_api_key(self) -> str: + def get_api_key(self) -> SecretStr: """ Get an access token for Vertex AI using Application Default Credentials. 
@@ -40,7 +40,7 @@ class VertexAIInferenceAdapter(OpenAIMixin, LiteLLMOpenAIMixin): # Get default credentials - will read from GOOGLE_APPLICATION_CREDENTIALS credentials, _ = default(scopes=["https://www.googleapis.com/auth/cloud-platform"]) credentials.refresh(google.auth.transport.requests.Request()) - return str(credentials.token) + return SecretStr(credentials.token) except Exception: # If we can't get credentials, return empty string to let LiteLLM handle it # This allows the LiteLLM mixin to work with ADC directly diff --git a/llama_stack/providers/remote/inference/vllm/__init__.py b/llama_stack/providers/remote/inference/vllm/__init__.py index 1f196e507..5b3ec6d5b 100644 --- a/llama_stack/providers/remote/inference/vllm/__init__.py +++ b/llama_stack/providers/remote/inference/vllm/__init__.py @@ -4,13 +4,16 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. -from pydantic import BaseModel +from pydantic import BaseModel, Field, SecretStr from .config import VLLMInferenceAdapterConfig class VLLMProviderDataValidator(BaseModel): - vllm_api_token: str | None = None + vllm_api_token: SecretStr = Field( + default=SecretStr(""), + description="API token for vLLM models", + ) async def get_adapter_impl(config: VLLMInferenceAdapterConfig, _deps): diff --git a/llama_stack/providers/remote/inference/vllm/config.py b/llama_stack/providers/remote/inference/vllm/config.py index 978427930..708a39be1 100644 --- a/llama_stack/providers/remote/inference/vllm/config.py +++ b/llama_stack/providers/remote/inference/vllm/config.py @@ -21,8 +21,8 @@ class VLLMInferenceAdapterConfig(BaseModel): default=4096, description="Maximum number of tokens to generate.", ) - api_token: SecretStr | None = Field( - default=SecretStr("fake"), + api_token: SecretStr = Field( + default=SecretStr(""), description="The API token", ) tls_verify: bool | str = Field( diff --git 
a/llama_stack/providers/remote/inference/vllm/vllm.py b/llama_stack/providers/remote/inference/vllm/vllm.py index 2e2e1ea5c..8fbb4b815 100644 --- a/llama_stack/providers/remote/inference/vllm/vllm.py +++ b/llama_stack/providers/remote/inference/vllm/vllm.py @@ -294,7 +294,7 @@ class VLLMInferenceAdapter(OpenAIMixin, LiteLLMOpenAIMixin, Inference, ModelsPro self, model_entries=build_hf_repo_model_entries(), litellm_provider_name="vllm", - api_key_from_config=config.api_token.get_secret_value(), + api_key_from_config=config.api_token, provider_data_api_key_field="vllm_api_token", openai_compat_api_base=config.url, ) diff --git a/llama_stack/providers/remote/inference/watsonx/config.py b/llama_stack/providers/remote/inference/watsonx/config.py index 42c25d93e..a28de1226 100644 --- a/llama_stack/providers/remote/inference/watsonx/config.py +++ b/llama_stack/providers/remote/inference/watsonx/config.py @@ -24,8 +24,8 @@ class WatsonXConfig(BaseModel): default_factory=lambda: os.getenv("WATSONX_BASE_URL", "https://us-south.ml.cloud.ibm.com"), description="A base url for accessing the watsonx.ai", ) - api_key: SecretStr | None = Field( - default_factory=lambda: os.getenv("WATSONX_API_KEY"), + api_key: SecretStr = Field( + default_factory=lambda: SecretStr(os.getenv("WATSONX_API_KEY", "")), description="The watsonx API key", ) project_id: str | None = Field( diff --git a/llama_stack/providers/remote/safety/sambanova/config.py b/llama_stack/providers/remote/safety/sambanova/config.py index 2cde97098..814e5b1e5 100644 --- a/llama_stack/providers/remote/safety/sambanova/config.py +++ b/llama_stack/providers/remote/safety/sambanova/config.py @@ -12,8 +12,8 @@ from llama_stack.schema_utils import json_schema_type class SambaNovaProviderDataValidator(BaseModel): - sambanova_api_key: str | None = Field( - default=None, + sambanova_api_key: SecretStr = Field( + default=SecretStr(""), description="Sambanova Cloud API key", ) @@ -24,8 +24,8 @@ class SambaNovaSafetyConfig(BaseModel): 
default="https://api.sambanova.ai/v1", description="The URL for the SambaNova AI server", ) - api_key: SecretStr | None = Field( - default=None, + api_key: SecretStr = Field( + default=SecretStr(""), description="The SambaNova cloud API Key", ) diff --git a/llama_stack/providers/remote/tool_runtime/bing_search/config.py b/llama_stack/providers/remote/tool_runtime/bing_search/config.py index 3c4852fba..8ac4358ba 100644 --- a/llama_stack/providers/remote/tool_runtime/bing_search/config.py +++ b/llama_stack/providers/remote/tool_runtime/bing_search/config.py @@ -6,13 +6,16 @@ from typing import Any -from pydantic import BaseModel, SecretStr +from pydantic import BaseModel, Field, SecretStr class BingSearchToolConfig(BaseModel): """Configuration for Bing Search Tool Runtime""" - api_key: SecretStr | None = None + api_key: SecretStr = Field( + default=SecretStr(""), + description="The Bing API key", + ) top_k: int = 3 @classmethod diff --git a/llama_stack/providers/remote/tool_runtime/brave_search/config.py b/llama_stack/providers/remote/tool_runtime/brave_search/config.py index 41f6c9bbc..ddc711c12 100644 --- a/llama_stack/providers/remote/tool_runtime/brave_search/config.py +++ b/llama_stack/providers/remote/tool_runtime/brave_search/config.py @@ -10,8 +10,8 @@ from pydantic import BaseModel, Field, SecretStr class BraveSearchToolConfig(BaseModel): - api_key: SecretStr | None = Field( - default=None, + api_key: SecretStr = Field( + default=SecretStr(""), description="The Brave Search API Key", ) max_results: int = Field( diff --git a/llama_stack/providers/remote/tool_runtime/tavily_search/config.py b/llama_stack/providers/remote/tool_runtime/tavily_search/config.py index f81b3455a..c0de93114 100644 --- a/llama_stack/providers/remote/tool_runtime/tavily_search/config.py +++ b/llama_stack/providers/remote/tool_runtime/tavily_search/config.py @@ -10,8 +10,8 @@ from pydantic import BaseModel, Field, SecretStr class TavilySearchToolConfig(BaseModel): - api_key: SecretStr | 
None = Field( - default=None, + api_key: SecretStr = Field( + default=SecretStr(""), description="The Tavily Search API Key", ) max_results: int = Field( diff --git a/llama_stack/providers/remote/vector_io/pgvector/config.py b/llama_stack/providers/remote/vector_io/pgvector/config.py index abfa0eacf..1c6d0ed52 100644 --- a/llama_stack/providers/remote/vector_io/pgvector/config.py +++ b/llama_stack/providers/remote/vector_io/pgvector/config.py @@ -21,7 +21,7 @@ class PGVectorVectorIOConfig(BaseModel): port: int | None = Field(default=5432) db: str | None = Field(default="postgres") user: str | None = Field(default="postgres") - password: SecretStr | None = Field(default="mysecretpassword") + password: SecretStr = Field(default=SecretStr("mysecretpassword")) kvstore: KVStoreConfig | None = Field(description="Config for KV store backend (SQLite only for now)", default=None) @classmethod diff --git a/llama_stack/providers/utils/bedrock/config.py b/llama_stack/providers/utils/bedrock/config.py index b48f0ea69..2a5c8e882 100644 --- a/llama_stack/providers/utils/bedrock/config.py +++ b/llama_stack/providers/utils/bedrock/config.py @@ -14,12 +14,12 @@ class BedrockBaseConfig(BaseModel): default_factory=lambda: os.getenv("AWS_ACCESS_KEY_ID"), description="The AWS access key to use. Default use environment variable: AWS_ACCESS_KEY_ID", ) - aws_secret_access_key: SecretStr | None = Field( - default_factory=lambda: SecretStr(val) if (val := os.getenv("AWS_SECRET_ACCESS_KEY")) else None, + aws_secret_access_key: SecretStr = Field( + default_factory=lambda: SecretStr(os.getenv("AWS_SECRET_ACCESS_KEY", "")), description="The AWS secret access key to use. 
Default use environment variable: AWS_SECRET_ACCESS_KEY", ) - aws_session_token: SecretStr | None = Field( - default_factory=lambda: SecretStr(val) if (val := os.getenv("AWS_SESSION_TOKEN")) else None, + aws_session_token: SecretStr = Field( + default_factory=lambda: SecretStr(os.getenv("AWS_SESSION_TOKEN", "")), description="The AWS session token to use. Default use environment variable: AWS_SESSION_TOKEN", ) region_name: str | None = Field( diff --git a/llama_stack/providers/utils/inference/litellm_openai_mixin.py b/llama_stack/providers/utils/inference/litellm_openai_mixin.py index fc39852ae..8bdfbbbc9 100644 --- a/llama_stack/providers/utils/inference/litellm_openai_mixin.py +++ b/llama_stack/providers/utils/inference/litellm_openai_mixin.py @@ -69,7 +69,7 @@ class LiteLLMOpenAIMixin( def __init__( self, litellm_provider_name: str, - api_key_from_config: SecretStr | None, + api_key_from_config: SecretStr, provider_data_api_key_field: str, model_entries: list[ProviderModelEntry] | None = None, openai_compat_api_base: str | None = None, diff --git a/llama_stack/providers/utils/kvstore/config.py b/llama_stack/providers/utils/kvstore/config.py index 1cd90d308..baab4e372 100644 --- a/llama_stack/providers/utils/kvstore/config.py +++ b/llama_stack/providers/utils/kvstore/config.py @@ -74,7 +74,7 @@ class PostgresKVStoreConfig(CommonConfig): port: int = 5432 db: str = "llamastack" user: str - password: SecretStr | None = None + password: SecretStr = SecretStr("") ssl_mode: str | None = None ca_cert_path: str | None = None table_name: str = "llamastack_kvstore" @@ -118,7 +118,7 @@ class MongoDBKVStoreConfig(CommonConfig): port: int = 27017 db: str = "llamastack" user: str | None = None - password: SecretStr | None = None + password: SecretStr = SecretStr("") collection_name: str = "llamastack_kvstore" @classmethod diff --git a/llama_stack/providers/utils/sqlstore/sqlstore.py b/llama_stack/providers/utils/sqlstore/sqlstore.py index 3f6bedc7e..6eaafccfe 100644 --- 
a/llama_stack/providers/utils/sqlstore/sqlstore.py +++ b/llama_stack/providers/utils/sqlstore/sqlstore.py @@ -63,11 +63,11 @@ class PostgresSqlStoreConfig(SqlAlchemySqlStoreConfig): port: int = 5432 db: str = "llamastack" user: str - password: SecretStr | None = None + password: SecretStr = SecretStr("") @property def engine_str(self) -> str: - return f"postgresql+asyncpg://{self.user}:{self.password.get_secret_value() if self.password else ''}@{self.host}:{self.port}/{self.db}" + return f"postgresql+asyncpg://{self.user}:{self.password.get_secret_value()}@{self.host}:{self.port}/{self.db}" @classmethod def pip_packages(cls) -> list[str]: