chore: use empty SecretStr values as default

Better than using SecretStr | None so we centralize the null handling.

Signed-off-by: Sébastien Han <seb@redhat.com>
This commit is contained in:
Sébastien Han 2025-09-22 14:21:10 +02:00
parent c4cb6aa8d9
commit 4af141292f
No known key found for this signature in database
51 changed files with 103 additions and 93 deletions

View file

@@ -12,16 +12,16 @@ from llama_stack.schema_utils import json_schema_type
class AnthropicProviderDataValidator(BaseModel):
anthropic_api_key: SecretStr | None = Field(
default=None,
anthropic_api_key: SecretStr = Field(
default=SecretStr(""),
description="API key for Anthropic models",
)
@json_schema_type
class AnthropicConfig(BaseModel):
api_key: SecretStr | None = Field(
default=None,
api_key: SecretStr = Field(
default=SecretStr(""),
description="API key for Anthropic models",
)

View file

@@ -21,7 +21,7 @@ class AzureInferenceAdapter(OpenAIMixin, LiteLLMOpenAIMixin):
LiteLLMOpenAIMixin.__init__(
self,
litellm_provider_name="azure",
api_key_from_config=config.api_key.get_secret_value(),
api_key_from_config=config.api_key,
provider_data_api_key_field="azure_api_key",
openai_compat_api_base=str(config.api_base),
)

View file

@@ -21,7 +21,11 @@ class CerebrasImplConfig(BaseModel):
description="Base URL for the Cerebras API",
)
api_key: SecretStr = Field(
default=SecretStr(os.environ.get("CEREBRAS_API_KEY", "")),
description="Cerebras API Key",
)

View file

@@ -18,8 +18,8 @@ class FireworksImplConfig(RemoteInferenceProviderConfig):
default="https://api.fireworks.ai/inference/v1",
description="The URL for the Fireworks server",
)
api_key: SecretStr | None = Field(
default=None,
api_key: SecretStr = Field(
default=SecretStr(""),
description="The Fireworks.ai API Key",
)

View file

@@ -12,16 +12,16 @@ from llama_stack.schema_utils import json_schema_type
class GeminiProviderDataValidator(BaseModel):
gemini_api_key: SecretStr | None = Field(
default=None,
gemini_api_key: SecretStr = Field(
default=SecretStr(""),
description="API key for Gemini models",
)
@json_schema_type
class GeminiConfig(BaseModel):
api_key: SecretStr | None = Field(
default=None,
api_key: SecretStr = Field(
default=SecretStr(""),
description="API key for Gemini models",
)

View file

@@ -12,17 +12,17 @@ from llama_stack.schema_utils import json_schema_type
class GroqProviderDataValidator(BaseModel):
groq_api_key: SecretStr | None = Field(
default=None,
groq_api_key: SecretStr = Field(
default=SecretStr(""),
description="API key for Groq models",
)
@json_schema_type
class GroqConfig(BaseModel):
api_key: SecretStr | None = Field(
api_key: SecretStr = Field(
# The Groq client library loads the GROQ_API_KEY environment variable by default
default=None,
default=SecretStr(""),
description="The Groq API key",
)

View file

@@ -12,16 +12,16 @@ from llama_stack.schema_utils import json_schema_type
class LlamaProviderDataValidator(BaseModel):
llama_api_key: SecretStr | None = Field(
default=None,
llama_api_key: SecretStr = Field(
default=SecretStr(""),
description="API key for api.llama models",
)
@json_schema_type
class LlamaCompatConfig(BaseModel):
api_key: SecretStr | None = Field(
default=None,
api_key: SecretStr = Field(
default=SecretStr(""),
description="The Llama API key",
)

View file

@@ -39,8 +39,8 @@ class NVIDIAConfig(BaseModel):
default_factory=lambda: os.getenv("NVIDIA_BASE_URL", "https://integrate.api.nvidia.com"),
description="A base url for accessing the NVIDIA NIM",
)
api_key: SecretStr | None = Field(
default_factory=lambda: SecretStr(os.getenv("NVIDIA_API_KEY")),
api_key: SecretStr = Field(
default_factory=lambda: SecretStr(os.getenv("NVIDIA_API_KEY", "")),
description="The NVIDIA API key, only needed if using the hosted service",
)
timeout: int = Field(

View file

@@ -12,16 +12,16 @@ from llama_stack.schema_utils import json_schema_type
class OpenAIProviderDataValidator(BaseModel):
openai_api_key: SecretStr | None = Field(
default=None,
openai_api_key: SecretStr = Field(
default=SecretStr(""),
description="API key for OpenAI models",
)
@json_schema_type
class OpenAIConfig(BaseModel):
api_key: SecretStr | None = Field(
default=None,
api_key: SecretStr = Field(
default=SecretStr(""),
description="API key for OpenAI models",
)
base_url: str = Field(

View file

@@ -18,8 +18,8 @@ class PassthroughImplConfig(BaseModel):
description="The URL for the passthrough endpoint",
)
api_key: SecretStr | None = Field(
default=None,
api_key: SecretStr = Field(
default=SecretStr(""),
description="API Key for the passthrough endpoint",
)

View file

@@ -17,8 +17,8 @@ class RunpodImplConfig(BaseModel):
default=None,
description="The URL for the Runpod model serving endpoint",
)
api_token: SecretStr | None = Field(
default=None,
api_token: SecretStr = Field(
default=SecretStr(""),
description="The API token",
)

View file

@@ -12,8 +12,8 @@ from llama_stack.schema_utils import json_schema_type
class SambaNovaProviderDataValidator(BaseModel):
sambanova_api_key: str | None = Field(
default=None,
sambanova_api_key: SecretStr = Field(
default=SecretStr(""),
description="Sambanova Cloud API key",
)
@@ -24,8 +24,8 @@ class SambaNovaImplConfig(BaseModel):
default="https://api.sambanova.ai/v1",
description="The URL for the SambaNova AI server",
)
api_key: SecretStr | None = Field(
default=None,
api_key: SecretStr = Field(
default=SecretStr(""),
description="The SambaNova cloud API Key",
)

View file

@@ -18,8 +18,8 @@ class TogetherImplConfig(RemoteInferenceProviderConfig):
default="https://api.together.xyz/v1",
description="The URL for the Together AI server",
)
api_key: SecretStr | None = Field(
default=None,
api_key: SecretStr = Field(
default=SecretStr(""),
description="The Together AI API Key",
)

View file

@@ -24,12 +24,12 @@ class VertexAIInferenceAdapter(OpenAIMixin, LiteLLMOpenAIMixin):
LiteLLMOpenAIMixin.__init__(
self,
litellm_provider_name="vertex_ai",
api_key_from_config=None, # Vertex AI uses ADC, not API keys
api_key_from_config=SecretStr(""), # Vertex AI uses ADC, not API keys
provider_data_api_key_field="vertex_project", # Use project for validation
)
self.config = config
def get_api_key(self) -> str:
def get_api_key(self) -> SecretStr:
"""
Get an access token for Vertex AI using Application Default Credentials.
@@ -40,7 +40,7 @@ class VertexAIInferenceAdapter(OpenAIMixin, LiteLLMOpenAIMixin):
# Get default credentials - will read from GOOGLE_APPLICATION_CREDENTIALS
credentials, _ = default(scopes=["https://www.googleapis.com/auth/cloud-platform"])
credentials.refresh(google.auth.transport.requests.Request())
return str(credentials.token)
return SecretStr(credentials.token)
except Exception:
# If we can't get credentials, return empty string to let LiteLLM handle it
# This allows the LiteLLM mixin to work with ADC directly

View file

@@ -4,13 +4,16 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from pydantic import BaseModel
from pydantic import BaseModel, Field, SecretStr
from .config import VLLMInferenceAdapterConfig
class VLLMProviderDataValidator(BaseModel):
vllm_api_token: str | None = None
vllm_api_token: SecretStr = Field(
default=SecretStr(""),
description="API token for vLLM models",
)
async def get_adapter_impl(config: VLLMInferenceAdapterConfig, _deps):

View file

@@ -21,8 +21,8 @@ class VLLMInferenceAdapterConfig(BaseModel):
default=4096,
description="Maximum number of tokens to generate.",
)
api_token: SecretStr | None = Field(
default=SecretStr("fake"),
api_token: SecretStr = Field(
default=SecretStr(""),
description="The API token",
)
tls_verify: bool | str = Field(

View file

@@ -294,7 +294,7 @@ class VLLMInferenceAdapter(OpenAIMixin, LiteLLMOpenAIMixin, Inference, ModelsPro
self,
model_entries=build_hf_repo_model_entries(),
litellm_provider_name="vllm",
api_key_from_config=config.api_token.get_secret_value(),
api_key_from_config=config.api_token,
provider_data_api_key_field="vllm_api_token",
openai_compat_api_base=config.url,
)

View file

@@ -24,8 +24,8 @@ class WatsonXConfig(BaseModel):
default_factory=lambda: os.getenv("WATSONX_BASE_URL", "https://us-south.ml.cloud.ibm.com"),
description="A base url for accessing the watsonx.ai",
)
api_key: SecretStr | None = Field(
default_factory=lambda: os.getenv("WATSONX_API_KEY"),
api_key: SecretStr = Field(
default_factory=lambda: SecretStr(os.getenv("WATSONX_API_KEY", "")),
description="The watsonx API key",
)
project_id: str | None = Field(