fix: prevent telemetry from leaking sensitive info

Prevent sensitive information from being logged in telemetry output by
typing sensitive fields as SecretStr. API keys and the KV store
password are now covered, and all providers have been converted.
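
For reference, pydantic's SecretStr masks its value in str() and repr(),
so anything that dumps or logs a config shows a placeholder instead of
the key; the raw value only comes out through an explicit
get_secret_value() call. A minimal sketch (model and key names are
illustrative, not taken from this change):

    from pydantic import BaseModel, SecretStr

    class ExampleConfig(BaseModel):
        api_key: SecretStr | None = None

    cfg = ExampleConfig(api_key="sk-example")  # plain str is coerced on validation
    print(cfg)                                 # api_key=SecretStr('**********')
    print(cfg.api_key.get_secret_value())      # sk-example, only on explicit request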

Signed-off-by: Sébastien Han <seb@redhat.com>
Sébastien Han 2025-08-08 15:54:45 +02:00
parent 8dc9fd6844
commit c4cb6aa8d9
53 changed files with 121 additions and 109 deletions


@@ -4,6 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 from llama_stack.providers.utils.inference.litellm_openai_mixin import LiteLLMOpenAIMixin
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
@@ -27,7 +28,7 @@ class AnthropicInferenceAdapter(OpenAIMixin, LiteLLMOpenAIMixin):
         LiteLLMOpenAIMixin.__init__(
             self,
             litellm_provider_name="anthropic",
-            api_key_from_config=config.api_key,
+            api_key_from_config=config.api_key.get_secret_value() if config.api_key else None,
             provider_data_api_key_field="anthropic_api_key",
         )
         self.config = config
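
The adapter changes in this commit all follow the same
unwrap-at-the-boundary pattern: the SDK clients (LiteLLM, OpenAI) still
take a plain string, so the secret is unwrapped only at the point of
the call, with a guard for optional fields. A condensed sketch of the
guard (the helper name is hypothetical):

    from pydantic import SecretStr

    def unwrap(secret: SecretStr | None) -> str | None:
        # Expose the raw value only at the client boundary; everywhere
        # else the key stays wrapped and is safe to log.
        return secret.get_secret_value() if secret else None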


@@ -6,13 +6,13 @@
 from typing import Any
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, SecretStr
 from llama_stack.schema_utils import json_schema_type
 class AnthropicProviderDataValidator(BaseModel):
-    anthropic_api_key: str | None = Field(
+    anthropic_api_key: SecretStr | None = Field(
         default=None,
         description="API key for Anthropic models",
     )
@@ -20,7 +20,7 @@ class AnthropicProviderDataValidator(BaseModel):
 @json_schema_type
 class AnthropicConfig(BaseModel):
-    api_key: str | None = Field(
+    api_key: SecretStr | None = Field(
         default=None,
         description="API key for Anthropic models",
     )


@@ -6,13 +6,13 @@
 from typing import Any
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, SecretStr
 from llama_stack.schema_utils import json_schema_type
 class GeminiProviderDataValidator(BaseModel):
-    gemini_api_key: str | None = Field(
+    gemini_api_key: SecretStr | None = Field(
         default=None,
         description="API key for Gemini models",
     )
@@ -20,7 +20,7 @@ class GeminiProviderDataValidator(BaseModel):
 @json_schema_type
 class GeminiConfig(BaseModel):
-    api_key: str | None = Field(
+    api_key: SecretStr | None = Field(
         default=None,
         description="API key for Gemini models",
     )


@@ -4,6 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 from llama_stack.providers.utils.inference.litellm_openai_mixin import LiteLLMOpenAIMixin
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
@@ -19,7 +20,7 @@ class GeminiInferenceAdapter(OpenAIMixin, LiteLLMOpenAIMixin):
         LiteLLMOpenAIMixin.__init__(
             self,
             litellm_provider_name="gemini",
-            api_key_from_config=config.api_key,
+            api_key_from_config=config.api_key.get_secret_value() if config.api_key else None,
             provider_data_api_key_field="gemini_api_key",
         )
         self.config = config


@@ -6,13 +6,13 @@
 from typing import Any
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, SecretStr
 from llama_stack.schema_utils import json_schema_type
 class GroqProviderDataValidator(BaseModel):
-    groq_api_key: str | None = Field(
+    groq_api_key: SecretStr | None = Field(
         default=None,
         description="API key for Groq models",
     )
@@ -20,7 +20,7 @@ class GroqProviderDataValidator(BaseModel):
 @json_schema_type
 class GroqConfig(BaseModel):
-    api_key: str | None = Field(
+    api_key: SecretStr | None = Field(
         # The Groq client library loads the GROQ_API_KEY environment variable by default
         default=None,
         description="The Groq API key",


@@ -6,13 +6,13 @@
 from typing import Any
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, SecretStr
 from llama_stack.schema_utils import json_schema_type
 class LlamaProviderDataValidator(BaseModel):
-    llama_api_key: str | None = Field(
+    llama_api_key: SecretStr | None = Field(
         default=None,
         description="API key for api.llama models",
     )
@@ -20,7 +20,7 @@ class LlamaProviderDataValidator(BaseModel):
 @json_schema_type
 class LlamaCompatConfig(BaseModel):
-    api_key: str | None = Field(
+    api_key: SecretStr | None = Field(
         default=None,
         description="The Llama API key",
     )


@@ -6,13 +6,13 @@
 from typing import Any
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, SecretStr
 from llama_stack.schema_utils import json_schema_type
 class OpenAIProviderDataValidator(BaseModel):
-    openai_api_key: str | None = Field(
+    openai_api_key: SecretStr | None = Field(
         default=None,
         description="API key for OpenAI models",
     )
@@ -20,7 +20,7 @@ class OpenAIProviderDataValidator(BaseModel):
 @json_schema_type
 class OpenAIConfig(BaseModel):
-    api_key: str | None = Field(
+    api_key: SecretStr | None = Field(
         default=None,
         description="API key for OpenAI models",
     )


@@ -6,7 +6,7 @@
 from typing import Any
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, SecretStr
 from llama_stack.schema_utils import json_schema_type
@@ -17,7 +17,7 @@ class RunpodImplConfig(BaseModel):
         default=None,
         description="The URL for the Runpod model serving endpoint",
     )
-    api_token: str | None = Field(
+    api_token: SecretStr | None = Field(
         default=None,
         description="The API token",
     )


@@ -103,7 +103,10 @@ class RunpodInferenceAdapter(
             tool_config=tool_config,
         )
-        client = OpenAI(base_url=self.config.url, api_key=self.config.api_token)
+        client = OpenAI(
+            base_url=self.config.url,
+            api_key=self.config.api_token.get_secret_value() if self.config.api_token else None,
+        )
         if stream:
             return self._stream_chat_completion(request, client)
         else:


@@ -8,6 +8,7 @@ from typing import Any
 import google.auth.transport.requests
 from google.auth import default
+from pydantic import SecretStr
 from llama_stack.apis.inference import ChatCompletionRequest
 from llama_stack.providers.utils.inference.litellm_openai_mixin import (
@@ -43,7 +44,7 @@ class VertexAIInferenceAdapter(OpenAIMixin, LiteLLMOpenAIMixin):
         except Exception:
             # If we can't get credentials, return empty string to let LiteLLM handle it
             # This allows the LiteLLM mixin to work with ADC directly
-            return ""
+            return SecretStr("")
     def get_base_url(self) -> str:
         """


@@ -6,7 +6,7 @@
 from pathlib import Path
-from pydantic import BaseModel, Field, field_validator
+from pydantic import BaseModel, Field, SecretStr, field_validator
 from llama_stack.schema_utils import json_schema_type
@@ -21,8 +21,8 @@ class VLLMInferenceAdapterConfig(BaseModel):
         default=4096,
         description="Maximum number of tokens to generate.",
     )
-    api_token: str | None = Field(
-        default="fake",
+    api_token: SecretStr | None = Field(
+        default=SecretStr("fake"),
         description="The API token",
     )
     tls_verify: bool | str = Field(
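
One subtlety in the vLLM config above: pydantic does not validate (and
therefore does not coerce) default values unless validate_default is
enabled, so a bare default="fake" would remain a plain str at runtime;
wrapping it as SecretStr("fake") keeps the attribute consistently
masked. A small sketch of the difference (the field name is
illustrative):

    from pydantic import BaseModel, Field, SecretStr

    class Demo(BaseModel):
        # A bare "fake" default would bypass validation and stay a str;
        # wrapping it keeps the type (and the masking) consistent.
        token: SecretStr | None = Field(default=SecretStr("fake"))

    print(Demo().token)  # ********** rather than the literal default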


@@ -294,7 +294,7 @@ class VLLMInferenceAdapter(OpenAIMixin, LiteLLMOpenAIMixin, Inference, ModelsPro
             self,
             model_entries=build_hf_repo_model_entries(),
             litellm_provider_name="vllm",
-            api_key_from_config=config.api_token,
+            api_key_from_config=config.api_token.get_secret_value(),
             provider_data_api_key_field="vllm_api_token",
             openai_compat_api_base=config.url,
         )
)