Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-04 04:04:14 +00:00)
fix: prevent telemetry from leaking sensitive info
Prevent sensitive information from being logged in telemetry output by assigning the SecretStr type to sensitive fields. API keys and passwords from the KV store are now covered, and all providers have been converted. Signed-off-by: Sébastien Han <seb@redhat.com>
Parent: 8dc9fd6844
Commit: c4cb6aa8d9
53 changed files with 121 additions and 109 deletions
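For context on why SecretStr closes the leak: pydantic masks the wrapped value in both str() and repr(), so any code path that stringifies the field for logs or telemetry sees only asterisks, and the raw value must be requested explicitly. A minimal illustration (not taken from this commit; the key value is made up):

from pydantic import SecretStr

api_key = SecretStr("sk-very-secret")  # hypothetical key value

# str() and repr() mask the value, so telemetry/log output cannot leak it
print(api_key)        # **********
print(repr(api_key))  # SecretStr('**********')

# The raw value is only exposed through an explicit accessor at the point of use
print(api_key.get_secret_value())  # sk-very-secret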
@@ -8,6 +8,7 @@ from typing import Any

 import google.auth.transport.requests
 from google.auth import default
+from pydantic import SecretStr

 from llama_stack.apis.inference import ChatCompletionRequest
 from llama_stack.providers.utils.inference.litellm_openai_mixin import (
@@ -43,7 +44,7 @@ class VertexAIInferenceAdapter(OpenAIMixin, LiteLLMOpenAIMixin):
         except Exception:
             # If we can't get credentials, return empty string to let LiteLLM handle it
             # This allows the LiteLLM mixin to work with ADC directly
-            return ""
+            return SecretStr("")

     def get_base_url(self) -> str:
         """
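The hunk above shows only the fallback branch. As a rough sketch of the surrounding pattern (names and structure are illustrative, not copied from the commit): the adapter resolves Google Application Default Credentials, refreshes them to obtain a bearer token, and wraps the token in SecretStr; on failure it returns an empty SecretStr instead of an empty str.

from pydantic import SecretStr

import google.auth
import google.auth.transport.requests


def get_api_key() -> SecretStr:
    """Illustrative sketch: resolve an ADC token and wrap it in SecretStr."""
    try:
        # Application Default Credentials; refresh() fetches a bearer token
        credentials, _project = google.auth.default()
        credentials.refresh(google.auth.transport.requests.Request())
        return SecretStr(credentials.token)
    except Exception:
        # If we can't get credentials, return an empty SecretStr so the
        # LiteLLM mixin can fall back to handling ADC itself
        return SecretStr("")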