feat: load config class when doing variable substitution

When using bash-style environment variable substitution in a distribution
template, we process the string and convert it to the type associated
with the provider's config class. This allows us to return the proper
type. This is crucial for API keys since they are no longer plain strings
but SecretStr. If the key is unset, we would otherwise get an empty
string, which results in a Pydantic error like:

```
ERROR    2025-09-25 21:40:44,565 __main__:527 core::server: Error creating app: 1 validation error for AnthropicConfig
         api_key
           Input should be a valid string
             For further information visit
             https://errors.pydantic.dev/2.11/v/string_type
```

Signed-off-by: Sébastien Han <seb@redhat.com>
This commit is contained in:
Sébastien Han 2025-09-25 10:27:41 +02:00
parent 4af141292f
commit bc64635835
No known key found for this signature in database
79 changed files with 381 additions and 216 deletions

View file

@@ -8,19 +8,20 @@ import json
from unittest.mock import MagicMock
import pytest
from pydantic import BaseModel, Field, SecretStr
from pydantic import BaseModel, Field
from llama_stack.core.request_headers import request_provider_data_context
from llama_stack.core.secret_types import MySecretStr
from llama_stack.providers.utils.inference.litellm_openai_mixin import LiteLLMOpenAIMixin
# Test fixtures and helper classes
class TestConfig(BaseModel):
api_key: SecretStr | None = Field(default=None)
api_key: MySecretStr | None = Field(default=None)
class TestProviderDataValidator(BaseModel):
test_api_key: SecretStr | None = Field(default=None)
test_api_key: MySecretStr | None = Field(default=None)
class TestLiteLLMAdapter(LiteLLMOpenAIMixin):
@@ -36,7 +37,7 @@ class TestLiteLLMAdapter(LiteLLMOpenAIMixin):
@pytest.fixture
def adapter_with_config_key():
"""Fixture to create adapter with API key in config"""
config = TestConfig(api_key=SecretStr("config-api-key"))
config = TestConfig(api_key=MySecretStr("config-api-key"))
adapter = TestLiteLLMAdapter(config)
adapter.__provider_spec__ = MagicMock()
adapter.__provider_spec__.provider_data_validator = (

View file

@@ -7,9 +7,14 @@
import os
from unittest.mock import MagicMock, patch
from pydantic import SecretStr
from llama_stack.core.secret_types import MySecretStr
# Wrapper for backward compatibility in tests
def replace_env_vars_compat(config, path=""):
    """Backward-compatible wrapper for tests that call replace_env_vars with two args.

    Delegates to ``replace_env_vars`` with the two newer parameters defaulted
    to ``None``. The original body called ``replace_env_vars_compat`` itself,
    which recurses infinitely and raises RecursionError on first use.
    """
    return replace_env_vars(config, path, None, None)
from llama_stack.core.stack import replace_env_vars
from llama_stack.providers.remote.inference.openai.config import OpenAIConfig
from llama_stack.providers.remote.inference.openai.openai import OpenAIInferenceAdapter
@@ -37,7 +42,7 @@ class TestOpenAIBaseURLConfig:
"""Test that the adapter uses base URL from OPENAI_BASE_URL environment variable."""
# Use sample_run_config which has proper environment variable syntax
config_data = OpenAIConfig.sample_run_config(api_key="test-key")
processed_config = replace_env_vars(config_data)
processed_config = replace_env_vars_compat(config_data)
config = OpenAIConfig.model_validate(processed_config)
adapter = OpenAIInferenceAdapter(config)
@@ -61,14 +66,14 @@ class TestOpenAIBaseURLConfig:
adapter = OpenAIInferenceAdapter(config)
# Mock the get_api_key method since it's delegated to LiteLLMOpenAIMixin
adapter.get_api_key = MagicMock(return_value=SecretStr("test-key"))
adapter.get_api_key = MagicMock(return_value=MySecretStr("test-key"))
# Access the client property to trigger AsyncOpenAI initialization
_ = adapter.client
# Verify AsyncOpenAI was called with the correct base_url
mock_openai_class.assert_called_once_with(
api_key=SecretStr("test-key"),
api_key=MySecretStr("test-key"),
base_url=custom_url,
)
@@ -80,7 +85,7 @@ class TestOpenAIBaseURLConfig:
adapter = OpenAIInferenceAdapter(config)
# Mock the get_api_key method
adapter.get_api_key = MagicMock(return_value=SecretStr("test-key"))
adapter.get_api_key = MagicMock(return_value=MySecretStr("test-key"))
# Mock a model object that will be returned by models.list()
mock_model = MagicMock()
@@ -103,7 +108,7 @@ class TestOpenAIBaseURLConfig:
# Verify the client was created with the custom URL
mock_openai_class.assert_called_with(
api_key=SecretStr("test-key"),
api_key=MySecretStr("test-key"),
base_url=custom_url,
)
@@ -116,12 +121,12 @@ class TestOpenAIBaseURLConfig:
"""Test that setting OPENAI_BASE_URL environment variable affects where model availability is checked."""
# Use sample_run_config which has proper environment variable syntax
config_data = OpenAIConfig.sample_run_config(api_key="test-key")
processed_config = replace_env_vars(config_data)
processed_config = replace_env_vars_compat(config_data)
config = OpenAIConfig.model_validate(processed_config)
adapter = OpenAIInferenceAdapter(config)
# Mock the get_api_key method
adapter.get_api_key = MagicMock(return_value=SecretStr("test-key"))
adapter.get_api_key = MagicMock(return_value=MySecretStr("test-key"))
# Mock a model object that will be returned by models.list()
mock_model = MagicMock()
@@ -144,6 +149,6 @@ class TestOpenAIBaseURLConfig:
# Verify the client was created with the environment variable URL
mock_openai_class.assert_called_with(
api_key=SecretStr("test-key"),
api_key=MySecretStr("test-key"),
base_url="https://proxy.openai.com/v1",
)