Merge branch 'main' into openai-vector-store/qdrant

This commit is contained in:
ehhuang 2025-07-31 15:49:49 -07:00 committed by GitHub
commit 970d0f307f
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
338 changed files with 15301 additions and 15997 deletions

View file

@@ -40,7 +40,7 @@ from llama_stack.apis.inference (
OpenAIUserMessageParam,
)
from llama_stack.apis.tools.tools import Tool, ToolGroups, ToolInvocationResult, ToolParameter, ToolRuntime
from llama_stack.distribution.access_control.access_control import default_policy
from llama_stack.core.access_control.access_control import default_policy
from llama_stack.providers.inline.agents.meta_reference.openai_responses import (
OpenAIResponsesImpl,
)

View file

@@ -12,7 +12,7 @@ import pytest
from llama_stack.apis.agents import Turn
from llama_stack.apis.inference import CompletionMessage, StopReason
from llama_stack.distribution.datatypes import User
from llama_stack.core.datatypes import User
from llama_stack.providers.inline.agents.meta_reference.persistence import AgentPersistence, AgentSessionInfo

View file

@@ -7,7 +7,7 @@
import json
from unittest.mock import MagicMock
from llama_stack.distribution.request_headers import request_provider_data_context
from llama_stack.core.request_headers import request_provider_data_context
from llama_stack.providers.remote.inference.groq.config import GroqConfig
from llama_stack.providers.remote.inference.groq.groq import GroqInferenceAdapter
from llama_stack.providers.remote.inference.llama_openai_compat.config import LlamaCompatConfig

View file

@@ -0,0 +1,112 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import json
from unittest.mock import MagicMock
import pytest
from pydantic import BaseModel, Field
from llama_stack.core.request_headers import request_provider_data_context
from llama_stack.providers.utils.inference.litellm_openai_mixin import LiteLLMOpenAIMixin
# Test fixtures and helper classes
class TestConfig(BaseModel):
    """Minimal provider config for the tests: just an optional static API key."""

    # `= None` is equivalent to `Field(default=None)` for a plain default.
    api_key: str | None = None
class TestProviderDataValidator(BaseModel):
    """Schema for per-request provider data: an optional per-request API key."""

    # `= None` is equivalent to `Field(default=None)` for a plain default.
    test_api_key: str | None = None
class TestLiteLLMAdapter(LiteLLMOpenAIMixin):
    """LiteLLM adapter wired to a dummy 'test' provider, used to exercise API-key resolution."""

    def __init__(self, config: TestConfig):
        # No model entries and no compat base URL: only key handling is under test here.
        super().__init__(
            litellm_provider_name="test",
            model_entries=[],
            openai_compat_api_base=None,
            api_key_from_config=config.api_key,
            provider_data_api_key_field="test_api_key",
        )
@pytest.fixture
def adapter_with_config_key():
    """Adapter whose config supplies a static API key."""
    adapter = TestLiteLLMAdapter(TestConfig(api_key="config-api-key"))
    # Point the provider spec at the validator class defined in this module.
    spec = MagicMock()
    spec.provider_data_validator = (
        "tests.unit.providers.inference.test_litellm_openai_mixin.TestProviderDataValidator"
    )
    adapter.__provider_spec__ = spec
    return adapter
@pytest.fixture
def adapter_without_config_key():
    """Adapter whose config carries no API key at all."""
    adapter = TestLiteLLMAdapter(TestConfig(api_key=None))
    # Point the provider spec at the validator class defined in this module.
    spec = MagicMock()
    spec.provider_data_validator = (
        "tests.unit.providers.inference.test_litellm_openai_mixin.TestProviderDataValidator"
    )
    adapter.__provider_spec__ = spec
    return adapter
def test_api_key_from_config_when_no_provider_data(adapter_with_config_key):
    """Without provider data in the request context, the config key is used."""
    assert adapter_with_config_key.get_api_key() == "config-api-key"
def test_provider_data_takes_priority_over_config(adapter_with_config_key):
    """A key supplied via provider data shadows the one in config."""
    headers = {"x-llamastack-provider-data": json.dumps({"test_api_key": "provider-data-key"})}
    with request_provider_data_context(headers):
        assert adapter_with_config_key.get_api_key() == "provider-data-key"
def test_fallback_to_config_when_provider_data_missing_key(adapter_with_config_key):
    """Provider data lacking the expected field falls back to the config key."""
    headers = {"x-llamastack-provider-data": json.dumps({"wrong_key": "some-value"})}
    with request_provider_data_context(headers):
        assert adapter_with_config_key.get_api_key() == "config-api-key"
def test_error_when_no_api_key_available(adapter_without_config_key):
    """With no key in config and no provider data, get_api_key raises ValueError."""
    adapter = adapter_without_config_key
    with pytest.raises(ValueError, match="API key is not set"):
        adapter.get_api_key()
def test_error_when_provider_data_has_wrong_key(adapter_without_config_key):
    """Provider data without the expected field cannot supply a key, so ValueError is raised."""
    headers = {"x-llamastack-provider-data": json.dumps({"wrong_key": "some-value"})}
    with request_provider_data_context(headers):
        with pytest.raises(ValueError, match="API key is not set"):
            adapter_without_config_key.get_api_key()
def test_provider_data_works_when_config_is_none(adapter_without_config_key):
    """Provider data alone is sufficient even when the config has no key."""
    headers = {"x-llamastack-provider-data": json.dumps({"test_api_key": "provider-only-key"})}
    with request_provider_data_context(headers):
        assert adapter_without_config_key.get_api_key() == "provider-only-key"
def test_error_message_includes_correct_field_names(adapter_without_config_key):
    """The ValueError message should name both the provider-data field and the request header.

    Uses pytest.raises with excinfo capture instead of the manual
    try/except + raise AssertionError pattern: it is the idiomatic form
    (consistent with the other tests in this file) and fails with a clear
    "DID NOT RAISE" message if no exception occurs.
    """
    with pytest.raises(ValueError) as excinfo:
        adapter_without_config_key.get_api_key()
    message = str(excinfo.value)
    assert "test_api_key" in message  # should mention the provider-data field name
    assert "x-llamastack-provider-data" in message  # should tell callers which header to set

View file

@@ -7,7 +7,7 @@
import os
from unittest.mock import AsyncMock, MagicMock, patch
from llama_stack.distribution.stack import replace_env_vars
from llama_stack.core.stack import replace_env_vars
from llama_stack.providers.remote.inference.openai.config import OpenAIConfig
from llama_stack.providers.remote.inference.openai.openai import OpenAIInferenceAdapter

View file

@@ -19,7 +19,7 @@ from llama_stack.apis.post_training.post_training (
OptimizerType,
TrainingConfig,
)
from llama_stack.distribution.library_client import convert_pydantic_to_json_value
from llama_stack.core.library_client import convert_pydantic_to_json_value
from llama_stack.providers.remote.post_training.nvidia.post_training import (
NvidiaPostTrainingAdapter,
NvidiaPostTrainingConfig,

View file

@@ -19,7 +19,7 @@ from llama_stack.apis.post_training.post_training (
QATFinetuningConfig,
TrainingConfig,
)
from llama_stack.distribution.library_client import convert_pydantic_to_json_value
from llama_stack.core.library_client import convert_pydantic_to_json_value
from llama_stack.providers.remote.post_training.nvidia.post_training import (
ListNvidiaPostTrainingJobs,
NvidiaPostTrainingAdapter,

View file

@@ -7,8 +7,8 @@
import pytest
from pydantic import BaseModel
from llama_stack.distribution.distribution import get_provider_registry, providable_apis
from llama_stack.distribution.utils.dynamic import instantiate_class_type
from llama_stack.core.distribution import get_provider_registry, providable_apis
from llama_stack.core.utils.dynamic import instantiate_class_type
class TestProviderConfigurations: