Merge remote-tracking branch 'upstream/main' into url

commit b6aca4d366
Author: Charlie Doern
Date:   2025-11-19 08:35:19 -05:00

79 changed files with 424 additions and 348 deletions


@@ -23,7 +23,7 @@ from llama_stack.core.storage.datatypes import (
SqlStoreReference,
StorageConfig,
)
-from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
+from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack_api import OpenAIResponseInputMessageContentText, OpenAIResponseMessage
@@ -38,6 +38,9 @@ async def service():
},
stores=ServerStoresConfig(
conversations=SqlStoreReference(backend="sql_test", table_name="openai_conversations"),
+metadata=None,
+inference=None,
+prompts=None,
),
)
register_sqlstore_backends({"sql_test": storage.backends["sql_test"]})
@@ -142,6 +145,9 @@ async def test_policy_configuration():
},
stores=ServerStoresConfig(
conversations=SqlStoreReference(backend="sql_test", table_name="openai_conversations"),
+metadata=None,
+inference=None,
+prompts=None,
),
)
register_sqlstore_backends({"sql_test": storage.backends["sql_test"]})

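The two hunks above spell out every field of ServerStoresConfig instead of relying on defaults, and pull register_sqlstore_backends from its new home under llama_stack.core.storage. A minimal sketch of that setup pattern, assuming the import locations shown in this diff; the db_path argument to SqliteSqlStoreConfig is an assumption for illustration, not taken from the diff:

    # Sketch only: explicit ServerStoresConfig fields plus backend registration
    # from the relocated llama_stack.core.storage package.
    from llama_stack.core.storage.datatypes import (
        ServerStoresConfig,
        SqliteSqlStoreConfig,  # the db_path kwarg below is assumed
        SqlStoreReference,
        StorageConfig,
    )
    from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends

    storage = StorageConfig(
        backends={"sql_test": SqliteSqlStoreConfig(db_path="/tmp/conversations.db")},
        stores=ServerStoresConfig(
            conversations=SqlStoreReference(backend="sql_test", table_name="openai_conversations"),
            metadata=None,
            inference=None,
            prompts=None,
        ),
    )
    register_sqlstore_backends({"sql_test": storage.backends["sql_test"]})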

@@ -10,8 +10,9 @@ from unittest.mock import AsyncMock
import pytest
-from llama_stack.core.datatypes import QualifiedModel, SafetyConfig, StackRunConfig, StorageConfig, VectorStoresConfig
+from llama_stack.core.datatypes import QualifiedModel, SafetyConfig, StackRunConfig, VectorStoresConfig
from llama_stack.core.stack import validate_safety_config, validate_vector_stores_config
+from llama_stack.core.storage.datatypes import ServerStoresConfig, StorageConfig
from llama_stack_api import Api, ListModelsResponse, ListShieldsResponse, Model, ModelType, Shield
@@ -21,7 +22,15 @@ class TestVectorStoresValidation:
run_config = StackRunConfig(
image_name="test",
providers={},
-storage=StorageConfig(backends={}, stores={}),
+storage=StorageConfig(
+backends={},
+stores=ServerStoresConfig(
+metadata=None,
+inference=None,
+conversations=None,
+prompts=None,
+),
+),
vector_stores=VectorStoresConfig(
default_provider_id="faiss",
default_embedding_model=QualifiedModel(
@@ -41,7 +50,15 @@ class TestVectorStoresValidation:
run_config = StackRunConfig(
image_name="test",
providers={},
-storage=StorageConfig(backends={}, stores={}),
+storage=StorageConfig(
+backends={},
+stores=ServerStoresConfig(
+metadata=None,
+inference=None,
+conversations=None,
+prompts=None,
+),
+),
vector_stores=VectorStoresConfig(
default_provider_id="faiss",
default_embedding_model=QualifiedModel(


@@ -9,11 +9,11 @@ import pytest
from llama_stack.core.access_control.access_control import default_policy
from llama_stack.core.storage.datatypes import SqliteSqlStoreConfig, SqlStoreReference
+from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack.providers.inline.files.localfs import (
LocalfsFilesImpl,
LocalfsFilesImplConfig,
)
-from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack_api import OpenAIFilePurpose, Order, ResourceNotFoundError


@@ -6,9 +6,9 @@
import pytest
+from llama_stack.core.storage.kvstore.config import SqliteKVStoreConfig
+from llama_stack.core.storage.kvstore.sqlite import SqliteKVStoreImpl
from llama_stack.core.store.registry import CachedDiskDistributionRegistry, DiskDistributionRegistry
-from llama_stack.providers.utils.kvstore.config import SqliteKVStoreConfig
-from llama_stack.providers.utils.kvstore.sqlite import SqliteKVStoreImpl
@pytest.fixture(scope="function")


@@ -18,7 +18,7 @@ from llama_stack.core.storage.datatypes import (
SqlStoreReference,
StorageConfig,
)
-from llama_stack.providers.utils.kvstore import register_kvstore_backends
+from llama_stack.core.storage.kvstore import register_kvstore_backends
@pytest.fixture


@@ -17,6 +17,7 @@ from openai.types.chat.chat_completion_chunk import (
from llama_stack.core.access_control.access_control import default_policy
from llama_stack.core.storage.datatypes import ResponsesStoreReference, SqliteSqlStoreConfig
+from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack.providers.inline.agents.meta_reference.responses.openai_responses import (
OpenAIResponsesImpl,
)
@@ -24,7 +25,6 @@ from llama_stack.providers.utils.responses.responses_store import (
ResponsesStore,
_OpenAIResponseObjectWithInputAndMessages,
)
-from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack_api.agents import Order
from llama_stack_api.inference import (
OpenAIAssistantMessageParam,


@@ -13,9 +13,9 @@ from unittest.mock import AsyncMock
import pytest
from llama_stack.core.storage.datatypes import KVStoreReference, SqliteKVStoreConfig
+from llama_stack.core.storage.kvstore import kvstore_impl, register_kvstore_backends
from llama_stack.providers.inline.batches.reference.batches import ReferenceBatchesImpl
from llama_stack.providers.inline.batches.reference.config import ReferenceBatchesImplConfig
-from llama_stack.providers.utils.kvstore import kvstore_impl, register_kvstore_backends
@pytest.fixture


@@ -9,8 +9,8 @@ import pytest
from moto import mock_aws
from llama_stack.core.storage.datatypes import SqliteSqlStoreConfig, SqlStoreReference
+from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack.providers.remote.files.s3 import S3FilesImplConfig, get_adapter_impl
-from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
class MockUploadFile:


@@ -18,11 +18,11 @@ async def test_listing_hides_other_users_file(s3_provider, sample_text_file):
user_a = User("user-a", {"roles": ["team-a"]})
user_b = User("user-b", {"roles": ["team-b"]})
-with patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
+with patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
mock_get_user.return_value = user_a
uploaded = await s3_provider.openai_upload_file(file=sample_text_file, purpose=OpenAIFilePurpose.ASSISTANTS)
-with patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
+with patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
mock_get_user.return_value = user_b
listed = await s3_provider.openai_list_files()
assert all(f.id != uploaded.id for f in listed.data)
@@ -41,11 +41,11 @@ async def test_cannot_access_other_user_file(s3_provider, sample_text_file, op):
user_a = User("user-a", {"roles": ["team-a"]})
user_b = User("user-b", {"roles": ["team-b"]})
-with patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
+with patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
mock_get_user.return_value = user_a
uploaded = await s3_provider.openai_upload_file(file=sample_text_file, purpose=OpenAIFilePurpose.ASSISTANTS)
-with patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
+with patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
mock_get_user.return_value = user_b
with pytest.raises(ResourceNotFoundError):
await op(s3_provider, uploaded.id)
@@ -56,11 +56,11 @@ async def test_shared_role_allows_listing(s3_provider, sample_text_file):
user_a = User("user-a", {"roles": ["shared-role"]})
user_b = User("user-b", {"roles": ["shared-role"]})
-with patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
+with patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
mock_get_user.return_value = user_a
uploaded = await s3_provider.openai_upload_file(file=sample_text_file, purpose=OpenAIFilePurpose.ASSISTANTS)
-with patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
+with patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
mock_get_user.return_value = user_b
listed = await s3_provider.openai_list_files()
assert any(f.id == uploaded.id for f in listed.data)
@@ -79,10 +79,10 @@ async def test_shared_role_allows_access(s3_provider, sample_text_file, op):
user_x = User("user-x", {"roles": ["shared-role"]})
user_y = User("user-y", {"roles": ["shared-role"]})
-with patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
+with patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
mock_get_user.return_value = user_x
uploaded = await s3_provider.openai_upload_file(file=sample_text_file, purpose=OpenAIFilePurpose.ASSISTANTS)
-with patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
+with patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
mock_get_user.return_value = user_y
await op(s3_provider, uploaded.id)

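Every patch target in the file above moves from llama_stack.providers.utils.sqlstore.authorized_sqlstore to llama_stack.core.storage.sqlstore.authorized_sqlstore. A minimal sketch of the pattern under the new path, assuming a provider fixture and sample file like those in the tests; the helper name is illustrative, and only the patch target, the User signature, and the upload call come from the diff:

    # Illustrative only: stub the authenticated user at the relocated module path
    # before exercising the authorized-sqlstore-backed provider.
    from unittest.mock import patch

    from llama_stack.core.datatypes import User
    from llama_stack_api import OpenAIFilePurpose

    async def upload_as(s3_provider, sample_text_file, user: User):
        with patch(
            "llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user"
        ) as mock_get_user:
            mock_get_user.return_value = user  # e.g. User("user-a", {"roles": ["team-a"]})
            return await s3_provider.openai_upload_file(
                file=sample_text_file, purpose=OpenAIFilePurpose.ASSISTANTS
            )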

@@ -11,13 +11,13 @@ import numpy as np
import pytest
from llama_stack.core.storage.datatypes import KVStoreReference, SqliteKVStoreConfig
+from llama_stack.core.storage.kvstore import register_kvstore_backends
from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig
from llama_stack.providers.inline.vector_io.faiss.faiss import FaissIndex, FaissVectorIOAdapter
from llama_stack.providers.inline.vector_io.sqlite_vec import SQLiteVectorIOConfig
from llama_stack.providers.inline.vector_io.sqlite_vec.sqlite_vec import SQLiteVecIndex, SQLiteVecVectorIOAdapter
from llama_stack.providers.remote.vector_io.pgvector.config import PGVectorVectorIOConfig
from llama_stack.providers.remote.vector_io.pgvector.pgvector import PGVectorIndex, PGVectorVectorIOAdapter
-from llama_stack.providers.utils.kvstore import register_kvstore_backends
from llama_stack_api import Chunk, ChunkMetadata, QueryChunksResponse, VectorStore
EMBEDDING_DIMENSION = 768
@@ -279,7 +279,7 @@ async def pgvector_vec_adapter(unique_kvstore_config, mock_inference_api, embedd
) as mock_check_version:
mock_check_version.return_value = "0.5.1"
-with patch("llama_stack.providers.utils.kvstore.kvstore_impl") as mock_kvstore_impl:
+with patch("llama_stack.core.storage.kvstore.kvstore_impl") as mock_kvstore_impl:
mock_kvstore = AsyncMock()
mock_kvstore_impl.return_value = mock_kvstore

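The vector-store fixtures above register their KV backend and stub kvstore_impl through llama_stack.core.storage.kvstore rather than llama_stack.providers.utils.kvstore. A minimal sketch of both moves, assuming register_kvstore_backends takes a backend-name-to-config mapping like its sqlstore counterpart and that SqliteKVStoreConfig accepts a db_path; both are assumptions, only the module paths come from the diff:

    # Sketch only: backend registration and kvstore_impl stubbing via the
    # relocated llama_stack.core.storage.kvstore module.
    from unittest.mock import AsyncMock, patch

    from llama_stack.core.storage.datatypes import SqliteKVStoreConfig
    from llama_stack.core.storage.kvstore import register_kvstore_backends

    # Backend name and db_path are assumed for illustration.
    register_kvstore_backends({"kv_test": SqliteKVStoreConfig(db_path="/tmp/kv_test.db")})

    with patch("llama_stack.core.storage.kvstore.kvstore_impl") as mock_kvstore_impl:
        mock_kvstore_impl.return_value = AsyncMock()  # stands in for a real KV store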

@@ -9,12 +9,12 @@ import pytest
from llama_stack.core.datatypes import VectorStoreWithOwner
from llama_stack.core.storage.datatypes import KVStoreReference, SqliteKVStoreConfig
+from llama_stack.core.storage.kvstore import kvstore_impl, register_kvstore_backends
from llama_stack.core.store.registry import (
KEY_FORMAT,
CachedDiskDistributionRegistry,
DiskDistributionRegistry,
)
-from llama_stack.providers.utils.kvstore import kvstore_impl, register_kvstore_backends
from llama_stack_api import Model, VectorStore


@@ -15,7 +15,7 @@ from starlette.middleware.base import BaseHTTPMiddleware
from llama_stack.core.datatypes import QuotaConfig, QuotaPeriod
from llama_stack.core.server.quota import QuotaMiddleware
from llama_stack.core.storage.datatypes import KVStoreReference, SqliteKVStoreConfig
-from llama_stack.providers.utils.kvstore import register_kvstore_backends
+from llama_stack.core.storage.kvstore import register_kvstore_backends
@pytest.fixture


@@ -24,8 +24,8 @@ from llama_stack.core.storage.datatypes import (
SqlStoreReference,
StorageConfig,
)
-from llama_stack.providers.utils.kvstore import register_kvstore_backends
-from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
+from llama_stack.core.storage.kvstore import register_kvstore_backends
+from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack_api import Inference, InlineProviderSpec, ProviderSpec


@@ -9,8 +9,8 @@ import time
import pytest
from llama_stack.core.storage.datatypes import InferenceStoreReference, SqliteSqlStoreConfig
+from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack.providers.utils.inference.inference_store import InferenceStore
-from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack_api import (
OpenAIAssistantMessageParam,
OpenAIChatCompletion,


@@ -5,8 +5,8 @@
# the root directory of this source tree.
-from llama_stack.providers.utils.kvstore.config import SqliteKVStoreConfig
-from llama_stack.providers.utils.kvstore.sqlite.sqlite import SqliteKVStoreImpl
+from llama_stack.core.storage.kvstore.config import SqliteKVStoreConfig
+from llama_stack.core.storage.kvstore.sqlite.sqlite import SqliteKVStoreImpl
async def test_memory_kvstore_persistence_behavior():


@@ -11,8 +11,8 @@ from uuid import uuid4
import pytest
from llama_stack.core.storage.datatypes import ResponsesStoreReference, SqliteSqlStoreConfig
+from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack.providers.utils.responses.responses_store import ResponsesStore
-from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack_api import OpenAIMessageParam, OpenAIResponseInput, OpenAIResponseObject, OpenAIUserMessageParam, Order


@@ -9,9 +9,9 @@ from tempfile import TemporaryDirectory
import pytest
-from llama_stack.providers.utils.sqlstore.api import ColumnDefinition, ColumnType
-from llama_stack.providers.utils.sqlstore.sqlalchemy_sqlstore import SqlAlchemySqlStoreImpl
-from llama_stack.providers.utils.sqlstore.sqlstore import SqliteSqlStoreConfig
+from llama_stack.core.storage.sqlstore.sqlalchemy_sqlstore import SqlAlchemySqlStoreImpl
+from llama_stack.core.storage.sqlstore.sqlstore import SqliteSqlStoreConfig
+from llama_stack_api.internal.sqlstore import ColumnDefinition, ColumnType
async def test_sqlite_sqlstore():


@@ -10,13 +10,13 @@ from unittest.mock import patch
from llama_stack.core.access_control.access_control import default_policy, is_action_allowed
from llama_stack.core.access_control.datatypes import Action
from llama_stack.core.datatypes import User
-from llama_stack.providers.utils.sqlstore.api import ColumnType
-from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore, SqlRecord
-from llama_stack.providers.utils.sqlstore.sqlalchemy_sqlstore import SqlAlchemySqlStoreImpl
-from llama_stack.providers.utils.sqlstore.sqlstore import SqliteSqlStoreConfig
+from llama_stack.core.storage.sqlstore.authorized_sqlstore import AuthorizedSqlStore, SqlRecord
+from llama_stack.core.storage.sqlstore.sqlalchemy_sqlstore import SqlAlchemySqlStoreImpl
+from llama_stack.core.storage.sqlstore.sqlstore import SqliteSqlStoreConfig
+from llama_stack_api.internal.sqlstore import ColumnType
-@patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user")
+@patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user")
async def test_authorized_fetch_with_where_sql_access_control(mock_get_authenticated_user):
"""Test that fetch_all works correctly with where_sql for access control"""
with TemporaryDirectory() as tmp_dir:
@@ -78,7 +78,7 @@ async def test_authorized_fetch_with_where_sql_access_control(mock_get_authentic
assert row["title"] == "User Document"
-@patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user")
+@patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user")
async def test_sql_policy_consistency(mock_get_authenticated_user):
"""Test that SQL WHERE clause logic exactly matches is_action_allowed policy logic"""
with TemporaryDirectory() as tmp_dir:
@@ -164,7 +164,7 @@ async def test_sql_policy_consistency(mock_get_authenticated_user):
)
-@patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user")
+@patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user")
async def test_authorized_store_user_attribute_capture(mock_get_authenticated_user):
"""Test that user attributes are properly captured during insert"""
with TemporaryDirectory() as tmp_dir:
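The final file swaps all four storage imports and both patch decorators over to the relocated modules. A minimal sketch of wiring the authorized store together under those imports; the constructor arguments are assumptions for illustration, only the import paths and default_policy come from the diff:

    # Sketch only: build an access-controlled SQL store from the relocated modules.
    # Both constructor signatures below are assumed, not shown in the diff.
    from tempfile import TemporaryDirectory

    from llama_stack.core.access_control.access_control import default_policy
    from llama_stack.core.storage.sqlstore.authorized_sqlstore import AuthorizedSqlStore
    from llama_stack.core.storage.sqlstore.sqlalchemy_sqlstore import SqlAlchemySqlStoreImpl
    from llama_stack.core.storage.sqlstore.sqlstore import SqliteSqlStoreConfig

    with TemporaryDirectory() as tmp_dir:
        base_store = SqlAlchemySqlStoreImpl(SqliteSqlStoreConfig(db_path=f"{tmp_dir}/test.db"))
        authorized_store = AuthorizedSqlStore(base_store, default_policy())
        # ... create tables and run fetches here, as the tests above do,
        # with get_authenticated_user patched at the new module path.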