diff --git a/.github/workflows/integration-auth-tests.yml b/.github/workflows/integration-auth-tests.yml
index 30a8063ea..c13ed6cbe 100644
--- a/.github/workflows/integration-auth-tests.yml
+++ b/.github/workflows/integration-auth-tests.yml
@@ -91,6 +91,9 @@ jobs:
             conversations:
               table_name: openai_conversations
               backend: sql_default
+            prompts:
+              namespace: prompts
+              backend: kv_default
           server:
             port: 8321
           EOF
diff --git a/benchmarking/k8s-benchmark/stack-configmap.yaml b/benchmarking/k8s-benchmark/stack-configmap.yaml
index 8fbf09fce..58518ec18 100644
--- a/benchmarking/k8s-benchmark/stack-configmap.yaml
+++ b/benchmarking/k8s-benchmark/stack-configmap.yaml
@@ -107,13 +107,21 @@ data:
           db: ${env.POSTGRES_DB:=llamastack}
           user: ${env.POSTGRES_USER:=llamastack}
           password: ${env.POSTGRES_PASSWORD:=llamastack}
-      references:
+      stores:
         metadata:
           backend: kv_default
           namespace: registry
         inference:
           backend: sql_default
           table_name: inference_store
+          max_write_queue_size: 10000
+          num_writers: 4
+        conversations:
+          backend: sql_default
+          table_name: openai_conversations
+        prompts:
+          backend: kv_default
+          namespace: prompts
     models:
     - metadata:
         embedding_dimension: 768
diff --git a/benchmarking/k8s-benchmark/stack_run_config.yaml b/benchmarking/k8s-benchmark/stack_run_config.yaml
index 88f4b0fef..7992eb3c7 100644
--- a/benchmarking/k8s-benchmark/stack_run_config.yaml
+++ b/benchmarking/k8s-benchmark/stack_run_config.yaml
@@ -100,6 +100,9 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+    prompts:
+      namespace: prompts
+      backend: kv_default
 registered_resources:
   models:
   - metadata:
diff --git a/docs/docs/distributions/configuration.mdx b/docs/docs/distributions/configuration.mdx
index 910a0ed05..ff50c406a 100644
--- a/docs/docs/distributions/configuration.mdx
+++ b/docs/docs/distributions/configuration.mdx
@@ -58,13 +58,21 @@ storage:
     sql_default:
       type: sql_sqlite
       db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/ollama}/sqlstore.db
-  references:
+  stores:
     metadata:
       backend: kv_default
       namespace: registry
     inference:
       backend: sql_default
       table_name: inference_store
+      max_write_queue_size: 10000
+      num_writers: 4
+    conversations:
+      backend: sql_default
+      table_name: openai_conversations
+    prompts:
+      backend: kv_default
+      namespace: prompts
 models:
 - metadata: {}
   model_id: ${env.INFERENCE_MODEL}
diff --git a/docs/docs/distributions/k8s/stack-configmap.yaml b/docs/docs/distributions/k8s/stack-configmap.yaml
index c71ab05d8..255e39ac2 100644
--- a/docs/docs/distributions/k8s/stack-configmap.yaml
+++ b/docs/docs/distributions/k8s/stack-configmap.yaml
@@ -113,13 +113,21 @@ data:
           db: ${env.POSTGRES_DB:=llamastack}
           user: ${env.POSTGRES_USER:=llamastack}
           password: ${env.POSTGRES_PASSWORD:=llamastack}
-      references:
+      stores:
         metadata:
           backend: kv_default
           namespace: registry
         inference:
           backend: sql_default
           table_name: inference_store
+          max_write_queue_size: 10000
+          num_writers: 4
+        conversations:
+          backend: sql_default
+          table_name: openai_conversations
+        prompts:
+          backend: kv_default
+          namespace: prompts
     models:
     - metadata:
         embedding_dimension: 768
diff --git a/docs/docs/distributions/k8s/stack_run_config.yaml b/docs/docs/distributions/k8s/stack_run_config.yaml
index 1bfa5ac25..3dde74bbf 100644
--- a/docs/docs/distributions/k8s/stack_run_config.yaml
+++ b/docs/docs/distributions/k8s/stack_run_config.yaml
@@ -106,6 +106,9 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+    prompts:
+      namespace: prompts
+      backend: kv_default
 registered_resources:
   models:
   - metadata:
diff --git a/llama_stack/core/datatypes.py b/llama_stack/core/datatypes.py
index d7175100e..95907adcf 100644
--- a/llama_stack/core/datatypes.py
+++ b/llama_stack/core/datatypes.py
@@ -589,6 +589,7 @@ can be instantiated multiple times (with different configs) if necessary.
         _ensure_backend(stores.inference, sql_backends, "storage.stores.inference")
         _ensure_backend(stores.conversations, sql_backends, "storage.stores.conversations")
         _ensure_backend(stores.responses, sql_backends, "storage.stores.responses")
+        _ensure_backend(stores.prompts, kv_backends, "storage.stores.prompts")
 
         return self
 
diff --git a/llama_stack/core/prompts/prompts.py b/llama_stack/core/prompts/prompts.py
index 856397ca5..1e48bcc8c 100644
--- a/llama_stack/core/prompts/prompts.py
+++ b/llama_stack/core/prompts/prompts.py
@@ -11,7 +11,6 @@ from pydantic import BaseModel
 
 from llama_stack.apis.prompts import ListPromptsResponse, Prompt, Prompts
 from llama_stack.core.datatypes import StackRunConfig
-from llama_stack.core.storage.datatypes import KVStoreReference
 from llama_stack.providers.utils.kvstore import KVStore, kvstore_impl
 
 
@@ -40,11 +39,10 @@ class PromptServiceImpl(Prompts):
         self.kvstore: KVStore
 
     async def initialize(self) -> None:
-        # Use metadata store backend with prompts-specific namespace
-        metadata_ref = self.config.run_config.storage.stores.metadata
-        if not metadata_ref:
-            raise ValueError("storage.stores.metadata must be configured in run config")
-        prompts_ref = KVStoreReference(namespace="prompts", backend=metadata_ref.backend)
+        # Use prompts store reference from run config
+        prompts_ref = self.config.run_config.storage.stores.prompts
+        if not prompts_ref:
+            raise ValueError("storage.stores.prompts must be configured in run config")
         self.kvstore = await kvstore_impl(prompts_ref)
 
     def _get_default_key(self, prompt_id: str) -> str:
diff --git a/llama_stack/core/stack.py b/llama_stack/core/stack.py
index ebfd59a05..1b5c288a1 100644
--- a/llama_stack/core/stack.py
+++ b/llama_stack/core/stack.py
@@ -565,6 +565,7 @@ def run_config_from_adhoc_config_spec(
             metadata=KVStoreReference(backend="kv_default", namespace="registry"),
             inference=InferenceStoreReference(backend="sql_default", table_name="inference_store"),
             conversations=SqlStoreReference(backend="sql_default", table_name="openai_conversations"),
+            prompts=KVStoreReference(backend="kv_default", namespace="prompts"),
         ),
     ),
 )
diff --git a/llama_stack/core/storage/datatypes.py b/llama_stack/core/storage/datatypes.py
index 9df170e10..4b17b9ea9 100644
--- a/llama_stack/core/storage/datatypes.py
+++ b/llama_stack/core/storage/datatypes.py
@@ -271,6 +271,10 @@ class ServerStoresConfig(BaseModel):
         default=None,
         description="Responses store configuration (uses SQL backend)",
     )
+    prompts: KVStoreReference | None = Field(
+        default=None,
+        description="Prompts store configuration (uses KV backend)",
+    )
 
 
 class StorageConfig(BaseModel):
diff --git a/llama_stack/distributions/ci-tests/run.yaml b/llama_stack/distributions/ci-tests/run.yaml
index ed880d4a0..702acff8e 100644
--- a/llama_stack/distributions/ci-tests/run.yaml
+++ b/llama_stack/distributions/ci-tests/run.yaml
@@ -247,6 +247,9 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+    prompts:
+      namespace: prompts
+      backend: kv_default
 registered_resources:
   models: []
   shields:
diff --git a/llama_stack/distributions/dell/run-with-safety.yaml b/llama_stack/distributions/dell/run-with-safety.yaml
index 2563f2f4b..e0da8060d 100644
--- a/llama_stack/distributions/dell/run-with-safety.yaml
+++ b/llama_stack/distributions/dell/run-with-safety.yaml
@@ -109,6 +109,9 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+    prompts:
+      namespace: prompts
+      backend: kv_default
 registered_resources:
   models:
   - metadata: {}
diff --git a/llama_stack/distributions/dell/run.yaml b/llama_stack/distributions/dell/run.yaml
index 7bada394f..bc3117d88 100644
--- a/llama_stack/distributions/dell/run.yaml
+++ b/llama_stack/distributions/dell/run.yaml
@@ -105,6 +105,9 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+    prompts:
+      namespace: prompts
+      backend: kv_default
 registered_resources:
   models:
   - metadata: {}
diff --git a/llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml b/llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml
index 01b5db4f9..2fa9d198b 100644
--- a/llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml
+++ b/llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml
@@ -122,6 +122,9 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+    prompts:
+      namespace: prompts
+      backend: kv_default
 registered_resources:
   models:
   - metadata: {}
diff --git a/llama_stack/distributions/meta-reference-gpu/run.yaml b/llama_stack/distributions/meta-reference-gpu/run.yaml
index 87c33dde0..5c7f75ca8 100644
--- a/llama_stack/distributions/meta-reference-gpu/run.yaml
+++ b/llama_stack/distributions/meta-reference-gpu/run.yaml
@@ -112,6 +112,9 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+    prompts:
+      namespace: prompts
+      backend: kv_default
 registered_resources:
   models:
   - metadata: {}
diff --git a/llama_stack/distributions/nvidia/run-with-safety.yaml b/llama_stack/distributions/nvidia/run-with-safety.yaml
index c23d0f9cb..1d57ad17a 100644
--- a/llama_stack/distributions/nvidia/run-with-safety.yaml
+++ b/llama_stack/distributions/nvidia/run-with-safety.yaml
@@ -111,6 +111,9 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+    prompts:
+      namespace: prompts
+      backend: kv_default
 registered_resources:
   models:
   - metadata: {}
diff --git a/llama_stack/distributions/nvidia/run.yaml b/llama_stack/distributions/nvidia/run.yaml
index 81e744d53..8c50b8bfb 100644
--- a/llama_stack/distributions/nvidia/run.yaml
+++ b/llama_stack/distributions/nvidia/run.yaml
@@ -100,6 +100,9 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+    prompts:
+      namespace: prompts
+      backend: kv_default
 registered_resources:
   models: []
   shields: []
diff --git a/llama_stack/distributions/open-benchmark/run.yaml b/llama_stack/distributions/open-benchmark/run.yaml
index 4fd0e199b..912e48dd3 100644
--- a/llama_stack/distributions/open-benchmark/run.yaml
+++ b/llama_stack/distributions/open-benchmark/run.yaml
@@ -142,6 +142,9 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+    prompts:
+      namespace: prompts
+      backend: kv_default
 registered_resources:
   models:
   - metadata: {}
diff --git a/llama_stack/distributions/postgres-demo/run.yaml b/llama_stack/distributions/postgres-demo/run.yaml
index 0d7ecff48..dd1c2bc7f 100644
--- a/llama_stack/distributions/postgres-demo/run.yaml
+++ b/llama_stack/distributions/postgres-demo/run.yaml
@@ -87,6 +87,9 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+    prompts:
+      namespace: prompts
+      backend: kv_default
 registered_resources:
   models:
   - metadata: {}
diff --git a/llama_stack/distributions/starter-gpu/run.yaml b/llama_stack/distributions/starter-gpu/run.yaml
index 33e8c9b59..807f0d678 100644
--- a/llama_stack/distributions/starter-gpu/run.yaml
+++ b/llama_stack/distributions/starter-gpu/run.yaml
@@ -250,6 +250,9 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+    prompts:
+      namespace: prompts
+      backend: kv_default
 registered_resources:
   models: []
   shields:
diff --git a/llama_stack/distributions/starter/run.yaml b/llama_stack/distributions/starter/run.yaml
index 4ca0914af..eb4652af0 100644
--- a/llama_stack/distributions/starter/run.yaml
+++ b/llama_stack/distributions/starter/run.yaml
@@ -247,6 +247,9 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+    prompts:
+      namespace: prompts
+      backend: kv_default
 registered_resources:
   models: []
   shields:
diff --git a/llama_stack/distributions/template.py b/llama_stack/distributions/template.py
index f0c4c6b9e..1dad60064 100644
--- a/llama_stack/distributions/template.py
+++ b/llama_stack/distributions/template.py
@@ -259,6 +259,10 @@ class RunConfigSettings(BaseModel):
                 backend="sql_default",
                 table_name="openai_conversations",
             ).model_dump(exclude_none=True),
+            "prompts": KVStoreReference(
+                backend="kv_default",
+                namespace="prompts",
+            ).model_dump(exclude_none=True),
         }
 
         storage_config = dict(
diff --git a/llama_stack/distributions/watsonx/run.yaml b/llama_stack/distributions/watsonx/run.yaml
index ca3c8402d..8456115d2 100644
--- a/llama_stack/distributions/watsonx/run.yaml
+++ b/llama_stack/distributions/watsonx/run.yaml
@@ -115,6 +115,9 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+    prompts:
+      namespace: prompts
+      backend: kv_default
 registered_resources:
   models: []
   shields: []
diff --git a/tests/external/run-byoa.yaml b/tests/external/run-byoa.yaml
index 4d63046c6..62d6b1825 100644
--- a/tests/external/run-byoa.yaml
+++ b/tests/external/run-byoa.yaml
@@ -25,6 +25,9 @@ storage:
     conversations:
       table_name: openai_conversations
       backend: sql_default
+    prompts:
+      namespace: prompts
+      backend: kv_default
 external_apis_dir: ~/.llama/apis.d
 external_providers_dir: ~/.llama/providers.d
 server:
diff --git a/tests/unit/cli/test_stack_config.py b/tests/unit/cli/test_stack_config.py
index 7b9f3ca0c..0977a1e43 100644
--- a/tests/unit/cli/test_stack_config.py
+++ b/tests/unit/cli/test_stack_config.py
@@ -44,6 +44,9 @@ def config_with_image_name_int():
         responses:
           backend: sql_default
           table_name: responses
+        prompts:
+          backend: kv_default
+          namespace: prompts
     providers:
       inference:
       - provider_id: provider1
diff --git a/tests/unit/distribution/test_distribution.py b/tests/unit/distribution/test_distribution.py
index 4161d7b84..11f55cfdb 100644
--- a/tests/unit/distribution/test_distribution.py
+++ b/tests/unit/distribution/test_distribution.py
@@ -48,6 +48,7 @@ def _default_storage() -> StorageConfig:
             metadata=KVStoreReference(backend="kv_default", namespace="registry"),
             inference=InferenceStoreReference(backend="sql_default", table_name="inference_store"),
             conversations=SqlStoreReference(backend="sql_default", table_name="conversations"),
+            prompts=KVStoreReference(backend="kv_default", namespace="prompts"),
         ),
     )
 
diff --git a/tests/unit/prompts/prompts/conftest.py b/tests/unit/prompts/prompts/conftest.py
index fe30e1a77..c876f2041 100644
--- a/tests/unit/prompts/prompts/conftest.py
+++ b/tests/unit/prompts/prompts/conftest.py
@@ -18,7 +18,7 @@ from llama_stack.core.storage.datatypes import (
     SqlStoreReference,
     StorageConfig,
 )
-from llama_stack.providers.utils.kvstore import kvstore_impl, register_kvstore_backends
+from llama_stack.providers.utils.kvstore import register_kvstore_backends
 
 
 @pytest.fixture
@@ -38,6 +38,7 @@ async def temp_prompt_store(tmp_path_factory):
             metadata=KVStoreReference(backend="kv_test", namespace="registry"),
             inference=InferenceStoreReference(backend="sql_test", table_name="inference"),
             conversations=SqlStoreReference(backend="sql_test", table_name="conversations"),
+            prompts=KVStoreReference(backend="kv_test", namespace="prompts"),
         ),
     )
     mock_run_config = StackRunConfig(
@@ -50,6 +51,6 @@
     store = PromptServiceImpl(config, deps={})
 
     register_kvstore_backends({"kv_test": storage.backends["kv_test"]})
-    store.kvstore = await kvstore_impl(KVStoreReference(backend="kv_test", namespace="prompts"))
+    await store.initialize()
 
     yield store
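
Net effect of the patch: the prompts service resolves its KV store from an explicit `storage.stores.prompts` reference instead of borrowing the metadata store's backend with a hard-coded namespace, and every run config gains that reference. Below is a minimal sketch of the resolution path, mirroring the updated conftest fixture above; `SqliteKVStoreConfig` and its import path are assumptions not shown in this diff, standing in for whatever backend config your `storage.backends` entry uses.

```python
# Sketch only: resolve a prompts KV store from an explicit store reference,
# following the register-then-resolve pattern in the conftest fixture above.
import asyncio

from llama_stack.core.storage.datatypes import KVStoreReference
from llama_stack.providers.utils.kvstore import kvstore_impl, register_kvstore_backends
from llama_stack.providers.utils.kvstore.config import SqliteKVStoreConfig  # assumed import path


async def main() -> None:
    # Backends are registered by name; typed store references then point at a name.
    register_kvstore_backends({"kv_default": SqliteKVStoreConfig(db_path="/tmp/prompts_kv.db")})

    # This mirrors PromptServiceImpl.initialize(): read the KVStoreReference that
    # would come from storage.stores.prompts (constructed inline here) and resolve it.
    prompts_ref = KVStoreReference(backend="kv_default", namespace="prompts")
    kvstore = await kvstore_impl(prompts_ref)

    # Prompts are stored as serialized values under keys in this namespace.
    await kvstore.set("example", '{"prompt_id": "example"}')
    print(await kvstore.get("example"))


asyncio.run(main())
```

Note the validation side of the change: `_ensure_backend(stores.prompts, kv_backends, ...)` checks the reference against the KV backends at config-load time, and `initialize()` now raises a `ValueError` if `storage.stores.prompts` is absent, so run configs that predate this patch must add the new store entry.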