Merge bfe06ae00f into sapling-pr-archive-ehhuang

This commit is contained in:
ehhuang 2025-10-27 12:01:12 -07:00 committed by GitHub
commit 0b9736b6c1
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
32 changed files with 793 additions and 12 deletions

View file

@@ -589,6 +589,7 @@ can be instantiated multiple times (with different configs) if necessary.
_ensure_backend(stores.inference, sql_backends, "storage.stores.inference")
_ensure_backend(stores.conversations, sql_backends, "storage.stores.conversations")
_ensure_backend(stores.responses, sql_backends, "storage.stores.responses")
_ensure_backend(stores.prompts, kv_backends, "storage.stores.prompts")
return self

View file

@@ -11,7 +11,6 @@ from pydantic import BaseModel
from llama_stack.apis.prompts import ListPromptsResponse, Prompt, Prompts
from llama_stack.core.datatypes import StackRunConfig
from llama_stack.core.storage.datatypes import KVStoreReference
from llama_stack.providers.utils.kvstore import KVStore, kvstore_impl
@@ -40,11 +39,10 @@ class PromptServiceImpl(Prompts):
self.kvstore: KVStore
async def initialize(self) -> None:
# Use metadata store backend with prompts-specific namespace
metadata_ref = self.config.run_config.storage.stores.metadata
if not metadata_ref:
raise ValueError("storage.stores.metadata must be configured in run config")
prompts_ref = KVStoreReference(namespace="prompts", backend=metadata_ref.backend)
# Use prompts store reference from run config
prompts_ref = self.config.run_config.storage.stores.prompts
if not prompts_ref:
raise ValueError("storage.stores.prompts must be configured in run config")
self.kvstore = await kvstore_impl(prompts_ref)
def _get_default_key(self, prompt_id: str) -> str:

View file

@@ -563,6 +563,7 @@ def run_config_from_adhoc_config_spec(
metadata=KVStoreReference(backend="kv_default", namespace="registry"),
inference=InferenceStoreReference(backend="sql_default", table_name="inference_store"),
conversations=SqlStoreReference(backend="sql_default", table_name="openai_conversations"),
prompts=KVStoreReference(backend="kv_default", namespace="prompts"),
),
),
)

View file

@@ -271,6 +271,10 @@ class ServerStoresConfig(BaseModel):
default=None,
description="Responses store configuration (uses SQL backend)",
)
prompts: KVStoreReference | None = Field(
default=None,
description="Prompts store configuration (uses KV backend)",
)
class StorageConfig(BaseModel):

View file

@@ -247,6 +247,9 @@ storage:
conversations:
table_name: openai_conversations
backend: sql_default
prompts:
namespace: prompts
backend: kv_default
registered_resources:
models: []
shields:

View file

@@ -109,6 +109,9 @@ storage:
conversations:
table_name: openai_conversations
backend: sql_default
prompts:
namespace: prompts
backend: kv_default
registered_resources:
models:
- metadata: {}

View file

@@ -105,6 +105,9 @@ storage:
conversations:
table_name: openai_conversations
backend: sql_default
prompts:
namespace: prompts
backend: kv_default
registered_resources:
models:
- metadata: {}

View file

@@ -122,6 +122,9 @@ storage:
conversations:
table_name: openai_conversations
backend: sql_default
prompts:
namespace: prompts
backend: kv_default
registered_resources:
models:
- metadata: {}

View file

@@ -112,6 +112,9 @@ storage:
conversations:
table_name: openai_conversations
backend: sql_default
prompts:
namespace: prompts
backend: kv_default
registered_resources:
models:
- metadata: {}

View file

@@ -111,6 +111,9 @@ storage:
conversations:
table_name: openai_conversations
backend: sql_default
prompts:
namespace: prompts
backend: kv_default
registered_resources:
models:
- metadata: {}

View file

@@ -100,6 +100,9 @@ storage:
conversations:
table_name: openai_conversations
backend: sql_default
prompts:
namespace: prompts
backend: kv_default
registered_resources:
models: []
shields: []

View file

@@ -142,6 +142,9 @@ storage:
conversations:
table_name: openai_conversations
backend: sql_default
prompts:
namespace: prompts
backend: kv_default
registered_resources:
models:
- metadata: {}

View file

@@ -87,6 +87,9 @@ storage:
conversations:
table_name: openai_conversations
backend: sql_default
prompts:
namespace: prompts
backend: kv_default
registered_resources:
models:
- metadata: {}

View file

@@ -250,6 +250,9 @@ storage:
conversations:
table_name: openai_conversations
backend: sql_default
prompts:
namespace: prompts
backend: kv_default
registered_resources:
models: []
shields:

View file

@@ -247,6 +247,9 @@ storage:
conversations:
table_name: openai_conversations
backend: sql_default
prompts:
namespace: prompts
backend: kv_default
registered_resources:
models: []
shields:

View file

@@ -259,6 +259,10 @@ class RunConfigSettings(BaseModel):
backend="sql_default",
table_name="openai_conversations",
).model_dump(exclude_none=True),
"prompts": KVStoreReference(
backend="kv_default",
namespace="prompts",
).model_dump(exclude_none=True),
}
storage_config = dict(

View file

@@ -115,6 +115,9 @@ storage:
conversations:
table_name: openai_conversations
backend: sql_default
prompts:
namespace: prompts
backend: kv_default
registered_resources:
models: []
shields: []