**This PR changes configurations in a backward-incompatible way.**
Run configs today repeat full SQLite/Postgres snippets everywhere a
store is needed, which means duplicated credentials, extra connection
pools, and lots of drift between files. This PR introduces named storage
backends so the stack and providers can share a single catalog and
reference those backends by name.
## Key Changes
- Add `storage.backends` to `StackRunConfig`, register each KV/SQL
backend once at startup, and validate that references point to the right
family (see the sketch after this list).
- Move server stores under `storage.stores` with lightweight references
(backend + namespace/table) instead of full configs.
- Update every provider config and doc to use the new reference style;
docs/codegen now surface the simplified YAML.
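To illustrate the family check, here is a deliberately broken sketch (a hypothetical config, reusing the backend and store names from the migration example below): a KV store such as `metadata` that references a SQL-family backend should be rejected at startup rather than silently misconfigured.
```yaml
storage:
  backends:
    sql_default:
      type: sql_postgres
      host: ${env.POSTGRES_HOST}
      port: ${env.POSTGRES_PORT}
      db: ${env.POSTGRES_DB}
      user: ${env.POSTGRES_USER}
      password: ${env.POSTGRES_PASSWORD}
  stores:
    metadata:
      backend: sql_default   # invalid: metadata is a KV store, sql_default is SQL-family
      namespace: registry
```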
## Migration
Before:
```yaml
metadata_store:
  type: sqlite
  db_path: ~/.llama/distributions/foo/registry.db
inference_store:
  type: postgres
  host: ${env.POSTGRES_HOST}
  port: ${env.POSTGRES_PORT}
  db: ${env.POSTGRES_DB}
  user: ${env.POSTGRES_USER}
  password: ${env.POSTGRES_PASSWORD}
conversations_store:
  type: postgres
  host: ${env.POSTGRES_HOST}
  port: ${env.POSTGRES_PORT}
  db: ${env.POSTGRES_DB}
  user: ${env.POSTGRES_USER}
  password: ${env.POSTGRES_PASSWORD}
```
After:
```yaml
storage:
  backends:
    kv_default:
      type: kv_sqlite
      db_path: ~/.llama/distributions/foo/kvstore.db
    sql_default:
      type: sql_postgres
      host: ${env.POSTGRES_HOST}
      port: ${env.POSTGRES_PORT}
      db: ${env.POSTGRES_DB}
      user: ${env.POSTGRES_USER}
      password: ${env.POSTGRES_PASSWORD}
  stores:
    metadata:
      backend: kv_default
      namespace: registry
    inference:
      backend: sql_default
      table_name: inference_store
      max_write_queue_size: 10000
      num_writers: 4
    conversations:
      backend: sql_default
      table_name: openai_conversations
```
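Note the split in the store references: stores on a KV backend are keyed by a `namespace`, while stores on a SQL backend name a `table_name`.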
Provider configs follow the same pattern. For example, a Chroma vector
adapter switches from:
```yaml
providers:
  vector_io:
  - provider_id: chromadb
    provider_type: remote::chromadb
    config:
      url: ${env.CHROMADB_URL}
      kvstore:
        type: sqlite
        db_path: ~/.llama/distributions/foo/chroma.db
```
to:
```yaml
providers:
  vector_io:
  - provider_id: chromadb
    provider_type: remote::chromadb
    config:
      url: ${env.CHROMADB_URL}
      persistence:
        backend: kv_default
        namespace: vector_io::chroma_remote
```
Once the backends are declared, everything else just points at them, so
rotating credentials or swapping to Postgres happens in one place and
the stack reuses a single connection pool.
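For example, promoting the whole KV family from SQLite to Postgres is a single backend edit (a hypothetical change; `kv_postgres` is an assumed type name, parallel to `kv_sqlite` and `sql_postgres`), and every store and provider that references `kv_default` follows automatically:
```yaml
storage:
  backends:
    kv_default:
      type: kv_postgres          # assumed type name; was kv_sqlite
      host: ${env.POSTGRES_HOST}
      port: ${env.POSTGRES_PORT}
      db: ${env.POSTGRES_DB}
      user: ${env.POSTGRES_USER}
      password: ${env.POSTGRES_PASSWORD}
      table_name: llamastack_kvstore
```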
The `postgres-demo` distribution template shows the new `storage_backends` hook in use:

```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from llama_stack.apis.models import ModelType
from llama_stack.core.datatypes import (
    BuildProvider,
    ModelInput,
    Provider,
    ShieldInput,
    ToolGroupInput,
)
from llama_stack.distributions.template import (
    DistributionTemplate,
    RunConfigSettings,
)
from llama_stack.providers.inline.inference.sentence_transformers import SentenceTransformersInferenceConfig
from llama_stack.providers.remote.inference.vllm import VLLMInferenceAdapterConfig
from llama_stack.providers.remote.vector_io.chroma.config import ChromaVectorIOConfig
from llama_stack.providers.utils.kvstore.config import PostgresKVStoreConfig
from llama_stack.providers.utils.sqlstore.sqlstore import PostgresSqlStoreConfig


def get_distribution_template() -> DistributionTemplate:
    # vLLM serves the main inference model; its URL is resolved from the
    # environment at run time.
    inference_providers = [
        Provider(
            provider_id="vllm-inference",
            provider_type="remote::vllm",
            config=VLLMInferenceAdapterConfig.sample_run_config(
                url="${env.VLLM_URL:=http://localhost:8000/v1}",
            ),
        ),
    ]
    # Providers included when the distribution is built.
    providers = {
        "inference": [
            BuildProvider(provider_type="remote::vllm"),
            BuildProvider(provider_type="inline::sentence-transformers"),
        ],
        "vector_io": [BuildProvider(provider_type="remote::chromadb")],
        "safety": [BuildProvider(provider_type="inline::llama-guard")],
        "agents": [BuildProvider(provider_type="inline::meta-reference")],
        "tool_runtime": [
            BuildProvider(provider_type="remote::brave-search"),
            BuildProvider(provider_type="remote::tavily-search"),
            BuildProvider(provider_type="inline::rag-runtime"),
            BuildProvider(provider_type="remote::model-context-protocol"),
        ],
    }
    name = "postgres-demo"

    # The Chroma provider is only enabled when ENABLE_CHROMADB is set.
    vector_io_providers = [
        Provider(
            provider_id="${env.ENABLE_CHROMADB:+chromadb}",
            provider_type="remote::chromadb",
            config=ChromaVectorIOConfig.sample_run_config(
                f"~/.llama/distributions/{name}",
                url="${env.CHROMADB_URL:=}",
            ),
        ),
    ]
    default_tool_groups = [
        ToolGroupInput(
            toolgroup_id="builtin::websearch",
            provider_id="tavily-search",
        ),
        ToolGroupInput(
            toolgroup_id="builtin::rag",
            provider_id="rag-runtime",
        ),
    ]

    default_models = [
        ModelInput(
            model_id="${env.INFERENCE_MODEL}",
            provider_id="vllm-inference",
        )
    ]
    embedding_provider = Provider(
        provider_id="sentence-transformers",
        provider_type="inline::sentence-transformers",
        config=SentenceTransformersInferenceConfig.sample_run_config(),
    )
    embedding_model = ModelInput(
        model_id="nomic-embed-text-v1.5",
        provider_id=embedding_provider.provider_id,
        model_type=ModelType.embedding,
        metadata={
            "embedding_dimension": 768,
        },
    )
    return DistributionTemplate(
        name=name,
        distro_type="self_hosted",
        description="Quick start template for running Llama Stack with several popular providers",
        container_image=None,
        template_path=None,
        providers=providers,
        available_models_by_provider={},
        run_configs={
            "run.yaml": RunConfigSettings(
                provider_overrides={
                    "inference": inference_providers + [embedding_provider],
                    "vector_io": vector_io_providers,
                },
                default_models=default_models + [embedding_model],
                default_tool_groups=default_tool_groups,
                default_shields=[ShieldInput(shield_id="meta-llama/Llama-Guard-3-8B")],
                # Named storage backends (see the PR description): both the
                # KV and SQL families are backed by Postgres in this demo.
                storage_backends={
                    "kv_default": PostgresKVStoreConfig.sample_run_config(
                        table_name="llamastack_kvstore",
                    ),
                    "sql_default": PostgresSqlStoreConfig.sample_run_config(),
                },
            ),
        },
        run_config_env_vars={
            "LLAMA_STACK_PORT": (
                "8321",
                "Port for the Llama Stack distribution server",
            ),
        },
    )
```
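For reference, a rough sketch of what the `storage_backends` above might render to in the generated run.yaml, assuming the sample Postgres configs expand to the same `${env...}` placeholders used in the migration example and that `kv_postgres` is the KV-family Postgres type:
```yaml
storage:
  backends:
    kv_default:
      type: kv_postgres            # assumed type name for PostgresKVStoreConfig
      host: ${env.POSTGRES_HOST}
      port: ${env.POSTGRES_PORT}
      db: ${env.POSTGRES_DB}
      user: ${env.POSTGRES_USER}
      password: ${env.POSTGRES_PASSWORD}
      table_name: llamastack_kvstore
    sql_default:
      type: sql_postgres
      host: ${env.POSTGRES_HOST}
      port: ${env.POSTGRES_PORT}
      db: ${env.POSTGRES_DB}
      user: ${env.POSTGRES_USER}
      password: ${env.POSTGRES_PASSWORD}
```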