Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-22 08:17:18 +00:00
**This PR changes configurations in a backward-incompatible way.** Run configs today repeat full SQLite/Postgres snippets everywhere a store is needed, which means duplicated credentials, extra connection pools, and drift between files. This PR introduces named storage backends so the stack and providers can share a single catalog and reference those backends by name.

## Key Changes

- Add `storage.backends` to `StackRunConfig`, register each KV/SQL backend once at startup, and validate that references point to the right family.
- Move server stores under `storage.stores` with lightweight references (backend + namespace/table) instead of full configs.
- Update every provider/config/doc to use the new reference style; docs/codegen now surface the simplified YAML.

## Migration

Before:

```yaml
metadata_store:
  type: sqlite
  db_path: ~/.llama/distributions/foo/registry.db
inference_store:
  type: postgres
  host: ${env.POSTGRES_HOST}
  port: ${env.POSTGRES_PORT}
  db: ${env.POSTGRES_DB}
  user: ${env.POSTGRES_USER}
  password: ${env.POSTGRES_PASSWORD}
conversations_store:
  type: postgres
  host: ${env.POSTGRES_HOST}
  port: ${env.POSTGRES_PORT}
  db: ${env.POSTGRES_DB}
  user: ${env.POSTGRES_USER}
  password: ${env.POSTGRES_PASSWORD}
```

After:

```yaml
storage:
  backends:
    kv_default:
      type: kv_sqlite
      db_path: ~/.llama/distributions/foo/kvstore.db
    sql_default:
      type: sql_postgres
      host: ${env.POSTGRES_HOST}
      port: ${env.POSTGRES_PORT}
      db: ${env.POSTGRES_DB}
      user: ${env.POSTGRES_USER}
      password: ${env.POSTGRES_PASSWORD}
  stores:
    metadata:
      backend: kv_default
      namespace: registry
    inference:
      backend: sql_default
      table_name: inference_store
      max_write_queue_size: 10000
      num_writers: 4
    conversations:
      backend: sql_default
      table_name: openai_conversations
```

Provider configs follow the same pattern. For example, a Chroma vector adapter switches from:

```yaml
providers:
  vector_io:
  - provider_id: chromadb
    provider_type: remote::chromadb
    config:
      url: ${env.CHROMADB_URL}
      kvstore:
        type: sqlite
        db_path: ~/.llama/distributions/foo/chroma.db
```

to:

```yaml
providers:
  vector_io:
  - provider_id: chromadb
    provider_type: remote::chromadb
    config:
      url: ${env.CHROMADB_URL}
      persistence:
        backend: kv_default
        namespace: vector_io::chroma_remote
```

Once the backends are declared, everything else just points at them, so rotating credentials or swapping to Postgres happens in one place and the stack reuses a single connection pool.
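To make the family validation concrete, here is a hypothetical misconfiguration (not taken from this PR): `inference` is a SQL-backed store, so pointing it at the KV-family `kv_default` backend should be rejected when the stack validates references at startup, rather than failing at first use. The exact error surface is not shown in this description.

```yaml
storage:
  backends:
    kv_default:
      type: kv_sqlite
      db_path: ~/.llama/distributions/foo/kvstore.db
  stores:
    inference:
      backend: kv_default   # rejected: the inference store needs a SQL-family backend
      table_name: inference_store
```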
255 lines · 7.2 KiB · YAML
version: 2
image_name: starter
apis:
- agents
- batches
- datasetio
- eval
- files
- inference
- post_training
- safety
- scoring
- tool_runtime
- vector_io
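# ${env.VAR:=default} substitutes a default when VAR is unset; ${env.VAR:+value}
# yields the value only when VAR is set, so those providers register conditionally.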
providers:
  inference:
  - provider_id: ${env.CEREBRAS_API_KEY:+cerebras}
    provider_type: remote::cerebras
    config:
      base_url: https://api.cerebras.ai
      api_key: ${env.CEREBRAS_API_KEY:=}
  - provider_id: ${env.OLLAMA_URL:+ollama}
    provider_type: remote::ollama
    config:
      url: ${env.OLLAMA_URL:=http://localhost:11434}
  - provider_id: ${env.VLLM_URL:+vllm}
    provider_type: remote::vllm
    config:
      url: ${env.VLLM_URL:=}
      max_tokens: ${env.VLLM_MAX_TOKENS:=4096}
      api_token: ${env.VLLM_API_TOKEN:=fake}
      tls_verify: ${env.VLLM_TLS_VERIFY:=true}
  - provider_id: ${env.TGI_URL:+tgi}
    provider_type: remote::tgi
    config:
      url: ${env.TGI_URL:=}
  - provider_id: fireworks
    provider_type: remote::fireworks
    config:
      url: https://api.fireworks.ai/inference/v1
      api_key: ${env.FIREWORKS_API_KEY:=}
  - provider_id: together
    provider_type: remote::together
    config:
      url: https://api.together.xyz/v1
      api_key: ${env.TOGETHER_API_KEY:=}
  - provider_id: bedrock
    provider_type: remote::bedrock
  - provider_id: ${env.NVIDIA_API_KEY:+nvidia}
    provider_type: remote::nvidia
    config:
      url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com}
      api_key: ${env.NVIDIA_API_KEY:=}
      append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True}
  - provider_id: openai
    provider_type: remote::openai
    config:
      api_key: ${env.OPENAI_API_KEY:=}
      base_url: ${env.OPENAI_BASE_URL:=https://api.openai.com/v1}
  - provider_id: anthropic
    provider_type: remote::anthropic
    config:
      api_key: ${env.ANTHROPIC_API_KEY:=}
  - provider_id: gemini
    provider_type: remote::gemini
    config:
      api_key: ${env.GEMINI_API_KEY:=}
  - provider_id: ${env.VERTEX_AI_PROJECT:+vertexai}
    provider_type: remote::vertexai
    config:
      project: ${env.VERTEX_AI_PROJECT:=}
      location: ${env.VERTEX_AI_LOCATION:=us-central1}
  - provider_id: groq
    provider_type: remote::groq
    config:
      url: https://api.groq.com
      api_key: ${env.GROQ_API_KEY:=}
  - provider_id: sambanova
    provider_type: remote::sambanova
    config:
      url: https://api.sambanova.ai/v1
      api_key: ${env.SAMBANOVA_API_KEY:=}
  - provider_id: ${env.AZURE_API_KEY:+azure}
    provider_type: remote::azure
    config:
      api_key: ${env.AZURE_API_KEY:=}
      api_base: ${env.AZURE_API_BASE:=}
      api_version: ${env.AZURE_API_VERSION:=}
      api_type: ${env.AZURE_API_TYPE:=}
  - provider_id: sentence-transformers
    provider_type: inline::sentence-transformers
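  # Every vector_io provider below persists through a named backend reference
  # (persistence.backend) rather than an inline kvstore config.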
  vector_io:
  - provider_id: faiss
    provider_type: inline::faiss
    config:
      persistence:
        namespace: vector_io::faiss
        backend: kv_default
  - provider_id: sqlite-vec
    provider_type: inline::sqlite-vec
    config:
      db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/sqlite_vec.db
      persistence:
        namespace: vector_io::sqlite_vec
        backend: kv_default
  - provider_id: ${env.MILVUS_URL:+milvus}
    provider_type: inline::milvus
    config:
      db_path: ${env.MILVUS_DB_PATH:=~/.llama/distributions/starter}/milvus.db
      persistence:
        namespace: vector_io::milvus
        backend: kv_default
  - provider_id: ${env.CHROMADB_URL:+chromadb}
    provider_type: remote::chromadb
    config:
      url: ${env.CHROMADB_URL:=}
      persistence:
        namespace: vector_io::chroma_remote
        backend: kv_default
  - provider_id: ${env.PGVECTOR_DB:+pgvector}
    provider_type: remote::pgvector
    config:
      host: ${env.PGVECTOR_HOST:=localhost}
      port: ${env.PGVECTOR_PORT:=5432}
      db: ${env.PGVECTOR_DB:=}
      user: ${env.PGVECTOR_USER:=}
      password: ${env.PGVECTOR_PASSWORD:=}
      persistence:
        namespace: vector_io::pgvector
        backend: kv_default
  files:
  - provider_id: meta-reference-files
    provider_type: inline::localfs
    config:
      storage_dir: ${env.FILES_STORAGE_DIR:=~/.llama/distributions/starter/files}
      metadata_store:
        table_name: files_metadata
        backend: sql_default
  safety:
  - provider_id: llama-guard
    provider_type: inline::llama-guard
    config:
      excluded_categories: []
  - provider_id: code-scanner
    provider_type: inline::code-scanner
  agents:
  - provider_id: meta-reference
    provider_type: inline::meta-reference
    config:
      persistence:
        agent_state:
          namespace: agents
          backend: kv_default
        responses:
          table_name: responses
          backend: sql_default
          max_write_queue_size: 10000
          num_writers: 4
  post_training:
  - provider_id: torchtune-cpu
    provider_type: inline::torchtune-cpu
    config:
      checkpoint_format: meta
  eval:
  - provider_id: meta-reference
    provider_type: inline::meta-reference
    config:
      kvstore:
        namespace: eval
        backend: kv_default
  datasetio:
  - provider_id: huggingface
    provider_type: remote::huggingface
    config:
      kvstore:
        namespace: datasetio::huggingface
        backend: kv_default
  - provider_id: localfs
    provider_type: inline::localfs
    config:
      kvstore:
        namespace: datasetio::localfs
        backend: kv_default
  scoring:
  - provider_id: basic
    provider_type: inline::basic
  - provider_id: llm-as-judge
    provider_type: inline::llm-as-judge
  - provider_id: braintrust
    provider_type: inline::braintrust
    config:
      openai_api_key: ${env.OPENAI_API_KEY:=}
  tool_runtime:
  - provider_id: brave-search
    provider_type: remote::brave-search
    config:
      api_key: ${env.BRAVE_SEARCH_API_KEY:=}
      max_results: 3
  - provider_id: tavily-search
    provider_type: remote::tavily-search
    config:
      api_key: ${env.TAVILY_SEARCH_API_KEY:=}
      max_results: 3
  - provider_id: rag-runtime
    provider_type: inline::rag-runtime
  - provider_id: model-context-protocol
    provider_type: remote::model-context-protocol
  batches:
  - provider_id: reference
    provider_type: inline::reference
    config:
      kvstore:
        namespace: batches
        backend: kv_default
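# Backend catalog: each KV/SQL backend is declared once here and referenced by
# name by the stores below and by the provider persistence blocks above.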
storage:
  backends:
    kv_default:
      type: kv_sqlite
      db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/kvstore.db
    sql_default:
      type: sql_sqlite
      db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/sql_store.db
  stores:
    metadata:
      namespace: registry
      backend: kv_default
    inference:
      table_name: inference_store
      backend: sql_default
      max_write_queue_size: 10000
      num_writers: 4
    conversations:
      table_name: openai_conversations
      backend: sql_default
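# Shield registrations resolve only when SAFETY_MODEL / CODE_SCANNER_MODEL are set.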
models: []
shields:
- shield_id: llama-guard
  provider_id: ${env.SAFETY_MODEL:+llama-guard}
  provider_shield_id: ${env.SAFETY_MODEL:=}
- shield_id: code-scanner
  provider_id: ${env.CODE_SCANNER_MODEL:+code-scanner}
  provider_shield_id: ${env.CODE_SCANNER_MODEL:=}
vector_dbs: []
datasets: []
scoring_fns: []
benchmarks: []
tool_groups:
- toolgroup_id: builtin::websearch
  provider_id: tavily-search
- toolgroup_id: builtin::rag
  provider_id: rag-runtime
server:
  port: 8321
telemetry:
  enabled: true