version: 2
distribution_spec:
  description: Quick start template for running Llama Stack with several popular providers
  providers:
    inference:
      - remote::cerebras
      - remote::ollama
      - remote::vllm
      - remote::tgi
      - remote::hf::serverless
      - remote::hf::endpoint
      - remote::fireworks
      - remote::together
      - remote::bedrock
      - remote::databricks
      - remote::nvidia
      - remote::runpod
      - remote::openai
      - remote::anthropic
      - remote::gemini
      - remote::groq
      - remote::fireworks-openai-compat
      - remote::llama-openai-compat
      - remote::together-openai-compat
      - remote::groq-openai-compat
      - remote::sambanova-openai-compat
      - remote::cerebras-openai-compat
      - remote::sambanova
      - remote::passthrough
      - inline::sentence-transformers
    vector_io:
      - inline::faiss
      - inline::sqlite-vec
      - inline::milvus
      - remote::chromadb
      - remote::pgvector
    files:
      - inline::localfs
    safety:
      - inline::llama-guard
    agents:
      - inline::meta-reference
    telemetry:
      - inline::meta-reference
    post_training:
      - inline::huggingface
    eval:
      - inline::meta-reference
    datasetio:
      - remote::huggingface
      - inline::localfs
    scoring:
      - inline::basic
      - inline::llm-as-judge
      - inline::braintrust
    tool_runtime:
      - remote::brave-search
      - remote::tavily-search
      - inline::rag-runtime
      - remote::model-context-protocol
image_type: conda
additional_pip_packages:
  - aiosqlite
  - asyncpg
  - sqlalchemy[asyncio]
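
# Usage sketch (assumption: this file is a Llama Stack build spec consumed by
# the `llama stack build` CLI, as in recent llama-stack releases; the filename
# build.yaml is illustrative, not specified by this file):
#
#   llama stack build --config build.yaml
#
# With image_type: conda, the build produces a conda environment containing the
# listed providers plus additional_pip_packages.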