version: '2'
image_name: kubernetes-benchmark-demo
apis:
- agents
- files
- inference
- safety
- telemetry
- tool_runtime
- vector_io
providers:
  inference:
  - provider_id: vllm-inference
    provider_type: remote::vllm
    config:
      url: ${env.VLLM_URL:=http://localhost:8000/v1}
      max_tokens: ${env.VLLM_MAX_TOKENS:=4096}
      api_token: ${env.VLLM_API_TOKEN:=fake}
      tls_verify: ${env.VLLM_TLS_VERIFY:=true}
  - provider_id: sentence-transformers
    provider_type: inline::sentence-transformers
    config: {}
  files:
  - provider_id: meta-reference-files
    provider_type: inline::localfs
    config:
      storage_dir: ${env.FILES_STORAGE_DIR:=~/.llama/distributions/starter/files}
      metadata_store:
        table_name: files_metadata
        backend: sql_default
  vector_io:
  - provider_id: ${env.ENABLE_CHROMADB:+chromadb}
    provider_type: remote::chromadb
    config:
      url: ${env.CHROMADB_URL:=}
      persistence:
        namespace: vector_io::chroma_remote
        backend: kv_default
  safety:
  - provider_id: llama-guard
    provider_type: inline::llama-guard
    config:
      excluded_categories: []
  agents:
  - provider_id: meta-reference
    provider_type: inline::meta-reference
    config:
      persistence:
        agent_state:
          namespace: agents
          backend: kv_default
        responses:
          table_name: responses
          backend: sql_default
          max_write_queue_size: 10000
          num_writers: 4
  telemetry:
  - provider_id: meta-reference
    provider_type: inline::meta-reference
    config:
      service_name: "${env.OTEL_SERVICE_NAME:=\u200B}"
      sinks: ${env.TELEMETRY_SINKS:=console}
  tool_runtime:
  - provider_id: brave-search
    provider_type: remote::brave-search
    config:
      api_key: ${env.BRAVE_SEARCH_API_KEY:+}
      max_results: 3
  - provider_id: tavily-search
    provider_type: remote::tavily-search
    config:
      api_key: ${env.TAVILY_SEARCH_API_KEY:+}
      max_results: 3
  - provider_id: rag-runtime
    provider_type: inline::rag-runtime
    config: {}
  - provider_id: model-context-protocol
    provider_type: remote::model-context-protocol
    config: {}
storage:
  backends:
    kv_default:
      type: kv_postgres
      host: ${env.POSTGRES_HOST:=localhost}
      port: ${env.POSTGRES_PORT:=5432}
      db: ${env.POSTGRES_DB:=llamastack}
      user: ${env.POSTGRES_USER:=llamastack}
      password: ${env.POSTGRES_PASSWORD:=llamastack}
      table_name: ${env.POSTGRES_TABLE_NAME:=llamastack_kvstore}
    sql_default:
      type: sql_postgres
      host: ${env.POSTGRES_HOST:=localhost}
      port: ${env.POSTGRES_PORT:=5432}
      db: ${env.POSTGRES_DB:=llamastack}
      user: ${env.POSTGRES_USER:=llamastack}
      password: ${env.POSTGRES_PASSWORD:=llamastack}
  stores:
    metadata:
      namespace: registry
      backend: kv_default
    inference:
      table_name: inference_store
      backend: sql_default
      max_write_queue_size: 10000
      num_writers: 4
    conversations:
      table_name: openai_conversations
      backend: sql_default
registered_resources:
  models:
  - metadata:
      embedding_dimension: 768
    model_id: nomic-ai/nomic-embed-text-v1.5
    provider_id: sentence-transformers
    model_type: embedding
  - model_id: ${env.INFERENCE_MODEL}
    provider_id: vllm-inference
    model_type: llm
  shields:
  - shield_id: ${env.SAFETY_MODEL:=meta-llama/Llama-Guard-3-1B}
  vector_dbs: []
  datasets: []
  scoring_fns: []
  benchmarks: []
  tool_groups:
  - toolgroup_id: builtin::websearch
    provider_id: tavily-search
  - toolgroup_id: builtin::rag
    provider_id: rag-runtime
server:
  port: 8323
telemetry:
  enabled: true
vector_stores:
  default_provider_id: chromadb
  default_embedding_model:
    provider_id: sentence-transformers
    model_id: nomic-ai/nomic-embed-text-v1.5
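
# Usage sketch (comments only; not part of the config). This is a minimal way to
# launch the stack with this file, assuming the standard `llama stack run` CLI is
# installed and the vLLM, Postgres, and Chroma endpoints referenced above are
# reachable. The model name and in-cluster URLs below are hypothetical examples,
# not values mandated by this config.
#
#   export INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct   # hypothetical; use the model your vLLM server actually serves
#   export VLLM_URL=http://vllm-server:8000/v1                # hypothetical in-cluster service URL
#   export ENABLE_CHROMADB=1                                  # any non-empty value selects the chromadb provider via ${env.ENABLE_CHROMADB:+chromadb}
#   export CHROMADB_URL=http://chromadb:8000                  # hypothetical in-cluster service URL
#   llama stack run <path-to-this-file>.yaml                  # serves on port 8323 per the server block above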