# Llama Stack run configuration for the "ci-tests" distribution.
# Providers whose provider_id uses ${env.X:+name} are enabled only when the
# corresponding environment variable is set; ${env.X:=default} substitutes a
# default when the variable is unset or empty.
version: 2
image_name: ci-tests
apis:
- agents
- batches
- datasetio
- eval
- files
- inference
- post_training
- safety
- scoring
- tool_runtime
- vector_io
providers:
  inference:
  - provider_id: ${env.CEREBRAS_API_KEY:+cerebras}
    provider_type: remote::cerebras
    config:
      base_url: https://api.cerebras.ai
      api_key: ${env.CEREBRAS_API_KEY:=}
  - provider_id: ${env.OLLAMA_URL:+ollama}
    provider_type: remote::ollama
    config:
      url: ${env.OLLAMA_URL:=http://localhost:11434}
  - provider_id: ${env.VLLM_URL:+vllm}
    provider_type: remote::vllm
    config:
      url: ${env.VLLM_URL:=}
      max_tokens: ${env.VLLM_MAX_TOKENS:=4096}
      api_token: ${env.VLLM_API_TOKEN:=fake}
      tls_verify: ${env.VLLM_TLS_VERIFY:=true}
  - provider_id: ${env.TGI_URL:+tgi}
    provider_type: remote::tgi
    config:
      url: ${env.TGI_URL:=}
  - provider_id: fireworks
    provider_type: remote::fireworks
    config:
      url: https://api.fireworks.ai/inference/v1
      api_key: ${env.FIREWORKS_API_KEY:=}
  - provider_id: together
    provider_type: remote::together
    config:
      url: https://api.together.xyz/v1
      api_key: ${env.TOGETHER_API_KEY:=}
  - provider_id: bedrock
    provider_type: remote::bedrock
  - provider_id: ${env.NVIDIA_API_KEY:+nvidia}
    provider_type: remote::nvidia
    config:
      url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com}
      api_key: ${env.NVIDIA_API_KEY:=}
      append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True}
  - provider_id: openai
    provider_type: remote::openai
    config:
      api_key: ${env.OPENAI_API_KEY:=}
      base_url: ${env.OPENAI_BASE_URL:=https://api.openai.com/v1}
  - provider_id: anthropic
    provider_type: remote::anthropic
    config:
      api_key: ${env.ANTHROPIC_API_KEY:=}
  - provider_id: gemini
    provider_type: remote::gemini
    config:
      api_key: ${env.GEMINI_API_KEY:=}
  - provider_id: ${env.VERTEX_AI_PROJECT:+vertexai}
    provider_type: remote::vertexai
    config:
      project: ${env.VERTEX_AI_PROJECT:=}
      location: ${env.VERTEX_AI_LOCATION:=us-central1}
  - provider_id: groq
    provider_type: remote::groq
    config:
      url: https://api.groq.com
      api_key: ${env.GROQ_API_KEY:=}
  - provider_id: sambanova
    provider_type: remote::sambanova
    config:
      url: https://api.sambanova.ai/v1
      api_key: ${env.SAMBANOVA_API_KEY:=}
  - provider_id: ${env.AZURE_API_KEY:+azure}
    provider_type: remote::azure
    config:
      api_key: ${env.AZURE_API_KEY:=}
      api_base: ${env.AZURE_API_BASE:=}
      api_version: ${env.AZURE_API_VERSION:=}
      api_type: ${env.AZURE_API_TYPE:=}
  - provider_id: sentence-transformers
    provider_type: inline::sentence-transformers
  vector_io:
  - provider_id: faiss
    provider_type: inline::faiss
    config:
      persistence:
        namespace: vector_io::faiss
        backend: kv_default
  - provider_id: sqlite-vec
    provider_type: inline::sqlite-vec
    config:
      db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/ci-tests}/sqlite_vec.db
      persistence:
        namespace: vector_io::sqlite_vec
        backend: kv_default
  - provider_id: ${env.MILVUS_URL:+milvus}
    provider_type: inline::milvus
    config:
      db_path: ${env.MILVUS_DB_PATH:=~/.llama/distributions/ci-tests}/milvus.db
      persistence:
        namespace: vector_io::milvus
        backend: kv_default
  - provider_id: ${env.CHROMADB_URL:+chromadb}
    provider_type: remote::chromadb
    config:
      url: ${env.CHROMADB_URL:=}
      persistence:
        namespace: vector_io::chroma_remote
        backend: kv_default
  - provider_id: ${env.PGVECTOR_DB:+pgvector}
    provider_type: remote::pgvector
    config:
      host: ${env.PGVECTOR_HOST:=localhost}
      port: ${env.PGVECTOR_PORT:=5432}
      db: ${env.PGVECTOR_DB:=}
      user: ${env.PGVECTOR_USER:=}
      password: ${env.PGVECTOR_PASSWORD:=}
      persistence:
        namespace: vector_io::pgvector
        backend: kv_default
  - provider_id: ${env.QDRANT_URL:+qdrant}
    provider_type: remote::qdrant
    config:
      api_key: ${env.QDRANT_API_KEY:=}
      persistence:
        namespace: vector_io::qdrant_remote
        backend: kv_default
  - provider_id: ${env.WEAVIATE_CLUSTER_URL:+weaviate}
    provider_type: remote::weaviate
    config:
      weaviate_api_key: null
      weaviate_cluster_url: ${env.WEAVIATE_CLUSTER_URL:=localhost:8080}
      persistence:
        namespace: vector_io::weaviate
        backend: kv_default
  files:
  - provider_id: meta-reference-files
    provider_type: inline::localfs
    config:
      storage_dir: ${env.FILES_STORAGE_DIR:=~/.llama/distributions/ci-tests/files}
      metadata_store:
        table_name: files_metadata
        backend: sql_default
  safety:
  - provider_id: llama-guard
    provider_type: inline::llama-guard
    config:
      excluded_categories: []
  - provider_id: code-scanner
    provider_type: inline::code-scanner
  agents:
  - provider_id: meta-reference
    provider_type: inline::meta-reference
    config:
      persistence:
        agent_state:
          namespace: agents
          backend: kv_default
        responses:
          table_name: responses
          backend: sql_default
          max_write_queue_size: 10000
          num_writers: 4
  post_training:
  - provider_id: torchtune-cpu
    provider_type: inline::torchtune-cpu
    config:
      checkpoint_format: meta
  eval:
  - provider_id: meta-reference
    provider_type: inline::meta-reference
    config:
      kvstore:
        namespace: eval
        backend: kv_default
  datasetio:
  - provider_id: huggingface
    provider_type: remote::huggingface
    config:
      kvstore:
        namespace: datasetio::huggingface
        backend: kv_default
  - provider_id: localfs
    provider_type: inline::localfs
    config:
      kvstore:
        namespace: datasetio::localfs
        backend: kv_default
  scoring:
  - provider_id: basic
    provider_type: inline::basic
  - provider_id: llm-as-judge
    provider_type: inline::llm-as-judge
  - provider_id: braintrust
    provider_type: inline::braintrust
    config:
      openai_api_key: ${env.OPENAI_API_KEY:=}
  tool_runtime:
  - provider_id: brave-search
    provider_type: remote::brave-search
    config:
      api_key: ${env.BRAVE_SEARCH_API_KEY:=}
      max_results: 3
  - provider_id: tavily-search
    provider_type: remote::tavily-search
    config:
      api_key: ${env.TAVILY_SEARCH_API_KEY:=}
      max_results: 3
  - provider_id: rag-runtime
    provider_type: inline::rag-runtime
  - provider_id: model-context-protocol
    provider_type: remote::model-context-protocol
  batches:
  - provider_id: reference
    provider_type: inline::reference
    config:
      kvstore:
        namespace: batches
        backend: kv_default
# Named storage backends referenced by the kv_default / sql_default aliases above.
storage:
  backends:
    kv_default:
      type: kv_sqlite
      db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/ci-tests}/kvstore.db
    sql_default:
      type: sql_sqlite
      db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/ci-tests}/sql_store.db
  stores:
    metadata:
      namespace: registry
      backend: kv_default
    inference:
      table_name: inference_store
      backend: sql_default
      max_write_queue_size: 10000
      num_writers: 4
    conversations:
      table_name: openai_conversations
      backend: sql_default
registered_resources:
  models: []
  shields:
  - shield_id: llama-guard
    provider_id: ${env.SAFETY_MODEL:+llama-guard}
    provider_shield_id: ${env.SAFETY_MODEL:=}
  - shield_id: code-scanner
    provider_id: ${env.CODE_SCANNER_MODEL:+code-scanner}
    provider_shield_id: ${env.CODE_SCANNER_MODEL:=}
  vector_dbs: []
  datasets: []
  scoring_fns: []
  benchmarks: []
  tool_groups:
  - toolgroup_id: builtin::websearch
    provider_id: tavily-search
  - toolgroup_id: builtin::rag
    provider_id: rag-runtime
server:
  port: 8321
telemetry:
  enabled: true
vector_stores:
  default_provider_id: faiss
  default_embedding_model:
    provider_id: sentence-transformers
    model_id: nomic-ai/nomic-embed-text-v1.5