version: '2'
image_name: ollama
docker_image: null
conda_env: ollama
apis:
- agents
- datasetio
- eval
- inference
- memory
- safety
- scoring
- telemetry
providers:
  inference:
  - provider_id: ollama
    provider_type: remote::ollama
    config:
      url: ${env.OLLAMA_URL:http://localhost:11434}
  memory:
  - provider_id: faiss
    provider_type: inline::faiss
    config:
      kvstore:
        type: sqlite
        namespace: null
        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/faiss_store.db
  safety:
  - provider_id: llama-guard
    provider_type: inline::llama-guard
    config: {}
  agents:
  - provider_id: meta-reference
    provider_type: inline::meta-reference
    config:
      persistence_store:
        type: sqlite
        namespace: null
        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/agents_store.db
  telemetry:
  - provider_id: meta-reference
    provider_type: inline::meta-reference
    config:
      service_name: ${env.OTEL_SERVICE_NAME:llama-stack}
      sinks: ${env.TELEMETRY_SINKS:console,sqlite}
      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/ollama/trace_store.db}
  eval:
  - provider_id: meta-reference
    provider_type: inline::meta-reference
    config: {}
  datasetio:
  - provider_id: huggingface
    provider_type: remote::huggingface
    config: {}
  - provider_id: localfs
    provider_type: inline::localfs
    config: {}
  scoring:
  - provider_id: basic
    provider_type: inline::basic
    config: {}
  - provider_id: llm-as-judge
    provider_type: inline::llm-as-judge
    config: {}
  - provider_id: braintrust
    provider_type: inline::braintrust
    config:
      openai_api_key: ${env.OPENAI_API_KEY:}
metadata_store:
  namespace: null
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/registry.db
models:
- metadata: {}
  model_id: ${env.INFERENCE_MODEL}
  provider_id: ollama
  provider_model_id: null
shields: []
memory_banks: []
datasets: []
scoring_fns: []
eval_tasks: []
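
# Note: placeholders of the form ${env.NAME:default} above are resolved from
# environment variables, with the value after the colon used as the fallback
# when the variable is unset. Variables referenced by this configuration:
#   INFERENCE_MODEL    (required; no default) - model id served via the ollama provider
#   OLLAMA_URL         (default: http://localhost:11434)
#   SQLITE_STORE_DIR   (default: ~/.llama/distributions/ollama)
#   OTEL_SERVICE_NAME  (default: llama-stack)
#   TELEMETRY_SINKS    (default: console,sqlite)
#   SQLITE_DB_PATH     (default: ~/.llama/distributions/ollama/trace_store.db)
#   OPENAI_API_KEY     (default: empty; used only by the braintrust scoring provider)
#
# A minimal launch sketch (assuming the `llama stack` CLI is installed, an
# Ollama server is reachable at OLLAMA_URL, and this file is saved as the
# hypothetical ./run.yaml):
#   INFERENCE_MODEL=<model-id> llama stack run ./run.yaml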