version: '2'
distribution_spec:
  description: Use (an external) Ollama server for running LLM inference
  providers:
    inference:
    - remote::ollama
    vector_io:
    - remote::chromadb
    safety:
    - inline::llama-guard
    agents:
    - inline::meta-reference
    telemetry:
    - inline::meta-reference
    eval:
    - inline::meta-reference
    datasetio:
    - inline::localfs
    scoring:
    - inline::basic
    - inline::llm-as-judge
    tool_runtime:
    - remote::brave-search
    - remote::tavily-search
    - inline::rag-runtime
    - remote::model-context-protocol
    - remote::wolfram-alpha
metadata_store:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/registry.db
inference_store:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/inference_store.db
models:
- metadata: {}
  model_id: ${env.INFERENCE_MODEL}
  provider_id: remote::ollama
  model_type: llm
- metadata:
    embedding_dimension: 384
  model_id: all-MiniLM-L6-v2
  provider_id: remote::ollama
  provider_model_id: all-minilm:latest
  model_type: embedding
image_type: container
additional_pip_packages:
- sqlalchemy[asyncio]
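
# --- Usage sketch (an assumption for illustration, not part of the spec above) ---
# ${env.VAR:default} substitutes the environment variable VAR, falling back to the
# default after the colon when VAR is unset. With this file saved as build.yaml,
# the llama-stack CLI can typically build the container image and start the
# server against a running Ollama instance, e.g.:
#   llama stack build --template ollama --image-type container
#   llama stack run ollama --env INFERENCE_MODEL=llama3.2:3b
# The model tag "llama3.2:3b" is illustrative; INFERENCE_MODEL should name a
# model already pulled into your Ollama server (exact CLI flags may vary by
# llama-stack version).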