# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from pathlib import Path

from llama_stack.apis.models.models import ModelType
from llama_stack.distribution.datatypes import (
    ModelInput,
    Provider,
    ShieldInput,
    ToolGroupInput,
)
from llama_stack.providers.inline.vector_io.sqlite_vec.config import SQLiteVectorIOConfig
from llama_stack.providers.remote.inference.ollama import OllamaImplConfig
from llama_stack.templates.template import DistributionTemplate, RunConfigSettings


def get_distribution_template() -> DistributionTemplate:
    # Provider types enabled for this distribution, keyed by API.
    providers = {
        "inference": ["remote::ollama"],
        "vector_io": ["inline::sqlite-vec", "remote::chromadb", "remote::pgvector"],
        "safety": ["inline::llama-guard"],
        "agents": ["inline::meta-reference"],
        "telemetry": ["inline::meta-reference"],
        "eval": ["inline::meta-reference"],
        "datasetio": ["remote::huggingface", "inline::localfs"],
        "scoring": ["inline::basic", "inline::llm-as-judge", "inline::braintrust"],
        "tool_runtime": [
            "remote::brave-search",
            "remote::tavily-search",
            "inline::code-interpreter",
            "inline::rag-runtime",
            "remote::model-context-protocol",
            "remote::wolfram-alpha",
        ],
    }
    name = "ollama"

    inference_provider = Provider(
        provider_id="ollama",
        provider_type="remote::ollama",
        config=OllamaImplConfig.sample_run_config(),
    )
    vector_io_provider_sqlite = Provider(
        provider_id="sqlite-vec",
        provider_type="inline::sqlite-vec",
        config=SQLiteVectorIOConfig.sample_run_config(f"~/.llama/distributions/{name}"),
    )

    # Model entries are resolved from environment variables at run time.
    inference_model = ModelInput(
        model_id="${env.INFERENCE_MODEL}",
        provider_id="ollama",
    )
    safety_model = ModelInput(
        model_id="${env.SAFETY_MODEL}",
        provider_id="ollama",
    )
    embedding_model = ModelInput(
        model_id="all-MiniLM-L6-v2",
        provider_id="ollama",
        provider_model_id="all-minilm:latest",
        model_type=ModelType.embedding,
        metadata={
            "embedding_dimension": 384,
        },
    )
    default_tool_groups = [
        ToolGroupInput(
            toolgroup_id="builtin::websearch",
            provider_id="tavily-search",
        ),
        ToolGroupInput(
            toolgroup_id="builtin::rag",
            provider_id="rag-runtime",
        ),
        ToolGroupInput(
            toolgroup_id="builtin::code_interpreter",
            provider_id="code-interpreter",
        ),
        ToolGroupInput(
            toolgroup_id="builtin::wolfram_alpha",
            provider_id="wolfram-alpha",
        ),
    ]

    return DistributionTemplate(
        name=name,
        distro_type="self_hosted",
        description="Use (an external) Ollama server for running LLM inference",
        container_image=None,
        template_path=Path(__file__).parent / "doc_template.md",
        providers=providers,
        run_configs={
            # Base run config: inference + SQLite-backed vector IO.
            "run.yaml": RunConfigSettings(
                provider_overrides={
                    "inference": [inference_provider],
                    "vector_io": [vector_io_provider_sqlite],
                },
                default_models=[inference_model, embedding_model],
                default_tool_groups=default_tool_groups,
            ),
            # Safety-enabled variant: adds Llama Guard and the code scanner shields.
            "run-with-safety.yaml": RunConfigSettings(
                provider_overrides={
                    "inference": [inference_provider],
                    "vector_io": [vector_io_provider_sqlite],
                    "safety": [
                        Provider(
                            provider_id="llama-guard",
                            provider_type="inline::llama-guard",
                            config={},
                        ),
                        Provider(
                            provider_id="code-scanner",
                            provider_type="inline::code-scanner",
                            config={},
                        ),
                    ],
                },
                default_models=[
                    inference_model,
                    safety_model,
                    embedding_model,
                ],
                default_shields=[
                    ShieldInput(
                        shield_id="${env.SAFETY_MODEL}",
                        provider_id="llama-guard",
                    ),
                    ShieldInput(
                        shield_id="CodeScanner",
                        provider_id="code-scanner",
                    ),
                ],
                default_tool_groups=default_tool_groups,
            ),
        },
        run_config_env_vars={
            "LLAMA_STACK_PORT": (
                "5001",
                "Port for the Llama Stack distribution server",
            ),
            "OLLAMA_URL": (
                "http://127.0.0.1:11434",
                "URL of the Ollama server",
            ),
            "INFERENCE_MODEL": (
                "meta-llama/Llama-3.2-3B-Instruct",
                "Inference model loaded into the Ollama server",
            ),
            "SAFETY_MODEL": (
                "meta-llama/Llama-Guard-3-1B",
                "Safety model loaded into the Ollama server",
            ),
        },
    )