diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 3707d4671..89064b692 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -57,3 +57,17 @@ repos:
 #   hooks:
 #   - id: markdown-link-check
 #     args: ['--quiet']
+
+# - repo: local
+#   hooks:
+#   - id: distro-codegen
+#     name: Distribution Template Codegen
+#     additional_dependencies:
+#     - rich
+#     - pydantic
+#     entry: python -m llama_stack.scripts.distro_codegen
+#     language: python
+#     pass_filenames: false
+#     require_serial: true
+#     files: ^llama_stack/templates/.*$
+#     stages: [manual]
diff --git a/docs/source/getting_started/distributions/self_hosted_distro/meta-reference-gpu.md b/docs/source/getting_started/distributions/self_hosted_distro/meta-reference-gpu.md
index a0add3858..74a838d2f 100644
--- a/docs/source/getting_started/distributions/self_hosted_distro/meta-reference-gpu.md
+++ b/docs/source/getting_started/distributions/self_hosted_distro/meta-reference-gpu.md
@@ -13,6 +13,15 @@ The `llamastack/distribution-meta-reference-gpu` distribution consists of the fo
 
 Note that you need access to nvidia GPUs to run this distribution. This distribution is not compatible with CPU-only machines or machines with AMD GPUs.
 
+### Environment Variables
+
+The following environment variables can be configured:
+
+- `LLAMASTACK_PORT`: Port for the Llama Stack distribution server (default: `5001`)
+- `INFERENCE_MODEL`: Inference model loaded into the Meta Reference server (default: `meta-llama/Llama-3.2-3B-Instruct`)
+- `INFERENCE_CHECKPOINT_DIR`: Directory containing the Meta Reference model checkpoint (default: `null`)
+- `SAFETY_MODEL`: Name of the safety (Llama-Guard) model to use (default: `meta-llama/Llama-Guard-3-1B`)
+- `SAFETY_CHECKPOINT_DIR`: Directory containing the Llama-Guard model checkpoint (default: `null`)
 
 
 ## Prerequisite: Downloading Models
diff --git a/docs/source/getting_started/distributions/self_hosted_distro/ollama.md b/docs/source/getting_started/distributions/self_hosted_distro/ollama.md
index 0acee3198..63eddbe65 100644
--- a/docs/source/getting_started/distributions/self_hosted_distro/ollama.md
+++ b/docs/source/getting_started/distributions/self_hosted_distro/ollama.md
@@ -11,7 +11,15 @@ The `llamastack/distribution-ollama` distribution consists of the following prov
 | telemetry | `inline::meta-reference` |
 
 
-You should use this distribution if you have a regular desktop machine without very powerful GPUs. Of course, if you have powerful GPUs, you can still continue using this distribution since Ollama supports GPU acceleration.
+You should use this distribution if you have a regular desktop machine without very powerful GPUs. Of course, if you have powerful GPUs, you can still continue using this distribution since Ollama supports GPU acceleration.### Environment Variables
+
+The following environment variables can be configured:
+
+- `LLAMASTACK_PORT`: Port for the Llama Stack distribution server (default: `5001`)
+- `OLLAMA_URL`: URL of the Ollama server (default: `http://127.0.0.1:11434`)
+- `INFERENCE_MODEL`: Inference model loaded into the Ollama server (default: `meta-llama/Llama-3.2-3B-Instruct`)
+- `SAFETY_MODEL`: Safety model loaded into the Ollama server (default: `meta-llama/Llama-Guard-3-1B`)
+
 
 
 ## Setting up Ollama server
diff --git a/docs/source/getting_started/distributions/self_hosted_distro/remote-vllm.md b/docs/source/getting_started/distributions/self_hosted_distro/remote-vllm.md
index c9f8d6167..e1a6ad2dc 100644
--- a/docs/source/getting_started/distributions/self_hosted_distro/remote-vllm.md
+++ b/docs/source/getting_started/distributions/self_hosted_distro/remote-vllm.md
@@ -13,6 +13,16 @@ The `llamastack/distribution-remote-vllm` distribution consists of the following
 
 You can use this distribution if you have GPUs and want to run an independent vLLM server container for running inference.
 
+### Environment Variables
+
+The following environment variables can be configured:
+
+- `LLAMASTACK_PORT`: Port for the Llama Stack distribution server (default: `5001`)
+- `INFERENCE_MODEL`: Inference model loaded into the vLLM server (default: `meta-llama/Llama-3.2-3B-Instruct`)
+- `VLLM_URL`: URL of the vLLM server with the main inference model (default: `http://host.docker.internal:5100/v1`)
+- `MAX_TOKENS`: Maximum number of tokens for generation (default: `4096`)
+- `SAFETY_VLLM_URL`: URL of the vLLM server with the safety model (default: `http://host.docker.internal:5101/v1`)
+- `SAFETY_MODEL`: Name of the safety (Llama-Guard) model to use (default: `meta-llama/Llama-Guard-3-1B`)
 
 
 ## Setting up vLLM server
diff --git a/llama_stack/scripts/distro_codegen.py b/llama_stack/scripts/distro_codegen.py
index 47d2dc41c..f0d3bb4b9 100644
--- a/llama_stack/scripts/distro_codegen.py
+++ b/llama_stack/scripts/distro_codegen.py
@@ -6,6 +6,8 @@
 
 import concurrent.futures
 import importlib
+import subprocess
+import sys
 from functools import partial
 from pathlib import Path
 from typing import Iterator
@@ -55,6 +57,16 @@ def process_template(template_dir: Path, progress) -> None:
         raise e
 
 
+def check_for_changes() -> bool:
+    """Check if there are any uncommitted changes."""
+    result = subprocess.run(
+        ["git", "diff", "--exit-code"],
+        cwd=REPO_ROOT,
+        capture_output=True,
+    )
+    return result.returncode != 0
+
+
 def main():
     templates_dir = REPO_ROOT / "llama_stack" / "templates"
 
@@ -76,6 +88,15 @@ def main():
             list(executor.map(process_func, template_dirs))
             progress.update(task, advance=len(template_dirs))
 
+    if check_for_changes():
+        print(
+            "Distribution template changes detected. Please commit the changes.",
+            file=sys.stderr,
+        )
+        sys.exit(1)
+
+    sys.exit(0)
+
 
 if __name__ == "__main__":
     main()
diff --git a/llama_stack/templates/meta-reference-gpu/meta_reference.py b/llama_stack/templates/meta-reference-gpu/meta_reference.py
index 04bf889c2..f254bc920 100644
--- a/llama_stack/templates/meta-reference-gpu/meta_reference.py
+++ b/llama_stack/templates/meta-reference-gpu/meta_reference.py
@@ -75,7 +75,7 @@ def get_distribution_template() -> DistributionTemplate:
                 default_shields=[ShieldInput(shield_id="${env.SAFETY_MODEL}")],
             ),
         },
-        docker_compose_env_vars={
+        run_config_env_vars={
             "LLAMASTACK_PORT": (
                 "5001",
                 "Port for the Llama Stack distribution server",
diff --git a/llama_stack/templates/ollama/ollama.py b/llama_stack/templates/ollama/ollama.py
index 6e0056a77..b30c75bb5 100644
--- a/llama_stack/templates/ollama/ollama.py
+++ b/llama_stack/templates/ollama/ollama.py
@@ -63,7 +63,7 @@ def get_distribution_template() -> DistributionTemplate:
                 default_shields=[ShieldInput(shield_id="${env.SAFETY_MODEL}")],
             ),
         },
-        docker_compose_env_vars={
+        run_config_env_vars={
             "LLAMASTACK_PORT": (
                 "5001",
                 "Port for the Llama Stack distribution server",
diff --git a/llama_stack/templates/remote-vllm/vllm.py b/llama_stack/templates/remote-vllm/vllm.py
index ad3c1d8e2..c3858f7e5 100644
--- a/llama_stack/templates/remote-vllm/vllm.py
+++ b/llama_stack/templates/remote-vllm/vllm.py
@@ -71,7 +71,7 @@ def get_distribution_template() -> DistributionTemplate:
                 default_shields=[ShieldInput(shield_id="${env.SAFETY_MODEL}")],
            ),
         },
-        docker_compose_env_vars={
+        run_config_env_vars={
             "LLAMASTACK_PORT": (
                 "5001",
                 "Port for the Llama Stack distribution server",
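
The renamed `run_config_env_vars` field ties directly into the documentation changes above: each template maps an environment variable name to a `(default, description)` pair, and the regenerated "### Environment Variables" sections in the three markdown files reflect exactly those pairs. The real rendering presumably happens inside llama_stack's own DistributionTemplate codegen, which is not part of this diff; the sketch below, with a hypothetical render_env_vars_section helper, only illustrates how such a mapping corresponds to the generated bullet lists.

from typing import Dict, Tuple


# Hypothetical helper, for illustration only: the real Markdown generation is
# done by llama_stack's codegen, not by this function.
def render_env_vars_section(env_vars: Dict[str, Tuple[str, str]]) -> str:
    """Render a name -> (default, description) mapping as a Markdown section."""
    lines = [
        "### Environment Variables",
        "",
        "The following environment variables can be configured:",
        "",
    ]
    for name, (default, description) in env_vars.items():
        lines.append(f"- `{name}`: {description} (default: `{default}`)")
    return "\n".join(lines)


# Example mirroring part of the ollama template's run_config_env_vars.
print(render_env_vars_section({
    "LLAMASTACK_PORT": ("5001", "Port for the Llama Stack distribution server"),
    "OLLAMA_URL": ("http://127.0.0.1:11434", "URL of the Ollama server"),
}))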