From 354faa15cef47902fd51119b800e94681d444691 Mon Sep 17 00:00:00 2001
From: Yuan Tang
Date: Thu, 15 May 2025 10:50:56 -0400
Subject: [PATCH] feat: Allow printing usage information for install script
 (#2171)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

# What does this PR do?

This allows users to print the usage information for this script:

```
📚 Llama-Stack Deployment Script

Description:
  This script sets up and deploys Llama-Stack with Ollama integration in containers.
  It handles both Docker and Podman runtimes and includes automatic platform detection.

Usage:
  install.sh [OPTIONS]

Options:
  -p, --port PORT            Server port for Llama-Stack (default: 8321)
  -o, --ollama-port PORT     Ollama service port (default: 11434)
  -m, --model MODEL          Model alias to use (default: llama3.2:3b)
  -i, --image IMAGE          Server image (default: llamastack/distribution-ollama:0.2.2)
  -t, --timeout SECONDS      Service wait timeout in seconds (default: 300)
  -h, --help                 Show this help message

For more information:
  Documentation: https://llama-stack.readthedocs.io/
  GitHub: https://github.com/meta-llama/llama-stack

Report issues:
  https://github.com/meta-llama/llama-stack/issues
```

---------

Signed-off-by: Yuan Tang
Co-authored-by: Sébastien Han
---
 install.sh | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 61 insertions(+)

diff --git a/install.sh b/install.sh
index 614dbc2f2..e424925a6 100755
--- a/install.sh
+++ b/install.sh
@@ -38,6 +38,67 @@ wait_for_service() {
   return 0
 }
 
+usage() {
+  cat << EOF
+📚 Llama-Stack Deployment Script
+
+Description:
+  This script sets up and deploys Llama-Stack with Ollama integration in containers.
+  It handles both Docker and Podman runtimes and includes automatic platform detection.
+
+Usage:
+  $(basename "$0") [OPTIONS]
+
+Options:
+  -p, --port PORT            Server port for Llama-Stack (default: ${PORT})
+  -o, --ollama-port PORT     Ollama service port (default: ${OLLAMA_PORT})
+  -m, --model MODEL          Model alias to use (default: ${MODEL_ALIAS})
+  -i, --image IMAGE          Server image (default: ${SERVER_IMAGE})
+  -t, --timeout SECONDS      Service wait timeout in seconds (default: ${WAIT_TIMEOUT})
+  -h, --help                 Show this help message
+
+For more information:
+  Documentation: https://llama-stack.readthedocs.io/
+  GitHub: https://github.com/meta-llama/llama-stack
+
+Report issues:
+  https://github.com/meta-llama/llama-stack/issues
+EOF
+}
+
+# Parse command line arguments
+while [[ $# -gt 0 ]]; do
+  case $1 in
+    -h|--help)
+      usage
+      exit 0
+      ;;
+    -p|--port)
+      PORT="$2"
+      shift 2
+      ;;
+    -o|--ollama-port)
+      OLLAMA_PORT="$2"
+      shift 2
+      ;;
+    -m|--model)
+      MODEL_ALIAS="$2"
+      shift 2
+      ;;
+    -i|--image)
+      SERVER_IMAGE="$2"
+      shift 2
+      ;;
+    -t|--timeout)
+      WAIT_TIMEOUT="$2"
+      shift 2
+      ;;
+    *)
+      die "Unknown option: $1"
+      ;;
+  esac
+done
+
 if command -v docker &> /dev/null; then
   ENGINE="docker"
 elif command -v podman &> /dev/null; then
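
For readers trying out the patch, a minimal smoke test of the new interface might look like the following; the port, timeout, model, and Ollama-port values are illustrative, not taken from the patch:

```
# Print the help text added by this patch and exit
./install.sh --help

# Override the Llama-Stack port and allow services more time to come up
# (8080 and 600 are example values, not defaults)
./install.sh --port 8080 --timeout 600

# Short flags work the same way: choose the model alias and Ollama port
./install.sh -m llama3.2:3b -o 11435
```

Each flag handler runs `shift 2` to consume both the option and its value, so every option except `-h`/`--help` expects exactly one argument, and any unrecognized option aborts via the script's `die` helper.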