feat: Allow printing usage information for the install script (#2171)

# What does this PR do?

This allows users to print the usage information for this script:

```
📚 Llama-Stack Deployment Script

Description:
    This script sets up and deploys Llama-Stack with Ollama integration in containers.
    It handles both Docker and Podman runtimes and includes automatic platform detection.

Usage:
    install.sh [OPTIONS]

Options:
    -p, --port PORT            Server port for Llama-Stack (default: 8321)
    -o, --ollama-port PORT     Ollama service port (default: 11434)
    -m, --model MODEL          Model alias to use (default: llama3.2:3b)
    -i, --image IMAGE          Server image (default: llamastack/distribution-ollama:0.2.2)
    -t, --timeout SECONDS      Service wait timeout in seconds (default: 300)
    -h, --help                 Show this help message

For more information:
    Documentation: https://llama-stack.readthedocs.io/
    GitHub: https://github.com/meta-llama/llama-stack

Report issues:
    https://github.com/meta-llama/llama-stack/issues

```
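
For example, with `install.sh` in the current directory (the flag values below are illustrative, not defaults):

```
# print the help message shown above
./install.sh --help

# override a few of the listed defaults
./install.sh --port 8080 --model llama3.2:3b
```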

---------

Signed-off-by: Yuan Tang <terrytangyuan@gmail.com>
Co-authored-by: Sébastien Han <seb@redhat.com>

@@ -38,6 +38,67 @@ wait_for_service() {
    return 0
}

usage() {
    cat << EOF
📚 Llama-Stack Deployment Script

Description:
    This script sets up and deploys Llama-Stack with Ollama integration in containers.
    It handles both Docker and Podman runtimes and includes automatic platform detection.

Usage:
    $(basename "$0") [OPTIONS]

Options:
    -p, --port PORT            Server port for Llama-Stack (default: ${PORT})
    -o, --ollama-port PORT     Ollama service port (default: ${OLLAMA_PORT})
    -m, --model MODEL          Model alias to use (default: ${MODEL_ALIAS})
    -i, --image IMAGE          Server image (default: ${SERVER_IMAGE})
    -t, --timeout SECONDS      Service wait timeout in seconds (default: ${WAIT_TIMEOUT})
    -h, --help                 Show this help message

For more information:
    Documentation: https://llama-stack.readthedocs.io/
    GitHub: https://github.com/meta-llama/llama-stack

Report issues:
    https://github.com/meta-llama/llama-stack/issues
EOF
}

# Parse command line arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        -h|--help)
            usage
            exit 0
            ;;
        -p|--port)
            PORT="$2"
            shift 2
            ;;
        -o|--ollama-port)
            OLLAMA_PORT="$2"
            shift 2
            ;;
        -m|--model)
            MODEL_ALIAS="$2"
            shift 2
            ;;
        -i|--image)
            SERVER_IMAGE="$2"
            shift 2
            ;;
        -t|--timeout)
            WAIT_TIMEOUT="$2"
            shift 2
            ;;
        *)
            die "Unknown option: $1"
            ;;
    esac
done
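
# Detect an available container engine, preferring Docker over Podman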
if command -v docker &> /dev/null; then
    ENGINE="docker"
elif command -v podman &> /dev/null; then