#!/usr/bin/env bash
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

set -Eeuo pipefail

PORT=8321
OLLAMA_PORT=11434
MODEL_ALIAS="llama3.2:3b"
SERVER_IMAGE="llamastack/distribution-ollama:0.2.2"
WAIT_TIMEOUT=300

log(){ printf "\e[1;32m%s\e[0m\n" "$*"; }
die(){ printf "\e[1;31m❌ %s\e[0m\n" "$*" >&2; exit 1; }

# Poll a URL until its response body matches a pattern, failing after $timeout
# seconds. Each iteration makes one short probe; the loop provides the retry
# cadence and enforces the overall deadline.
wait_for_service() {
  local url="$1"
  local pattern="$2"
  local timeout="$3"
  local name="$4"
  local start ts
  log "⏳ Waiting for ${name}…"
  start=$(date +%s)
  while true; do
    if curl --silent --fail --max-time 5 "$url" 2>/dev/null | grep -q "$pattern"; then
      break
    fi
    ts=$(date +%s)
    if (( ts - start >= timeout )); then
      return 1
    fi
    printf '.'
    sleep 1
  done
  return 0
}

# Pick a container engine. The DNS alias a container uses to reach the host
# differs between the two engines.
if command -v docker &> /dev/null; then
  ENGINE="docker"
  HOST_DNS="host.docker.internal"
elif command -v podman &> /dev/null; then
  ENGINE="podman"
  HOST_DNS="host.containers.internal"
else
  die "Docker or Podman is required. Install Docker: https://docs.docker.com/get-docker/ or Podman: https://podman.io/getting-started/installation"
fi

# Explicitly set the platform for the host architecture: on arm64 hosts the
# amd64 server image is run under emulation.
HOST_ARCH="$(uname -m)"
if [ "$HOST_ARCH" = "arm64" ]; then
  if [ "$ENGINE" = "docker" ]; then
    PLATFORM_OPTS=( --platform linux/amd64 )
  else
    PLATFORM_OPTS=( --os linux --arch amd64 )
  fi
else
  PLATFORM_OPTS=()
fi

# macOS + Podman: ensure the VM is running before we try to launch containers
if [ "$ENGINE" = "podman" ] && [ "$(uname -s)" = "Darwin" ]; then
  if ! podman info &>/dev/null; then
    log "⌛️ Initializing Podman VM…"
    podman machine init -q &>/dev/null || true
    podman machine start -q &>/dev/null || true
    log "⌛️ Waiting for Podman API…"
    until podman info &>/dev/null; do
      sleep 1
    done
    log "✅ Podman VM is up"
  fi
fi

# Clean up any leftovers from earlier runs
for name in ollama-server llama-stack; do
  ids=$($ENGINE ps -aq --filter "name=^${name}$")
  if [ -n "$ids" ]; then
    log "⚠️ Found existing container(s) for '${name}', removing…"
    $ENGINE rm -f "$ids" > /dev/null 2>&1
  fi
done

###############################################################################
# 1. Ollama
###############################################################################
log "🦙 Starting Ollama…"
$ENGINE run -d "${PLATFORM_OPTS[@]}" --name ollama-server \
  -p "${OLLAMA_PORT}:11434" \
  ollama/ollama > /dev/null 2>&1

# The Ollama root endpoint responds with "Ollama is running" once the daemon
# is ready, so we grep for that marker.
if ! wait_for_service "http://localhost:${OLLAMA_PORT}/" "Ollama" "$WAIT_TIMEOUT" "Ollama daemon"; then
  log "❌ Ollama daemon did not become ready in ${WAIT_TIMEOUT}s; dumping container logs:"
  $ENGINE logs --tail 200 ollama-server
  die "Ollama startup failed"
fi

log "📦 Ensuring model is pulled: ${MODEL_ALIAS}…"
if ! $ENGINE exec ollama-server ollama pull "${MODEL_ALIAS}" > /dev/null 2>&1; then
  log "❌ Failed to pull model ${MODEL_ALIAS}; dumping container logs:"
  $ENGINE logs --tail 200 ollama-server
  die "Model pull failed"
fi
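
# Optional sanity check at this point (a sketch, not part of the install flow;
# /api/tags is Ollama's list-local-models endpoint, and "ollama list" is the
# CLI equivalent run inside the container):
#   curl http://localhost:11434/api/tags
#   $ENGINE exec ollama-server ollama list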

###############################################################################
# 2. Llama-Stack
###############################################################################
# Docker needs an explicit host-gateway mapping so the container can reach
# Ollama on the host; Podman typically resolves host.containers.internal on
# its own.
if [ "$ENGINE" = "docker" ]; then
  NET_OPTS=( -p "${PORT}:${PORT}" --add-host="${HOST_DNS}:host-gateway" )
elif [ "$ENGINE" = "podman" ]; then
  NET_OPTS=( -p "${PORT}:${PORT}" )
fi

# Everything after "${SERVER_IMAGE}" is consumed by the image's entrypoint
# (the llama-stack server), not by the container engine.
cmd=( run -d "${PLATFORM_OPTS[@]}" --name llama-stack
      "${NET_OPTS[@]}"
      "${SERVER_IMAGE}" --port "${PORT}"
      --env INFERENCE_MODEL="${MODEL_ALIAS}"
      --env OLLAMA_URL="http://${HOST_DNS}:${OLLAMA_PORT}" )

log "🦙📦 Starting Llama-Stack…"
$ENGINE "${cmd[@]}" > /dev/null 2>&1

if ! wait_for_service "http://127.0.0.1:${PORT}/v1/health" "OK" "$WAIT_TIMEOUT" "Llama-Stack API"; then
  log "❌ Llama-Stack did not become ready in ${WAIT_TIMEOUT}s; dumping container logs:"
  $ENGINE logs --tail 200 llama-stack
  die "Llama-Stack startup failed"
fi

###############################################################################
# Done
###############################################################################
log ""
log "🎉 Llama-Stack is ready!"
log "👉 API endpoint: http://localhost:${PORT}"
log "📖 Documentation: https://llama-stack.readthedocs.io/en/latest/references/index.html"
log "💻 To access the llama-stack CLI, exec into the container:"
log "   $ENGINE exec -ti llama-stack bash"
log ""
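
# Optional smoke test after the script completes (a sketch; /v1/health is the
# same endpoint polled above, and /v1/models lists registered models, whose
# ids may differ from MODEL_ALIAS):
#   curl http://localhost:8321/v1/health
#   curl http://localhost:8321/v1/models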