chore(ci): remove redundant steps and simplify network setup

reluctantfuturist 2025-04-18 15:00:25 -07:00
parent d4e5d4c1fa
commit 19ad7ba513
2 changed files with 22 additions and 30 deletions
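
The simplification here is that the LlamaStack container now reaches Ollama through the container engine's host alias (host.docker.internal on Docker, host.containers.internal on Podman) instead of a user-defined bridge network. A minimal way to confirm that the alias resolves from inside a container, assuming Docker and an Ollama already listening on the host's port 11434, is sketched below; the curlimages/curl image is just a convenient throwaway client.

    # Sanity check of the host-gateway alias (assumes Docker; Podman normally
    # resolves host.containers.internal without an extra flag):
    docker run --rm --add-host=host.docker.internal:host-gateway \
      curlimages/curl -fsS http://host.docker.internal:11434/
    # The Ollama root endpoint is expected to answer with "Ollama is running".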

View file

@@ -23,15 +23,9 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
-      - name: Make installer executable
-        run: chmod +x install.sh
       - name: Run installer end-to-end
         run: ./install.sh
       - name: Wait for health endpoint
         run: |
           timeout 120 bash -c \
             'until curl -fsS http://localhost:8321/v1/health; do sleep 1; done'
-      - name: Cleanup
-        run: |
-          docker rm -f ollama-server llama-stack || true
-          docker network rm llama-stack-net || true
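
The CI runner is ephemeral, so the dedicated cleanup step is no longer needed there. For local runs of the installer an equivalent teardown can still be done by hand; a sketch using the container and volume names that appear in install.sh:

    # Local teardown after testing the installer (names taken from install.sh):
    docker rm -f ollama-server llama-stack 2>/dev/null || true
    docker volume rm ollama-models 2>/dev/null || true   # optional: also drops cached models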

View file

@@ -12,24 +12,28 @@ OLLAMA_PORT=11434
 MODEL_ALIAS="llama3.2:3b"
 SERVER_IMAGE="llamastack/distribution-ollama:0.2.2"
 WAIT_TIMEOUT=300
-NETWORK="llama-stack-net"
 
 log(){ printf "\e[1;32m%s\e[0m\n" "$*"; }
 die(){ printf "\e[1;31m❌ %s\e[0m\n" "$*" >&2; exit 1; }
 
-if command -v docker &> /dev/null; then ENGINE="docker"
-elif command -v podman &> /dev/null; then ENGINE="podman"
-else die "Docker or Podman is required. Install Docker: https://docs.docker.com/get-docker/ or Podman: https://podman.io/getting-started/installation"
-fi
-
-if ! $ENGINE network ls --filter name=^${NETWORK}$ --format '{{.Name}}' | grep -q "^${NETWORK}$"; then
-  log "Creating Docker network: ${NETWORK}"
-  $ENGINE network create "${NETWORK}"
-fi
+if command -v docker &> /dev/null; then
+  ENGINE="docker"
+  HOST_DNS="host.docker.internal"
+elif command -v podman &> /dev/null; then
+  ENGINE="podman"
+  HOST_DNS="host.containers.internal"
+else
+  die "Docker or Podman is required. Install Docker: https://docs.docker.com/get-docker/ or Podman: https://podman.io/getting-started/installation"
+fi
 
 # Clean up any leftovers from earlier runs
 for name in ollama-server llama-stack; do
-  $ENGINE ps -aq --filter "name=^${name}$" | xargs -r $ENGINE rm -f
+  # detect existing containers
+  ids=$($ENGINE ps -aq --filter "name=^${name}$")
+  if [ -n "$ids" ]; then
+    log "⚠️ Found existing container(s) for '${name}', removing..."
+    $ENGINE rm -f $ids
+  fi
 done
 
 ###############################################################################
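
The rewritten cleanup loop only removes containers when the name filter actually matches. A quick way to exercise that logic in isolation, assuming Docker and a disposable container that borrows the ollama-server name:

    # Simulate a leftover container, then run the same detection by hand:
    docker run -d --name ollama-server alpine sleep 300 >/dev/null
    ids=$(docker ps -aq --filter "name=^ollama-server$")
    [ -n "$ids" ] && docker rm -f $ids   # removes it only because the filter matched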
@@ -37,7 +41,6 @@ done
 ###############################################################################
 log "🦙 Starting Ollama…"
 $ENGINE run -d --name ollama-server \
-  --network "${NETWORK}" \
   -p "${OLLAMA_PORT}:11434" \
   -v ollama-models:/root/.ollama \
   ollama/ollama >/dev/null
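
With the user-defined network gone, Ollama sits on the engine's default bridge and is published to the host, which is what the host-gateway alias relies on. A couple of hedged checks once install.sh has started the container:

    # The published port should answer on the host:
    curl -fsS http://localhost:11434/      # expected to report that Ollama is running
    docker port ollama-server              # should show 11434/tcp mapped to the host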
@@ -47,25 +50,20 @@ timeout "$WAIT_TIMEOUT" bash -c \
   "until curl -fsS http://localhost:${OLLAMA_PORT}/ 2>/dev/null | grep -q 'Ollama'; do sleep 1; done" \
   || die "Ollama did not become ready in ${WAIT_TIMEOUT}s"
 
-if ! $ENGINE exec ollama-server ollama list | grep -q "$MODEL_ALIAS"; then
-  log "📦 Pulling model $MODEL_ALIAS"
-  $ENGINE exec ollama-server ollama pull "$MODEL_ALIAS"
-fi
-
-log "🚀 Launching model runtime…"
-$ENGINE exec -d ollama-server ollama run "$MODEL_ALIAS" --keepalive 60m
+log "📦 Ensuring model is pulled: ${MODEL_ALIAS}..."
+$ENGINE exec ollama-server ollama pull "${MODEL_ALIAS}"
 
 ###############################################################################
 # 2. LlamaStack
 ###############################################################################
 log "🦙📦 Starting LlamaStack…"
 $ENGINE run -d --name llama-stack \
-  --network "${NETWORK}" \
   -p "${PORT}:${PORT}" \
+  --add-host="${HOST_DNS}:host-gateway" \
   "${SERVER_IMAGE}" \
   --port "${PORT}" \
   --env INFERENCE_MODEL="${MODEL_ALIAS}" \
-  --env OLLAMA_URL="http://ollama-server:${OLLAMA_PORT}" >/dev/null
+  --env OLLAMA_URL="http://${HOST_DNS}:${OLLAMA_PORT}" >/dev/null
 
 log "⏳ Waiting for LlamaStack API…"
 timeout "$WAIT_TIMEOUT" bash -c \
@@ -78,7 +76,7 @@ timeout "$WAIT_TIMEOUT" bash -c \
 log ""
 log "🎉 LlamaStack is ready!"
 log "👉 API endpoint: http://localhost:${PORT}"
 log "📖 Documentation: https://llama-stack.readthedocs.io/en/latest/references/index.html"
-# Note: if youre calling from another container on the “${NETWORK}” network,
-# you can use the internal DNS name http://llama-stack:${PORT}
+log "💻 To access the llamastack CLI, exec into the container:"
+log "   $ENGINE exec -ti llama-stack bash"
 log ""