mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-08-02 08:44:44 +00:00)

chore(ci): remove redundant steps and simplify network setup

commit 19ad7ba513 (parent d4e5d4c1fa)
2 changed files with 22 additions and 30 deletions
.github/workflows/install-script-ci.yml (vendored): 6 changes
@@ -23,15 +23,9 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
-      - name: Make installer executable
-        run: chmod +x install.sh
       - name: Run installer end-to-end
         run: ./install.sh
-      - name: Wait for health endpoint
-        run: |
-          timeout 120 bash -c \
-            'until curl -fsS http://localhost:8321/v1/health; do sleep 1; done'
       - name: Cleanup
         run: |
           docker rm -f ollama-server llama-stack || true
           docker network rm llama-stack-net || true
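Note: the removed wait step duplicated a check the installer already performs before it exits (see the "⏳ Waiting for Llama‑Stack API…" lines in install.sh below). If an external probe is still wanted, the same poll-until-healthy idiom works standalone; a minimal sketch, reusing the endpoint and time budget from the deleted lines:

    # Poll the health endpoint once per second; give up after 120 s.
    # URL and timeout are taken from the removed CI step above.
    timeout 120 bash -c \
      'until curl -fsS http://localhost:8321/v1/health; do sleep 1; done' \
      || echo "server never became healthy" >&2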
install.sh: 46 changes
@@ -12,24 +12,28 @@ OLLAMA_PORT=11434
 MODEL_ALIAS="llama3.2:3b"
 SERVER_IMAGE="llamastack/distribution-ollama:0.2.2"
 WAIT_TIMEOUT=300
-NETWORK="llama-stack-net"
 
 log(){ printf "\e[1;32m%s\e[0m\n" "$*"; }
 die(){ printf "\e[1;31m❌ %s\e[0m\n" "$*" >&2; exit 1; }
 
-if command -v docker &> /dev/null; then ENGINE="docker"
-elif command -v podman &> /dev/null; then ENGINE="podman"
-else die "Docker or Podman is required. Install Docker: https://docs.docker.com/get-docker/ or Podman: https://podman.io/getting-started/installation"
-fi
-
-if ! $ENGINE network ls --filter name=^${NETWORK}$ --format '{{.Name}}' | grep -q "^${NETWORK}$"; then
-  log "Creating Docker network: ${NETWORK}"
-  $ENGINE network create "${NETWORK}"
+if command -v docker &> /dev/null; then
+  ENGINE="docker"
+  HOST_DNS="host.docker.internal"
+elif command -v podman &> /dev/null; then
+  ENGINE="podman"
+  HOST_DNS="host.containers.internal"
+else
+  die "Docker or Podman is required. Install Docker: https://docs.docker.com/get-docker/ or Podman: https://podman.io/getting-started/installation"
 fi
 
 # Clean up any leftovers from earlier runs
 for name in ollama-server llama-stack; do
-  $ENGINE ps -aq --filter "name=^${name}$" | xargs -r $ENGINE rm -f
+  # detect existing containers
+  ids=$($ENGINE ps -aq --filter "name=^${name}$")
+  if [ -n "$ids" ]; then
+    log "⚠️ Found existing container(s) for '${name}', removing..."
+    $ENGINE rm -f $ids
+  fi
 done
 
 ###############################################################################
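Note: HOST_DNS replaces the user-defined network here. Instead of container-to-container DNS on llama-stack-net, the stack now reaches Ollama through the port published on the host, using Docker's host-gateway alias (Podman ships host.containers.internal out of the box). A quick smoke test of that mapping; the curlimages/curl image is illustrative, not part of the script:

    # Throwaway container with the same --add-host mapping the script uses;
    # it should reach the Ollama port published on the host.
    docker run --rm --add-host=host.docker.internal:host-gateway \
      curlimages/curl -fsS http://host.docker.internal:11434/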
@@ -37,7 +41,6 @@ done
 ###############################################################################
 log "🦙 Starting Ollama…"
 $ENGINE run -d --name ollama-server \
-  --network "${NETWORK}" \
   -p "${OLLAMA_PORT}:11434" \
   -v ollama-models:/root/.ollama \
   ollama/ollama >/dev/null
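Note: because -p "${OLLAMA_PORT}:11434" is kept, readiness can also be verified from the host with the same probe the script runs in the next hunk; a one-line sketch:

    # Ollama's root endpoint answers "Ollama is running" once the server is up.
    curl -fsS http://localhost:11434/ | grep -q 'Ollama' && echo 'ready'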
@@ -47,25 +50,20 @@ timeout "$WAIT_TIMEOUT" bash -c \
   "until curl -fsS http://localhost:${OLLAMA_PORT}/ 2>/dev/null | grep -q 'Ollama'; do sleep 1; done" \
   || die "Ollama did not become ready in ${WAIT_TIMEOUT}s"
 
-if ! $ENGINE exec ollama-server ollama list | grep -q "$MODEL_ALIAS"; then
-  log "📦 Pulling model $MODEL_ALIAS…"
-  $ENGINE exec ollama-server ollama pull "$MODEL_ALIAS"
-fi
-
-log "🚀 Launching model runtime…"
-$ENGINE exec -d ollama-server ollama run "$MODEL_ALIAS" --keepalive 60m
+log "📦 Ensuring model is pulled: ${MODEL_ALIAS}..."
+$ENGINE exec ollama-server ollama pull "${MODEL_ALIAS}"
 
 ###############################################################################
 # 2. Llama‑Stack
 ###############################################################################
 log "🦙📦 Starting Llama‑Stack…"
 $ENGINE run -d --name llama-stack \
-  --network "${NETWORK}" \
   -p "${PORT}:${PORT}" \
+  --add-host="${HOST_DNS}:host-gateway" \
   "${SERVER_IMAGE}" \
   --port "${PORT}" \
   --env INFERENCE_MODEL="${MODEL_ALIAS}" \
-  --env OLLAMA_URL="http://ollama-server:${OLLAMA_PORT}" >/dev/null
+  --env OLLAMA_URL="http://${HOST_DNS}:${OLLAMA_PORT}" >/dev/null
 
 log "⏳ Waiting for Llama‑Stack API…"
 timeout "$WAIT_TIMEOUT" bash -c \
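Note: with --network gone, the only wiring between the two containers is OLLAMA_URL pointing at the host gateway. One way to confirm the stack container can actually reach Ollama is to repeat the probe from inside it; a hedged sketch (assumes curl exists in the server image):

    # From inside the llama-stack container, hit Ollama via the gateway alias.
    docker exec llama-stack curl -fsS "http://host.docker.internal:11434/"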
@@ -78,7 +76,7 @@ timeout "$WAIT_TIMEOUT" bash -c \
 log ""
 log "🎉 Llama‑Stack is ready!"
 log "👉 API endpoint: http://localhost:${PORT}"
-
-# Note: if you’re calling from another container on the “${NETWORK}” network,
-# you can use the internal DNS name http://llama-stack:${PORT}
-log ""
+log "📖 Documentation: https://llama-stack.readthedocs.io/en/latest/references/index.html"
+log "💻 To access the llama‑stack CLI, exec into the container:"
+log "   $ENGINE exec -ti llama-stack bash"
+log ""
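Note: for local runs, teardown mirrors the CI Cleanup step:

    # Remove both containers; || true keeps this idempotent if they never started.
    docker rm -f ollama-server llama-stack || true
    # Harmless no-op now that the script no longer creates the network.
    docker network rm llama-stack-net || true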