chore(installer): fully silence container output by redirecting stderr

Co-authored-by: Sébastien Han <seb@redhat.com>
Alexey Rybak authored 2025-04-24 10:21:07 -07:00; committed by reluctantfuturist
parent 876fd6e80b
commit 0ae46f9417

@@ -43,7 +43,7 @@ log "🦙 Starting Ollama…"
 $ENGINE run -d --name ollama-server \
   -p "${OLLAMA_PORT}:11434" \
   -v ollama-models:/root/.ollama \
-  ollama/ollama >/dev/null
+  ollama/ollama > /dev/null 2>&1
 
 log "⏳ Waiting for Ollama daemon…"
 timeout "$WAIT_TIMEOUT" bash -c \
@@ -51,7 +51,7 @@ timeout "$WAIT_TIMEOUT" bash -c \
   || die "Ollama did not become ready in ${WAIT_TIMEOUT}s"
 
 log "📦 Ensuring model is pulled: ${MODEL_ALIAS}..."
-$ENGINE exec ollama-server ollama pull "${MODEL_ALIAS}"
+$ENGINE exec ollama-server ollama pull "${MODEL_ALIAS}" > /dev/null 2>&1
 
 ###############################################################################
 # 2. LlamaStack
@@ -63,7 +63,7 @@ $ENGINE run -d --name llama-stack \
   "${SERVER_IMAGE}" \
   --port "${PORT}" \
   --env INFERENCE_MODEL="${MODEL_ALIAS}" \
-  --env OLLAMA_URL="http://${HOST_DNS}:${OLLAMA_PORT}" >/dev/null
+  --env OLLAMA_URL="http://${HOST_DNS}:${OLLAMA_PORT}" > /dev/null 2>&1
 
 log "⏳ Waiting for LlamaStack API…"
 timeout "$WAIT_TIMEOUT" bash -c \