mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-07-23 21:04:29 +00:00
fix(install): explicit docker.io usage (#2850)
# What does this PR do? When podman is used and the registry is omitted, podman will prompt the user. However, because we pipe podman's output to /dev/null, the user never sees the prompt; the script ends abruptly, which is confusing. This commit explicitly uses the docker.io registry for the ollama image and the llama-stack image so that the prompt is avoided. <!-- If resolving an issue, uncomment and update the line below --> <!-- Closes #[issue-number] --> ## Test Plan <!-- Describe the tests you ran to verify your changes with result summaries. *Provide clear instructions so the plan can be easily re-executed.* --> I ran the script on a machine with podman and the issue was resolved ## Image Before the fix, this is what would happen: <img width="748" height="95" alt="image" src="https://github.com/user-attachments/assets/9c609f88-c0a8-45e7-a789-834f64f601e5" /> Signed-off-by: Omer Tuchfeld <omer@tuchfeld.dev>
This commit is contained in:
parent
20c3197952
commit
c1a63fcd87
1 changed file with 2 additions and 2 deletions
|
@ -15,7 +15,7 @@ set -Eeuo pipefail
|
|||
PORT=8321
|
||||
OLLAMA_PORT=11434
|
||||
MODEL_ALIAS="llama3.2:3b"
|
||||
SERVER_IMAGE="llamastack/distribution-ollama:0.2.2"
|
||||
SERVER_IMAGE="docker.io/llamastack/distribution-ollama:0.2.2"
|
||||
WAIT_TIMEOUT=300
|
||||
|
||||
# log: print all arguments as one line in bold green to stdout.
# Uses \033 (octal ESC) instead of \e: \e in a printf format is a
# bash extension, while \033 is portable to POSIX printf as well.
log(){ printf "\033[1;32m%s\033[0m\n" "$*"; }
|
||||
|
@ -165,7 +165,7 @@ log "🦙 Starting Ollama…"
|
|||
$ENGINE run -d "${PLATFORM_OPTS[@]}" --name ollama-server \
|
||||
--network llama-net \
|
||||
-p "${OLLAMA_PORT}:${OLLAMA_PORT}" \
|
||||
ollama/ollama > /dev/null 2>&1
|
||||
docker.io/ollama/ollama > /dev/null 2>&1
|
||||
|
||||
if ! wait_for_service "http://localhost:${OLLAMA_PORT}/" "Ollama" "$WAIT_TIMEOUT" "Ollama daemon"; then
|
||||
log "❌ Ollama daemon did not become ready in ${WAIT_TIMEOUT}s; dumping container logs:"
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue