Add ollama/pull-models.sh

Ashwin Bharambe 2024-11-18 10:57:20 -08:00
parent fa1d29cfdc
commit 1ecaf2cb3c
16 changed files with 305 additions and 289 deletions

@@ -1,30 +1,71 @@
services:
ollama:
image: ollama/ollama:latest
network_mode: "host"
network_mode: ${NETWORK_MODE:-bridge}
volumes:
- ollama:/root/.ollama # this solution synchronizes with the docker volume and loads the model rocket fast
- ~/.ollama:/root/.ollama
ports:
- "11434:11434"
environment:
OLLAMA_DEBUG: 1
command: []
deploy:
resources:
limits:
memory: 8G # Set maximum memory
reservations:
memory: 8G # Set minimum memory reservation
# healthcheck:
# # ugh, no CURL in ollama image
# test: ["CMD", "curl", "-f", "http://ollama:11434"]
# interval: 10s
# timeout: 5s
# retries: 5
ollama-init:
image: ollama/ollama:latest
depends_on:
- ollama
# condition: service_healthy
network_mode: ${NETWORK_MODE:-bridge}
environment:
- OLLAMA_HOST=ollama
- INFERENCE_MODEL=${INFERENCE_MODEL}
- SAFETY_MODEL=${SAFETY_MODEL:-}
volumes:
- ~/.ollama:/root/.ollama
- ./pull-models.sh:/pull-models.sh
entrypoint: ["/pull-models.sh"]
llamastack:
depends_on:
- ollama
image: llamastack/distribution-ollama
network_mode: "host"
ollama:
condition: service_started
ollama-init:
condition: service_started
image: ${LLAMA_STACK_IMAGE:-llamastack/distribution-ollama}
network_mode: ${NETWORK_MODE:-bridge}
volumes:
- ~/.llama:/root/.llama
# Link to ollama run.yaml file
- ./run.yaml:/root/my-run.yaml
- ~/local/llama-stack/:/app/llama-stack-source
- ./run${SAFETY_MODEL:+-with-safety}.yaml:/root/my-run.yaml
ports:
- "5000:5000"
# Hack: wait for ollama server to start before starting docker
entrypoint: bash -c "sleep 60; python -m llama_stack.distribution.server.server --yaml_config /root/my-run.yaml"
- "${LLAMA_STACK_PORT:-5001}:${LLAMA_STACK_PORT:-5001}"
environment:
- INFERENCE_MODEL=${INFERENCE_MODEL}
- SAFETY_MODEL=${SAFETY_MODEL:-}
- OLLAMA_URL=http://ollama:11434
entrypoint: >
python -m llama_stack.distribution.server.server /root/my-run.yaml \
--port ${LLAMA_STACK_PORT:-5001}
deploy:
restart_policy:
condition: on-failure
delay: 3s
max_attempts: 5
delay: 10s
max_attempts: 3
window: 60s
volumes:
ollama:
ollama-init:
llamastack:
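
Taken together, the compose changes parameterize the network mode, stack image, and port, and add an ollama-init service that pre-pulls models before the llamastack container starts. A minimal usage sketch (the model tags are placeholder examples and the docker compose invocation is assumed; only the variable names come from the diff):

# Example only: the model tags below are illustrative, not taken from this commit.
export INFERENCE_MODEL=llama3.2:3b-instruct-fp16
export SAFETY_MODEL=llama-guard3:1b        # leave unset to run without a safety model
export LLAMA_STACK_PORT=5001               # matches the compose default
NETWORK_MODE=bridge docker compose up -d

# The ollama image ships without curl (hence the commented-out healthcheck),
# so probe the server from the host instead:
curl -f http://localhost:11434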

@@ -0,0 +1,18 @@
#!/bin/sh
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
echo "Preloading (${INFERENCE_MODEL}, ${SAFETY_MODEL})..."
for model in ${INFERENCE_MODEL} ${SAFETY_MODEL}; do
  echo "Preloading $model..."
  if ! ollama run "$model"; then
    echo "Failed to pull and run $model"
    exit 1
  fi
done
echo "All models pulled successfully"

@@ -1,13 +1,12 @@
version: '2'
built_at: 2024-11-17 19:33:00
image_name: ollama
docker_image: null
conda_env: null
apis:
- memory
- agents
- safety
- inference
- memory
- safety
- telemetry
providers:
inference:
@@ -46,11 +45,11 @@ metadata_store:
models:
- metadata: {}
model_id: ${env.INFERENCE_MODEL}
provider_id: ollama-inference
provider_id: ollama
provider_model_id: null
- metadata: {}
model_id: ${env.SAFETY_MODEL}
provider_id: ollama-safety
provider_id: ollama
provider_model_id: null
shields:
- params: null
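
The run config above is evidently the with-safety variant: it registers both ${env.INFERENCE_MODEL} and ${env.SAFETY_MODEL}, and both now point at the single ollama provider instead of separate ollama-inference / ollama-safety ids. Which run file the compose service mounts is decided by the ${SAFETY_MODEL:+-with-safety} expansion in the volume entry above; a small sketch of how that POSIX parameter expansion behaves:

# ${VAR:+word} expands to "word" only when VAR is set and non-empty.
SAFETY_MODEL=llama-guard3:1b                      # example value
echo "run${SAFETY_MODEL:+-with-safety}.yaml"      # prints run-with-safety.yaml

unset SAFETY_MODEL
echo "run${SAFETY_MODEL:+-with-safety}.yaml"      # prints run.yaml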

@@ -3,10 +3,10 @@ image_name: ollama
docker_image: null
conda_env: null
apis:
- memory
- agents
- safety
- inference
- memory
- safety
- telemetry
providers:
inference:
@@ -45,7 +45,7 @@ metadata_store:
models:
- metadata: {}
model_id: ${env.INFERENCE_MODEL}
provider_id: ollama-inference
provider_id: ollama
provider_model_id: null
shields: []
memory_banks: []
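
This last hunk makes the same provider_id change in the safety-free run config, which registers only ${env.INFERENCE_MODEL}. The ${env.*} placeholders are resolved from environment variables at startup, which is why the compose file exports INFERENCE_MODEL, SAFETY_MODEL, and OLLAMA_URL before launching llamastack. Outside of compose, the entrypoint from the diff can be invoked directly, roughly as follows (the config path and model tag are illustrative):

# Mirrors the llamastack entrypoint shown in the compose diff above.
INFERENCE_MODEL=llama3.2:3b-instruct-fp16 \
OLLAMA_URL=http://localhost:11434 \
python -m llama_stack.distribution.server.server ./run.yaml \
  --port "${LLAMA_STACK_PORT:-5001}"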