llama-stack-mirror/docker/llamastack/ollama/compose.yaml
2024-11-08 14:55:04 -08:00

19 lines
539 B
YAML

# Compose stack: llama-stack server backed by an ollama service
# (the `ollama` service itself is expected to be defined elsewhere /
# in a sibling compose file — only referenced here via depends_on).
services:
  llamastack:
    # Start ordering only — depends_on does not wait for ollama to be
    # *ready*, hence the sleep hack in the entrypoint below.
    depends_on:
      - ollama
    image: llamastack/distribution-ollama
    volumes:
      # Persist llama model/config state from the host
      - ~/.llama:/root/.llama
      # Link to run.yaml file
      - ./run.yaml:/root/my-run.yaml
    ports:
      - "5000:5000"
    # Hack: wait for server to start before starting docker
    entrypoint: bash -c "sleep 60; python -m llama_stack.distribution.server.server --yaml_config /root/my-run.yaml"
    # NOTE(review): deploy.restart_policy is only honored by swarm /
    # compose-spec-aware runners; plain `docker compose up` may ignore it —
    # confirm, or use the top-level `restart:` key instead.
    deploy:
      restart_policy:
        condition: on-failure
        delay: 3s
        max_attempts: 5
        window: 60s