Xi Yan 2024-10-17 19:37:22 -07:00
parent 293d8f2895
commit 542ffbee72

@@ -31,11 +31,11 @@ services:
     network_mode: "host"
     volumes:
       - ~/.llama:/root/.llama
-      # Link to TGI run.yaml file
+      # Link to ollama run.yaml file
       - ./ollama-run.yaml:/root/llamastack-run-ollama.yaml
     ports:
       - "5000:5000"
-    # Hack: wait for TGI server to start before starting docker
+    # Hack: wait for ollama server to start before starting docker
     entrypoint: bash -c "sleep 60; python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-ollama.yaml"
     restart_policy:
       condition: on-failure
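
For context, the lines touched by this commit sit inside the Llama Stack service of the ollama compose file. Below is a minimal sketch of how that service block might read after the change; the service name and image line are assumptions added for illustration and are not part of this diff:

services:
  llamastack:
    # Assumed image name for illustration; the actual image is not shown in this diff
    image: llamastack/distribution-ollama
    network_mode: "host"
    volumes:
      - ~/.llama:/root/.llama
      # Link to ollama run.yaml file
      - ./ollama-run.yaml:/root/llamastack-run-ollama.yaml
    ports:
      - "5000:5000"
    # Hack: wait for ollama server to start before starting docker
    entrypoint: bash -c "sleep 60; python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-ollama.yaml"
    restart_policy:
      condition: on-failure

With a compose file along these lines in place, the stack is typically started with "docker compose up", after which the server listens on port 5000 as mapped above.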