Update vllm compose and run YAMLs

Ashwin Bharambe 2024-11-12 12:46:32 -08:00
parent afe4a53ae8
commit 1245a625ce
2 changed files with 107 additions and 42 deletions

@@ -1,43 +1,83 @@
+# NOTES:
+#
+# This Docker Compose (and the associated run.yaml) assumes you will be
+# running in the default "bridged" network mode.
+#
+# If you need "host" network mode, please uncomment
+#   - network_mode: "host"
+# and comment the lines with port mapping
+#   - ports:
+#       - "5100:5100"
+#
+# Similarly change "host.docker.internal" to "localhost" in the run.yaml file
+#
 services:
-  vllm:
+  vllm-0:
     image: vllm/vllm-openai:latest
-    network_mode: "host"
     volumes:
       - $HOME/.cache/huggingface:/root/.cache/huggingface
+    # network_mode: "host"
     ports:
-      - "8000:8000"
+      - "5100:5100"
     devices:
       - nvidia.com/gpu=all
     environment:
-      - CUDA_VISIBLE_DEVICES=0
-    command: []
+      - CUDA_VISIBLE_DEVICES=4
+      - HUGGING_FACE_HUB_TOKEN=$HF_TOKEN
+    command: >
+      --gpu-memory-utilization 0.75
+      --model meta-llama/Llama-3.1-8B-Instruct
+      --enforce-eager
+      --max-model-len 8192
+      --max-num-seqs 16
+      --port 5100
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              capabilities: [gpu]
+    runtime: nvidia
+  vllm-1:
+    image: vllm/vllm-openai:latest
+    volumes:
+      - $HOME/.cache/huggingface:/root/.cache/huggingface
+    # network_mode: "host"
+    ports:
+      - "5101:5101"
+    devices:
+      - nvidia.com/gpu=all
+    environment:
+      - CUDA_VISIBLE_DEVICES=5
+      - HUGGING_FACE_HUB_TOKEN=$HF_TOKEN
+    command: >
+      --gpu-memory-utilization 0.75
+      --model meta-llama/Llama-Guard-3-1B
+      --enforce-eager
+      --max-model-len 8192
+      --max-num-seqs 16
+      --port 5101
     deploy:
       resources:
         reservations:
           devices:
             - driver: nvidia
-              # that's the closest analogue to --gpus; provide
-              # an integer amount of devices or 'all'
-              count: 1
-              # Devices are reserved using a list of capabilities, making
-              # capabilities the only required field. A device MUST
-              # satisfy all the requested capabilities for a successful
-              # reservation.
               capabilities: [gpu]
     runtime: nvidia
   llamastack:
     depends_on:
-      - vllm
-    image: llamastack/distribution-remote-vllm
-    network_mode: "host"
+      - vllm-0
+      - vllm-1
+    # image: llamastack/distribution-remote-vllm
+    image: localhost/distribution-remote-vllm:test-0.0.52rc3
     volumes:
       - ~/.llama:/root/.llama
-      # Link to ollama run.yaml file
-      - ./run.yaml:/root/llamastack-run-remote-vllm.yaml
+      - ~/local/llama-stack/distributions/remote-vllm/run.yaml:/root/llamastack-run-remote-vllm.yaml
+    # network_mode: "host"
     ports:
-      - "5000:5000"
-    # Hack: wait for vllm server to start before starting docker
-    entrypoint: bash -c "sleep 60; python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-remote-vllm.yaml"
+      - "5001:5001"
+    # Hack: wait for vLLM server to start before starting docker
+    entrypoint: bash -c "sleep 60; python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-remote-vllm.yaml --port 5001"
     deploy:
       restart_policy:
         condition: on-failure
@@ -45,4 +85,6 @@ services:
       max_attempts: 5
       window: 60s
 volumes:
-  vllm:
+  vllm-0:
+  vllm-1:
+  llamastack:
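
For anyone trying the updated compose file: a minimal bring-up sketch, assuming it is saved as compose.yaml on a GPU host with the NVIDIA container toolkit, that the run.yaml path mounted above exists, and that a valid Hugging Face token is exported (needed to pull the gated Llama weights). The /v1/models route is part of vLLM's OpenAI-compatible server; the ports follow the mappings in this commit.

    export HF_TOKEN=<your-huggingface-token>   # placeholder
    docker compose up -d

    # in the default bridged mode the mapped ports are reachable from the host
    curl http://localhost:5100/v1/models   # vllm-0, serving Llama-3.1-8B-Instruct
    curl http://localhost:5101/v1/models   # vllm-1, serving Llama-Guard-3-1B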
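
The entrypoint comment above calls the sleep 60 a hack. One possible alternative, not part of this commit: give each vLLM service a Compose healthcheck against vLLM's /health endpoint and have llamastack wait on it via the mapping form of depends_on. A sketch for vllm-0, assuming python3 is available inside the vllm-openai image (curl may not be) and with illustrative timings:

      vllm-0:
        healthcheck:
          # vLLM's OpenAI-compatible server exposes GET /health
          test: ["CMD-SHELL", "python3 -c \"import urllib.request as u; u.urlopen('http://localhost:5100/health')\""]
          interval: 10s
          retries: 30
          start_period: 40s
      llamastack:
        depends_on:
          vllm-0:
            condition: service_healthy

With that in place the fixed 60-second sleep in the entrypoint could be dropped; vllm-1 would get the same treatment on port 5101.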