services:
  vllm:
    image: vllm/vllm-openai:latest
    network_mode: "host"
    volumes:
      - $HOME/.cache/huggingface:/root/.cache/huggingface
    ports:
      - "8000:8000"
    devices:
      - nvidia.com/gpu=all
    environment:
      - CUDA_VISIBLE_DEVICES=0
    command: []
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              # This is the closest analogue to --gpus; provide
              # an integer number of devices or 'all'
              count: 1
              # Devices are reserved using a list of capabilities, making
              # capabilities the only required field. A device MUST
              # satisfy all the requested capabilities for a successful
              # reservation.
              capabilities: [gpu]
    runtime: nvidia
  llamastack:
    depends_on:
      - vllm
    image: llamastack/distribution-remote-vllm
    network_mode: "host"
    volumes:
      - ~/.llama:/root/.llama
      # Mount the run.yaml configuration file
      - ./run.yaml:/root/llamastack-run-remote-vllm.yaml
    ports:
      - "5000:5000"
    # Hack: wait for the vLLM server to start before launching the Llama Stack server
    entrypoint: bash -c "sleep 60; python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-remote-vllm.yaml"
    deploy:
      restart_policy:
        condition: on-failure
        delay: 3s
        max_attempts: 5
        window: 60s
volumes:
  vllm:
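A minimal way to bring this stack up, assuming the file above is saved as compose.yaml next to the referenced run.yaml and that the NVIDIA Container Toolkit is installed on the host (both filenames here are illustrative, not mandated by the config):

# Start vLLM and the Llama Stack server in the background
docker compose up -d

# Follow the Llama Stack logs to confirm it connected to the vLLM endpoint
docker compose logs -f llamastack

# Tear everything down when finished
docker compose down

Because both services use host networking, vLLM is reachable on port 8000 and the Llama Stack server on port 5000 of the host; the ports: mappings are effectively ignored in host mode.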