mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-07-24 13:19:54 +00:00)
Update more distribution docs to be simpler and partially codegen'ed
This commit is contained in:
parent e84d4436b5
commit 2411a44833

51 changed files with 1188 additions and 291 deletions
distributions/vllm-gpu/compose.yaml (new file, 35 lines)

@@ -0,0 +1,35 @@
```yaml
services:
  llamastack:
    image: llamastack/distribution-inline-vllm
    network_mode: "host"
    volumes:
      - ~/.llama:/root/.llama
      - ./run.yaml:/root/my-run.yaml
    ports:
      - "5000:5000"
    devices:
      - nvidia.com/gpu=all
    environment:
      - CUDA_VISIBLE_DEVICES=0
    command: []
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              # that's the closest analogue to --gpus; provide
              # an integer amount of devices or 'all'
              count: 1
              # Devices are reserved using a list of capabilities, making
              # capabilities the only required field. A device MUST
              # satisfy all the requested capabilities for a successful
              # reservation.
              capabilities: [gpu]
    runtime: nvidia
    entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/my-run.yaml"
    deploy:
      restart_policy:
        condition: on-failure
        delay: 3s
        max_attempts: 5
        window: 60s
```
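One thing worth flagging in the committed file: the `llamastack` service defines the `deploy:` key twice, once for the GPU reservation and once for the restart policy. Duplicate mapping keys are invalid YAML, and depending on the parser this is either rejected outright or silently resolved last-one-wins (dropping the GPU reservation). The file also requests GPU access three different ways (`devices: nvidia.com/gpu=all`, the `deploy.resources.reservations` block, and `runtime: nvidia`); which one takes effect depends on the Docker and Compose versions in use. A minimal sketch of the two `deploy:` blocks merged into one, leaving the rest of the service unchanged:

```yaml
    # single deploy block combining both halves of the original file
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
      restart_policy:
        condition: on-failure
        delay: 3s
        max_attempts: 5
        window: 60s
```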
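To try this distribution locally, a plausible flow (assuming Docker Compose v2 and the NVIDIA Container Toolkit are installed, and that a suitable run.yaml sits next to the compose file, since the entrypoint reads it from /root/my-run.yaml inside the container) would be:

```sh
cd distributions/vllm-gpu

# start the stack; host networking exposes the server on localhost:5000
docker compose up

# ...and tear it down again when done
docker compose down
```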