Distribution updates (slight updates to ollama; add inline-vllm and remote-vllm) (#408)
* add the remote-vllm distribution
* add inline-vllm details, fix related issues
* write some docs
parent ba82021d4b, commit 4986e46188
19 changed files with 365 additions and 46 deletions
distributions/remote-vllm/build.yaml (symbolic link, 1 addition)
@@ -0,0 +1 @@
../../llama_stack/templates/remote-vllm/build.yaml
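The build.yaml is a symlink into llama_stack/templates, so the distribution stays in sync with the template it was generated from. As a rough sketch, an image can be built from that template with the llama CLI (assuming the --template and --image-type flags behave as in this version of the CLI; flag names may differ across releases):

    llama stack build --template remote-vllm --image-type docker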
distributions/remote-vllm/compose.yaml (normal file, 48 additions)
@@ -0,0 +1,48 @@
services:
  vllm:
    image: vllm/vllm-openai:latest
    network_mode: "host"
    volumes:
      - $HOME/.cache/huggingface:/root/.cache/huggingface
    ports:
      - "8000:8000"
    devices:
      - nvidia.com/gpu=all
    environment:
      - CUDA_VISIBLE_DEVICES=0
    command: []
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              # that's the closest analogue to --gpus; provide
              # an integer amount of devices or 'all'
              count: 1
              # Devices are reserved using a list of capabilities, making
              # capabilities the only required field. A device MUST
              # satisfy all the requested capabilities for a successful
              # reservation.
              capabilities: [gpu]
    runtime: nvidia
  llamastack:
    depends_on:
      - vllm
    image: llamastack/distribution-remote-vllm
    network_mode: "host"
    volumes:
      - ~/.llama:/root/.llama
      # Mount the run.yaml for this distribution into the container
      - ./run.yaml:/root/llamastack-run-remote-vllm.yaml
    ports:
      - "5000:5000"
    # Hack: wait for the vllm server to start before launching the llamastack server
    entrypoint: bash -c "sleep 60; python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-remote-vllm.yaml"
    deploy:
      restart_policy:
        condition: on-failure
        delay: 3s
        max_attempts: 5
        window: 60s
volumes:
  vllm:
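With both services defined, a minimal way to try the distribution is to run compose from this directory (a sketch that assumes a host with an NVIDIA GPU and the NVIDIA container toolkit installed, since the vllm service reserves a GPU):

    cd distributions/remote-vllm
    docker compose up

Note the llamastack entrypoint sleeps for 60 seconds as a crude wait for the vLLM server to come up; the restart policy retries up to 5 times if it still isn't ready.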
distributions/remote-vllm/run.yaml (normal file, 46 additions)
@@ -0,0 +1,46 @@
version: '2'
built_at: '2024-10-08T17:40:45.325529'
image_name: local
docker_image: null
conda_env: local
apis:
- shields
- agents
- models
- memory
- memory_banks
- inference
- safety
providers:
  inference:
  - provider_id: vllm0
    provider_type: remote::vllm
    config:
      url: http://127.0.0.1:8000
  safety:
  - provider_id: meta0
    provider_type: meta-reference
    config:
      llama_guard_shield:
        model: Llama-Guard-3-1B
        excluded_categories: []
        disable_input_check: false
        disable_output_check: false
      prompt_guard_shield:
        model: Prompt-Guard-86M
  memory:
  - provider_id: meta0
    provider_type: meta-reference
    config: {}
  agents:
  - provider_id: meta0
    provider_type: meta-reference
    config:
      persistence_store:
        namespace: null
        type: sqlite
        db_path: ~/.llama/runtime/kvstore.db
  telemetry:
  - provider_id: meta0
    provider_type: meta-reference
    config: {}
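The remote::vllm provider only needs the url above to reach the vLLM container, which serves an OpenAI-compatible API. A quick sanity check that the endpoint is up (the /v1/models route is part of vLLM's OpenAI-compatible server; the model listed depends on how the container was started):

    curl http://127.0.0.1:8000/v1/models

Once that responds, the Llama Stack server started by compose.yaml should be reachable on port 5000, using this file as its --yaml_config.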