Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-06-28 02:53:30 +00:00)
Distributions updates (slight updates to ollama, add inline-vllm and remote-vllm) (#408)

* remote vllm distro
* add inline-vllm details, fix things
* Write some docs
This commit is contained in:
parent: ba82021d4b
commit: 4986e46188
19 changed files with 365 additions and 46 deletions
distributions/inline-vllm/build.yaml (new symbolic link, 1 line)
@@ -0,0 +1 @@
../../llama_stack/templates/inline-vllm/build.yaml
distributions/inline-vllm/compose.yaml (new file, 35 lines)
@@ -0,0 +1,35 @@
services:
  llamastack:
    image: llamastack/distribution-inline-vllm
    network_mode: "host"
    volumes:
      - ~/.llama:/root/.llama
      - ./run.yaml:/root/my-run.yaml
    ports:
      - "5000:5000"
    devices:
      - nvidia.com/gpu=all
    environment:
      - CUDA_VISIBLE_DEVICES=0
    command: []
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              # that's the closest analogue to --gpus; provide
              # an integer amount of devices or 'all'
              count: 1
              # Devices are reserved using a list of capabilities, making
              # capabilities the only required field. A device MUST
              # satisfy all the requested capabilities for a successful
              # reservation.
              capabilities: [gpu]
    runtime: nvidia
    entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/my-run.yaml"
    deploy:
      restart_policy:
        condition: on-failure
        delay: 3s
        max_attempts: 5
        window: 60s
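A quick usage sketch, assuming a checkout of the repository and the NVIDIA container toolkit on the host: the compose file above is started from its own directory, after which the Llama Stack API listens on port 5000.

```bash
# bring up the inline-vllm distribution defined in the compose file above
cd distributions/inline-vllm
docker compose up -d

# check that the llamastack service is running and follow its logs
docker compose ps
docker compose logs -f llamastack
```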
distributions/inline-vllm/run.yaml (new file, 66 lines)
@@ -0,0 +1,66 @@
version: '2'
built_at: '2024-10-08T17:40:45.325529'
image_name: local
docker_image: null
conda_env: local
apis:
- shields
- agents
- models
- memory
- memory_banks
- inference
- safety
providers:
  inference:
  - provider_id: vllm-inference
    provider_type: inline::vllm
    config:
      model: Llama3.2-3B-Instruct
      tensor_parallel_size: 1
      gpu_memory_utilization: 0.4
      enforce_eager: true
      max_tokens: 4096
  - provider_id: vllm-safety
    provider_type: inline::vllm
    config:
      model: Llama-Guard-3-1B
      tensor_parallel_size: 1
      gpu_memory_utilization: 0.2
      enforce_eager: true
      max_tokens: 4096
  safety:
  - provider_id: meta0
    provider_type: meta-reference
    config:
      llama_guard_shield:
        model: Llama-Guard-3-1B
        excluded_categories: []
      # Uncomment to use prompt guard
      # prompt_guard_shield:
      #   model: Prompt-Guard-86M
  memory:
  - provider_id: meta0
    provider_type: meta-reference
    config: {}
  # Uncomment to use pgvector
  # - provider_id: pgvector
  #   provider_type: remote::pgvector
  #   config:
  #     host: 127.0.0.1
  #     port: 5432
  #     db: postgres
  #     user: postgres
  #     password: mysecretpassword
  agents:
  - provider_id: meta0
    provider_type: meta-reference
    config:
      persistence_store:
        namespace: null
        type: sqlite
        db_path: ~/.llama/runtime/agents_store.db
  telemetry:
  - provider_id: meta0
    provider_type: meta-reference
    config: {}
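The two inline vLLM providers above share a single GPU: the inference engine reserves roughly 40% of device memory and the Llama Guard safety engine another 20%, i.e. 0.4 + 0.2 = 0.6 of the GPU, leaving headroom for CUDA overhead. A minimal sketch of running this config directly, assuming a conda build of the inline-vllm template and both models already downloaded under ~/.llama:

```bash
# pin both inline vLLM engines to GPU 0 and start the stack with the config above
CUDA_VISIBLE_DEVICES=0 llama stack run ./run.yaml
```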
distributions/ollama-gpu/build.yaml (new symbolic link, 1 line)
@@ -0,0 +1 @@
../../llama_stack/templates/ollama/build.yaml
distributions/remote-vllm/build.yaml (new symbolic link, 1 line)
@@ -0,0 +1 @@
../../llama_stack/templates/remote-vllm/build.yaml
distributions/remote-vllm/compose.yaml (new file, 48 lines)
@@ -0,0 +1,48 @@
services:
  vllm:
    image: vllm/vllm-openai:latest
    network_mode: "host"
    volumes:
      - $HOME/.cache/huggingface:/root/.cache/huggingface
    ports:
      - "8000:8000"
    devices:
      - nvidia.com/gpu=all
    environment:
      - CUDA_VISIBLE_DEVICES=0
    command: []
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              # that's the closest analogue to --gpus; provide
              # an integer amount of devices or 'all'
              count: 1
              # Devices are reserved using a list of capabilities, making
              # capabilities the only required field. A device MUST
              # satisfy all the requested capabilities for a successful
              # reservation.
              capabilities: [gpu]
    runtime: nvidia
  llamastack:
    depends_on:
      - vllm
    image: llamastack/distribution-remote-vllm
    network_mode: "host"
    volumes:
      - ~/.llama:/root/.llama
      # Link to ollama run.yaml file
      - ./run.yaml:/root/llamastack-run-remote-vllm.yaml
    ports:
      - "5000:5000"
    # Hack: wait for vllm server to start before starting docker
    entrypoint: bash -c "sleep 60; python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-remote-vllm.yaml"
    deploy:
      restart_policy:
        condition: on-failure
        delay: 3s
        max_attempts: 5
        window: 60s
volumes:
  vllm:
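The llamastack service above only waits a fixed 60 seconds for vLLM (the `sleep 60` in its entrypoint), so it is worth confirming that the vLLM container is actually serving before relying on the stack. A small check, assuming the host networking and default port used above:

```bash
# the vLLM OpenAI-compatible server answers on /v1/models once the model is loaded
curl http://localhost:8000/v1/models
```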
distributions/remote-vllm/run.yaml (new file, 46 lines)
@@ -0,0 +1,46 @@
version: '2'
built_at: '2024-10-08T17:40:45.325529'
image_name: local
docker_image: null
conda_env: local
apis:
- shields
- agents
- models
- memory
- memory_banks
- inference
- safety
providers:
  inference:
  - provider_id: vllm0
    provider_type: remote::vllm
    config:
      url: http://127.0.0.1:8000
  safety:
  - provider_id: meta0
    provider_type: meta-reference
    config:
      llama_guard_shield:
        model: Llama-Guard-3-1B
        excluded_categories: []
        disable_input_check: false
        disable_output_check: false
      prompt_guard_shield:
        model: Prompt-Guard-86M
  memory:
  - provider_id: meta0
    provider_type: meta-reference
    config: {}
  agents:
  - provider_id: meta0
    provider_type: meta-reference
    config:
      persistence_store:
        namespace: null
        type: sqlite
        db_path: ~/.llama/runtime/kvstore.db
  telemetry:
  - provider_id: meta0
    provider_type: meta-reference
    config: {}
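If the vLLM server runs on a different host than the Llama Stack container, only the `url` field above needs to change. A sketch, assuming the compose file above with its `./run.yaml` bind mount (edit the URL in the file first; the target hostname is whatever machine serves vLLM):

```bash
# restart only the Llama Stack service so it re-reads the mounted run.yaml
cd distributions/remote-vllm
docker compose restart llamastack
```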
Deleted symbolic link
@@ -1 +0,0 @@
-../../llama_stack/templates/vllm/build.yaml
Ollama distribution README (modified)
@@ -2,25 +2,35 @@

 The `llamastack/distribution-ollama` distribution consists of the following provider configurations.

 | **API** | **Inference** | **Agents** | **Memory** | **Safety** | **Telemetry** |
-|----------------- |---------------- |---------------- |---------------------------------- |---------------- |---------------- |
-| **Provider(s)** | remote::ollama | meta-reference | remote::pgvector, remote::chroma | remote::ollama | meta-reference |
+|----------------- |---------------- |---------------- |------------------------------------ |---------------- |---------------- |
+| **Provider(s)** | remote::ollama | meta-reference | remote::pgvector, remote::chromadb | meta-reference | meta-reference |

-### Docker: Start a Distribution (Single Node GPU)
+## Using Docker Compose
+
+You can use `docker compose` to start a Ollama server and connect with Llama Stack server in a single command.
+
+### Docker: Start the Distribution (Single Node regular Desktop machine)
+
+> [!NOTE]
+> This will start an ollama server with CPU only, please see [Ollama Documentations](https://github.com/ollama/ollama) for serving models on CPU only.
+
+```bash
+$ cd distributions/ollama; docker compose up
+```
+
+### Docker: Start a Distribution (Single Node with nvidia GPUs)

 > [!NOTE]
 > This assumes you have access to GPU to start a Ollama server with access to your GPU.

-```
-$ cd distributions/ollama/gpu
-$ ls
-compose.yaml run.yaml
-$ docker compose up
+```bash
+$ cd distributions/ollama-gpu; docker compose up
 ```

 You will see outputs similar to following ---
-```
+```bash
 [ollama] | [GIN] 2024/10/18 - 21:19:41 | 200 | 226.841µs | ::1 | GET "/api/ps"
 [ollama] | [GIN] 2024/10/18 - 21:19:42 | 200 | 60.908µs | ::1 | GET "/api/ps"
 INFO: Started server process [1]

@@ -34,36 +44,24 @@ INFO: Uvicorn running on http://[::]:5000 (Press CTRL+C to quit)
 ```

 To kill the server
-```
+```bash
 docker compose down
 ```

-### Docker: Start the Distribution (Single Node CPU)
+## Starting Ollama and Llama Stack separately

-> [!NOTE]
-> This will start an ollama server with CPU only, please see [Ollama Documentations](https://github.com/ollama/ollama) for serving models on CPU only.
+If you wish to separately spin up a Ollama server, and connect with Llama Stack, you should use the following commands.

-```
-$ cd distributions/ollama/cpu
-$ ls
-compose.yaml run.yaml
-$ docker compose up
-```
-
-### Conda: ollama run + llama stack run
-
-If you wish to separately spin up a Ollama server, and connect with Llama Stack, you may use the following commands.
-
-#### Start Ollama server.
-
-- Please check the [Ollama Documentations](https://github.com/ollama/ollama) for more details.
+#### Start Ollama server
+- Please check the [Ollama Documentation](https://github.com/ollama/ollama) for more details.

 **Via Docker**
-```
+```bash
 docker run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama
 ```

 **Via CLI**
-```
+```bash
 ollama run <model_id>
 ```

@@ -71,7 +69,7 @@ ollama run <model_id>

 **Via Conda**

-```
+```bash
 llama stack build --template ollama --image-type conda
 llama stack run ./gpu/run.yaml
 ```

@@ -82,7 +80,7 @@ docker run --network host -it -p 5000:5000 -v ~/.llama:/root/.llama -v ./gpu/run
 ```

 Make sure in your `run.yaml` file, your inference provider is pointing to the correct Ollama endpoint. E.g.
-```
+```yaml
 inference:
   - provider_id: ollama0
     provider_type: remote::ollama

@@ -96,7 +94,7 @@ inference:

 You can use ollama for managing model downloads.

-```
+```bash
 ollama pull llama3.1:8b-instruct-fp16
 ollama pull llama3.1:70b-instruct-fp16
 ```

@@ -106,7 +104,7 @@ ollama pull llama3.1:70b-instruct-fp16

 To serve a new model with `ollama`
-```
+```bash
 ollama run <model_name>
 ```

@@ -119,7 +117,7 @@ llama3.1:8b-instruct-fp16 4aacac419454 17 GB 100% GPU 4 minutes fro
 ```

 To verify that the model served by ollama is correctly connected to Llama Stack server
-```
+```bash
 $ llama-stack-client models list
 +----------------------+----------------------+---------------+-----------------------------------------------+
 | identifier | llama_model | provider_id | metadata |
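One extra check that fits the flow above: before pointing `run.yaml` at the Ollama endpoint, you can hit the same `/api/ps` route that appears in the server logs to make sure Ollama is reachable on its default port (11434, as mapped in the `docker run` command above):

```bash
# list the models currently loaded by the Ollama server
curl http://localhost:11434/api/ps
```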
Remote vLLM distribution README (new file, 83 lines)
@@ -0,0 +1,83 @@
# Remote vLLM Distribution

The `llamastack/distribution-remote-vllm` distribution consists of the following provider configurations.

| **API** | **Inference** | **Agents** | **Memory** | **Safety** | **Telemetry** |
|----------------- |---------------- |---------------- |------------------------------------ |---------------- |---------------- |
| **Provider(s)** | remote::vllm | meta-reference | remote::pgvector, remote::chromadb | meta-reference | meta-reference |

You can use this distribution if you have GPUs and want to run an independent vLLM server container for running inference.

## Using Docker Compose

You can use `docker compose` to start a vLLM container and Llama Stack server container together.

> [!NOTE]
> This assumes you have access to GPU to start a vLLM server with access to your GPU.

```bash
$ cd distributions/remote-vllm; docker compose up
```

You will see outputs similar to following ---
```
<TO BE FILLED>
```

To kill the server
```bash
docker compose down
```

## Starting vLLM and Llama Stack separately

You may want to start a vLLM server and connect with Llama Stack manually. There are two ways to start a vLLM server and connect with Llama Stack.

#### Start vLLM server.

```bash
docker run --runtime nvidia --gpus all \
    -v ~/.cache/huggingface:/root/.cache/huggingface \
    --env "HUGGING_FACE_HUB_TOKEN=<secret>" \
    -p 8000:8000 \
    --ipc=host \
    vllm/vllm-openai:latest \
    --model meta-llama/Llama-3.1-8B-Instruct
```

Please check the [vLLM Documentation](https://docs.vllm.ai/en/v0.5.5/serving/deploying_with_docker.html) for more details.

#### Start Llama Stack server pointing to your vLLM server

We have provided a template `run.yaml` file in the `distributions/remote-vllm` directory. Please make sure to modify the `inference.provider_id` to point to your vLLM server endpoint. As an example, if your vLLM server is running on `http://127.0.0.1:8000`, your `run.yaml` file should look like the following:
```yaml
inference:
  - provider_id: vllm0
    provider_type: remote::vllm
    config:
      url: http://127.0.0.1:8000
```

**Via Conda**

If you are using Conda, you can build and run the Llama Stack server with the following commands:
```bash
cd distributions/remote-vllm
llama stack build --template remote_vllm --image-type conda
llama stack run run.yaml
```

**Via Docker**

You can use the Llama Stack Docker image to start the server with the following command:
```bash
docker run --network host -it -p 5000:5000 \
  -v ~/.llama:/root/.llama \
  -v ./gpu/run.yaml:/root/llamastack-run-remote-vllm.yaml \
  --gpus=all \
  llamastack/distribution-remote-vllm \
  --yaml_config /root/llamastack-run-remote-vllm.yaml
```
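As a follow-up check, mirroring the verification step in the ollama README, once the Llama Stack server is up on port 5000 you can list the models it sees through the remote vLLM provider:

```bash
# should show the model(s) registered via the remote::vllm provider
llama-stack-client models list
```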
Documentation page (modified)
@@ -80,6 +80,11 @@ Llama3.1-8B-Instruct Llama3.2-1B Llama3.2-3B-Instruct Llama-
 :::

+:::{tab-item} vLLM
+##### System Requirements
+Access to Single-Node GPU to start a vLLM server.
+:::
+
 :::{tab-item} tgi
 ##### System Requirements
 Access to Single-Node GPU to start a TGI server.

@@ -119,6 +124,22 @@ docker run -it -p 5000:5000 -v ~/.llama:/root/.llama -v ./run.yaml:/root/my-run.
 ```
 :::

+:::{tab-item} vLLM
+```
+$ cd llama-stack/distributions/remote-vllm && docker compose up
+```
+
+The script will first start up vLLM server on port 8000, then start up Llama Stack distribution server hooking up to it for inference. You should see the following outputs --
+```
+<TO BE FILLED>
+```
+
+To kill the server
+```
+docker compose down
+```
+:::
+
 :::{tab-item} tgi
 ```
 $ cd llama-stack/distributions/tgi && docker compose up

@@ -144,7 +165,11 @@ docker compose down

 :::{tab-item} ollama
 ```
-$ cd llama-stack/distributions/ollama/cpu && docker compose up
+$ cd llama-stack/distributions/ollama && docker compose up
+
+# OR
+
+$ cd llama-stack/distributions/ollama-gpu && docker compose up
 ```

 You will see outputs similar to following ---
llama_stack/providers/registry/inference.py (modified)
@@ -45,7 +45,7 @@ def available_providers() -> List[ProviderSpec]:
         ),
         InlineProviderSpec(
             api=Api.inference,
-            provider_type="vllm",
+            provider_type="inline::vllm",
             pip_packages=[
                 "vllm",
             ],
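Because this renames the inline provider type, any run config that still says `provider_type: vllm` for inline inference needs the new `inline::vllm` value (the `distributions/inline-vllm/run.yaml` added in this commit already uses it). A quick way to look for stragglers in a checkout:

```bash
# find configs still using the old inline provider type name
grep -rn "provider_type: vllm$" distributions/ llama_stack/
```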
llama_stack/templates/inline-vllm/build.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
name: meta-reference-gpu
distribution_spec:
  docker_image: pytorch/pytorch:2.5.0-cuda12.4-cudnn9-runtime
  description: Use code from `llama_stack` itself to serve all llama stack APIs
  providers:
    inference: meta-reference
    memory:
    - meta-reference
    - remote::chromadb
    - remote::pgvector
    safety: meta-reference
    agents: meta-reference
    telemetry: meta-reference
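Assuming this template is picked up by name from `llama_stack/templates/inline-vllm/` (the exact template name accepted by the CLI is an assumption here), it would be consumed the same way as the other templates in this commit:

```bash
# build a conda environment from the inline vLLM template (template name assumed)
llama stack build --template inline-vllm --image-type conda
```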
llama_stack/templates/remote-vllm/build.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
name: remote-vllm
distribution_spec:
  description: Use (an external) vLLM server for running LLM inference
  providers:
    inference: remote::vllm
    memory:
    - meta-reference
    - remote::chromadb
    - remote::pgvector
    safety: meta-reference
    agents: meta-reference
    telemetry: meta-reference
llama_stack/templates/vllm/build.yaml (deleted)
@@ -1,9 +0,0 @@
-name: vllm
-distribution_spec:
-  description: Like local, but use vLLM for running LLM inference
-  providers:
-    inference: vllm
-    memory: meta-reference
-    safety: meta-reference
-    agents: meta-reference
-    telemetry: meta-reference