diff --git a/distributions/inline-vllm/build.yaml b/distributions/inline-vllm/build.yaml
new file mode 120000
index 000000000..a95d34c1f
--- /dev/null
+++ b/distributions/inline-vllm/build.yaml
@@ -0,0 +1 @@
+../../llama_stack/templates/inline-vllm/build.yaml
\ No newline at end of file
diff --git a/distributions/inline-vllm/compose.yaml b/distributions/inline-vllm/compose.yaml
new file mode 100644
index 000000000..f8779c9ce
--- /dev/null
+++ b/distributions/inline-vllm/compose.yaml
@@ -0,0 +1,34 @@
+services:
+  llamastack:
+    image: llamastack/distribution-inline-vllm
+    network_mode: "host"
+    volumes:
+      - ~/.llama:/root/.llama
+      - ./run.yaml:/root/my-run.yaml
+    ports:
+      - "5000:5000"
+    devices:
+      - nvidia.com/gpu=all
+    environment:
+      - CUDA_VISIBLE_DEVICES=0
+    command: []
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              # that's the closest analogue to --gpus; provide
+              # an integer amount of devices or 'all'
+              count: 1
+              # Devices are reserved using a list of capabilities, making
+              # capabilities the only required field. A device MUST
+              # satisfy all the requested capabilities for a successful
+              # reservation.
+              capabilities: [gpu]
+      restart_policy:
+        condition: on-failure
+        delay: 3s
+        max_attempts: 5
+        window: 60s
+    runtime: nvidia
+    entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/my-run.yaml"
diff --git a/distributions/inline-vllm/run.yaml b/distributions/inline-vllm/run.yaml
new file mode 100644
index 000000000..aadf5c0ce
--- /dev/null
+++ b/distributions/inline-vllm/run.yaml
@@ -0,0 +1,66 @@
+version: '2'
+built_at: '2024-10-08T17:40:45.325529'
+image_name: local
+docker_image: null
+conda_env: local
+apis:
+- shields
+- agents
+- models
+- memory
+- memory_banks
+- inference
+- safety
+providers:
+  inference:
+  - provider_id: vllm-inference
+    provider_type: inline::vllm
+    config:
+      model: Llama3.2-3B-Instruct
+      tensor_parallel_size: 1
+      gpu_memory_utilization: 0.4
+      enforce_eager: true
+      max_tokens: 4096
+  - provider_id: vllm-safety
+    provider_type: inline::vllm
+    config:
+      model: Llama-Guard-3-1B
+      tensor_parallel_size: 1
+      gpu_memory_utilization: 0.2
+      enforce_eager: true
+      max_tokens: 4096
+  safety:
+  - provider_id: meta0
+    provider_type: meta-reference
+    config:
+      llama_guard_shield:
+        model: Llama-Guard-3-1B
+        excluded_categories: []
+# Uncomment to use prompt guard
+#      prompt_guard_shield:
+#        model: Prompt-Guard-86M
+  memory:
+  - provider_id: meta0
+    provider_type: meta-reference
+    config: {}
+  # Uncomment to use pgvector
+  # - provider_id: pgvector
+  #   provider_type: remote::pgvector
+  #   config:
+  #     host: 127.0.0.1
+  #     port: 5432
+  #     db: postgres
+  #     user: postgres
+  #     password: mysecretpassword
+  agents:
+  - provider_id: meta0
+    provider_type: meta-reference
+    config:
+      persistence_store:
+        namespace: null
+        type: sqlite
+        db_path: ~/.llama/runtime/agents_store.db
+  telemetry:
+  - provider_id: meta0
+    provider_type: meta-reference
+    config: {}
diff --git a/distributions/ollama-gpu/build.yaml b/distributions/ollama-gpu/build.yaml
new file mode 120000
index 000000000..8772548e0
--- /dev/null
+++ b/distributions/ollama-gpu/build.yaml
@@ -0,0 +1 @@
+../../llama_stack/templates/ollama/build.yaml
\ No newline at end of file
diff --git a/distributions/ollama/gpu/compose.yaml b/distributions/ollama-gpu/compose.yaml
similarity index 100%
rename from distributions/ollama/gpu/compose.yaml
rename to distributions/ollama-gpu/compose.yaml
diff --git a/distributions/ollama/cpu/run.yaml b/distributions/ollama-gpu/run.yaml
similarity index 100%
rename from distributions/ollama/cpu/run.yaml
rename to distributions/ollama-gpu/run.yaml
diff --git a/distributions/ollama/cpu/compose.yaml b/distributions/ollama/compose.yaml
similarity index 100%
rename from distributions/ollama/cpu/compose.yaml
rename to distributions/ollama/compose.yaml
diff --git a/distributions/ollama/gpu/run.yaml b/distributions/ollama/run.yaml
similarity index 100%
rename from distributions/ollama/gpu/run.yaml
rename to distributions/ollama/run.yaml
diff --git a/distributions/remote-vllm/build.yaml b/distributions/remote-vllm/build.yaml
new file mode 120000
index 000000000..52e5d0f2d
--- /dev/null
+++ b/distributions/remote-vllm/build.yaml
@@ -0,0 +1 @@
+../../llama_stack/templates/remote-vllm/build.yaml
\ No newline at end of file
diff --git a/distributions/remote-vllm/compose.yaml b/distributions/remote-vllm/compose.yaml
new file mode 100644
index 000000000..a83ed79fc
--- /dev/null
+++ b/distributions/remote-vllm/compose.yaml
@@ -0,0 +1,48 @@
+services:
+  vllm:
+    image: vllm/vllm-openai:latest
+    network_mode: "host"
+    volumes:
+      - $HOME/.cache/huggingface:/root/.cache/huggingface
+    ports:
+      - "8000:8000"
+    devices:
+      - nvidia.com/gpu=all
+    environment:
+      - CUDA_VISIBLE_DEVICES=0
+    command: []
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              # that's the closest analogue to --gpus; provide
+              # an integer amount of devices or 'all'
+              count: 1
+              # Devices are reserved using a list of capabilities, making
+              # capabilities the only required field. A device MUST
+              # satisfy all the requested capabilities for a successful
+              # reservation.
+              capabilities: [gpu]
+    runtime: nvidia
+  llamastack:
+    depends_on:
+      - vllm
+    image: llamastack/distribution-remote-vllm
+    network_mode: "host"
+    volumes:
+      - ~/.llama:/root/.llama
+      # Link to the remote-vllm run.yaml file
+      - ./run.yaml:/root/llamastack-run-remote-vllm.yaml
+    ports:
+      - "5000:5000"
+    # Hack: wait for the vLLM server to start before starting the Llama Stack server
+    entrypoint: bash -c "sleep 60; python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-remote-vllm.yaml"
+    deploy:
+      restart_policy:
+        condition: on-failure
+        delay: 3s
+        max_attempts: 5
+        window: 60s
+volumes:
+  vllm:
diff --git a/distributions/remote-vllm/run.yaml b/distributions/remote-vllm/run.yaml
new file mode 100644
index 000000000..2d0d36370
--- /dev/null
+++ b/distributions/remote-vllm/run.yaml
@@ -0,0 +1,46 @@
+version: '2'
+built_at: '2024-10-08T17:40:45.325529'
+image_name: local
+docker_image: null
+conda_env: local
+apis:
+- shields
+- agents
+- models
+- memory
+- memory_banks
+- inference
+- safety
+providers:
+  inference:
+  - provider_id: vllm0
+    provider_type: remote::vllm
+    config:
+      url: http://127.0.0.1:8000
+  safety:
+  - provider_id: meta0
+    provider_type: meta-reference
+    config:
+      llama_guard_shield:
+        model: Llama-Guard-3-1B
+        excluded_categories: []
+        disable_input_check: false
+        disable_output_check: false
+      prompt_guard_shield:
+        model: Prompt-Guard-86M
+  memory:
+  - provider_id: meta0
+    provider_type: meta-reference
+    config: {}
+  agents:
+  - provider_id: meta0
+    provider_type: meta-reference
+    config:
+      persistence_store:
+        namespace: null
+        type: sqlite
+        db_path: ~/.llama/runtime/kvstore.db
+  telemetry:
+  - provider_id: meta0
+    provider_type: meta-reference
+    config: {}
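The `entrypoint` in the compose file above sleeps a fixed 60 seconds before launching the Llama Stack server and relies on the `restart_policy` to retry if vLLM is not ready yet. If the model takes longer than a minute to load, a readiness probe is a more robust alternative. A minimal sketch, assuming the vLLM container above exposes port 8000 on the host and that its `/health` endpoint is reachable (this is an illustration, not part of the distribution):

```bash
# Sketch: wait for the vLLM server to report healthy instead of sleeping a fixed 60s.
# Assumes vllm/vllm-openai is listening on http://127.0.0.1:8000 as configured above.
until curl -sf http://127.0.0.1:8000/health > /dev/null; do
  echo "waiting for vLLM to become healthy..."
  sleep 5
done
echo "vLLM is up; the llamastack service can now connect to http://127.0.0.1:8000"
```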
diff --git a/distributions/vllm/build.yaml b/distributions/vllm/build.yaml
deleted file mode 120000
index dfc9401b6..000000000
--- a/distributions/vllm/build.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../../llama_stack/templates/vllm/build.yaml
\ No newline at end of file
diff --git a/docs/source/getting_started/distributions/self_hosted_distro/ollama.md b/docs/source/getting_started/distributions/self_hosted_distro/ollama.md
index 0d4d90ee6..37bef9536 100644
--- a/docs/source/getting_started/distributions/self_hosted_distro/ollama.md
+++ b/docs/source/getting_started/distributions/self_hosted_distro/ollama.md
@@ -2,25 +2,35 @@
 The `llamastack/distribution-ollama` distribution consists of the following provider configurations.
 
-| **API** | **Inference** | **Agents** | **Memory** | **Safety** | **Telemetry** |
-|----------------- |---------------- |---------------- |---------------------------------- |---------------- |---------------- |
-| **Provider(s)** | remote::ollama | meta-reference | remote::pgvector, remote::chroma | remote::ollama | meta-reference |
+| **API** | **Inference** | **Agents** | **Memory** | **Safety** | **Telemetry** |
+|----------------- |---------------- |---------------- |------------------------------------ |---------------- |---------------- |
+| **Provider(s)** | remote::ollama | meta-reference | remote::pgvector, remote::chromadb | meta-reference | meta-reference |
 
-### Docker: Start a Distribution (Single Node GPU)
+## Using Docker Compose
+
+You can use `docker compose` to start an Ollama server and the Llama Stack server with a single command.
+
+### Docker: Start the Distribution (Single Node, regular desktop machine)
+
+> [!NOTE]
+> This will start an Ollama server in CPU-only mode. Please see the [Ollama Documentation](https://github.com/ollama/ollama) for details on serving models on CPU.
+
+```bash
+$ cd distributions/ollama; docker compose up
+```
+
+### Docker: Start a Distribution (Single Node with NVIDIA GPUs)
 
 > [!NOTE]
 > This assumes you have access to GPU to start a Ollama server with access to your GPU.
 
-```
-$ cd distributions/ollama/gpu
-$ ls
-compose.yaml run.yaml
-$ docker compose up
+```bash
+$ cd distributions/ollama-gpu; docker compose up
 ```
 
 You will see outputs similar to following ---
-```
+```bash
 [ollama] | [GIN] 2024/10/18 - 21:19:41 | 200 | 226.841µs | ::1 | GET "/api/ps"
 [ollama] | [GIN] 2024/10/18 - 21:19:42 | 200 | 60.908µs | ::1 | GET "/api/ps"
 INFO:     Started server process [1]
@@ -34,36 +44,24 @@ INFO:     Uvicorn running on http://[::]:5000 (Press CTRL+C to quit)
 ```
 
 To kill the server
-```
+```bash
 docker compose down
 ```
 
-### Docker: Start the Distribution (Single Node CPU)
+## Starting Ollama and Llama Stack separately
 
-> [!NOTE]
-> This will start an ollama server with CPU only, please see [Ollama Documentations](https://github.com/ollama/ollama) for serving models on CPU only.
+If you wish to spin up an Ollama server separately and connect it with Llama Stack, use the following commands.
 
-```
-$ cd distributions/ollama/cpu
-$ ls
-compose.yaml run.yaml
-$ docker compose up
-```
-
-### Conda: ollama run + llama stack run
-
-If you wish to separately spin up a Ollama server, and connect with Llama Stack, you may use the following commands.
-
-#### Start Ollama server.
-- Please check the [Ollama Documentations](https://github.com/ollama/ollama) for more details.
+#### Start Ollama server
+- Please check the [Ollama Documentation](https://github.com/ollama/ollama) for more details.
 **Via Docker**
-```
+```bash
 docker run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama
 ```
 
 **Via CLI**
-```
+```bash
 ollama run 
 ```
@@ -71,7 +69,7 @@ ollama run
 
 **Via Conda**
 
-```
+```bash
 llama stack build --template ollama --image-type conda
 llama stack run ./gpu/run.yaml
 ```
@@ -82,7 +80,7 @@ docker run --network host -it -p 5000:5000 -v ~/.llama:/root/.llama -v ./gpu/run
 ```
 
 Make sure in your `run.yaml` file, your inference provider is pointing to the correct Ollama endpoint. E.g.
-```
+```yaml
 inference:
   - provider_id: ollama0
     provider_type: remote::ollama
@@ -96,7 +94,7 @@ inference:
 
 You can use ollama for managing model downloads.
 
-```
+```bash
 ollama pull llama3.1:8b-instruct-fp16
 ollama pull llama3.1:70b-instruct-fp16
 ```
@@ -106,7 +104,7 @@ ollama pull llama3.1:70b-instruct-fp16
 
 To serve a new model with `ollama`
 
-```
+```bash
 ollama run 
 ```
@@ -119,7 +117,7 @@ llama3.1:8b-instruct-fp16 4aacac419454 17 GB 100% GPU 4 minutes fro
 ```
 
 To verify that the model served by ollama is correctly connected to Llama Stack server
-```
+```bash
 $ llama-stack-client models list
 +----------------------+----------------------+---------------+-----------------------------------------------+
 | identifier | llama_model | provider_id | metadata |
diff --git a/docs/source/getting_started/distributions/self_hosted_distro/remote_vllm.md b/docs/source/getting_started/distributions/self_hosted_distro/remote_vllm.md
new file mode 100644
index 000000000..2ab8df7b7
--- /dev/null
+++ b/docs/source/getting_started/distributions/self_hosted_distro/remote_vllm.md
@@ -0,0 +1,83 @@
+# Remote vLLM Distribution
+
+The `llamastack/distribution-remote-vllm` distribution consists of the following provider configurations.
+
+| **API** | **Inference** | **Agents** | **Memory** | **Safety** | **Telemetry** |
+|----------------- |---------------- |---------------- |------------------------------------ |---------------- |---------------- |
+| **Provider(s)** | remote::vllm | meta-reference | remote::pgvector, remote::chromadb | meta-reference | meta-reference |
+
+You can use this distribution if you have GPUs and want to run an independent vLLM server container for running inference.
+
+## Using Docker Compose
+
+You can use `docker compose` to start a vLLM container and a Llama Stack server container together.
+
+> [!NOTE]
+> This assumes you have access to a GPU to start a vLLM server with access to your GPU.
+
+```bash
+$ cd distributions/remote-vllm; docker compose up
+```
+
+You will see outputs similar to the following ---
+```
+
+```
+
+To kill the server
+```bash
+docker compose down
+```
+
+## Starting vLLM and Llama Stack separately
+
+You may want to start a vLLM server and connect it to Llama Stack manually. There are two ways to do this.
+
+
+#### Start vLLM server
+
+```bash
+docker run --runtime nvidia --gpus all \
+    -v ~/.cache/huggingface:/root/.cache/huggingface \
+    --env "HUGGING_FACE_HUB_TOKEN=" \
+    -p 8000:8000 \
+    --ipc=host \
+    vllm/vllm-openai:latest \
+    --model meta-llama/Llama-3.1-8B-Instruct
+```
+
+Please check the [vLLM Documentation](https://docs.vllm.ai/en/v0.5.5/serving/deploying_with_docker.html) for more details.
+
+
+#### Start Llama Stack server pointing to your vLLM server
+
+
+We have provided a template `run.yaml` file in the `distributions/remote-vllm` directory. Please make sure to modify the `url` under the `inference` provider config to point to your vLLM server endpoint. As an example, if your vLLM server is running on `http://127.0.0.1:8000`, your `run.yaml` file should look like the following:
+```yaml
+inference:
+  - provider_id: vllm0
+    provider_type: remote::vllm
+    config:
+      url: http://127.0.0.1:8000
+```
+
+**Via Conda**
+
+If you are using Conda, you can build and run the Llama Stack server with the following commands:
+```bash
+cd distributions/remote-vllm
+llama stack build --template remote-vllm --image-type conda
+llama stack run run.yaml
+```
+
+**Via Docker**
+
+You can use the Llama Stack Docker image to start the server with the following command:
+```bash
+docker run --network host -it -p 5000:5000 \
+  -v ~/.llama:/root/.llama \
+  -v ./run.yaml:/root/llamastack-run-remote-vllm.yaml \
+  --gpus=all \
+  llamastack/distribution-remote-vllm \
+  --yaml_config /root/llamastack-run-remote-vllm.yaml
+```
diff --git a/docs/source/getting_started/index.md b/docs/source/getting_started/index.md
index 92643d87e..718bb185c 100644
--- a/docs/source/getting_started/index.md
+++ b/docs/source/getting_started/index.md
@@ -80,6 +80,11 @@ Llama3.1-8B-Instruct Llama3.2-1B Llama3.2-3B-Instruct Llama-
 :::
 
+:::{tab-item} vLLM
+##### System Requirements
+Access to Single-Node GPU to start a vLLM server.
+:::
+
 :::{tab-item} tgi
 ##### System Requirements
 Access to Single-Node GPU to start a TGI server.
@@ -119,6 +124,22 @@ docker run -it -p 5000:5000 -v ~/.llama:/root/.llama -v ./run.yaml:/root/my-run.
 ```
 :::
 
+:::{tab-item} vLLM
+```
+$ cd llama-stack/distributions/remote-vllm && docker compose up
+```
+
+Docker Compose will first start the vLLM server on port 8000, then start the Llama Stack distribution server connected to it for inference. You should see outputs similar to the following --
+```
+
+```
+
+To kill the server
+```
+docker compose down
+```
+:::
+
 :::{tab-item} tgi
 ```
 $ cd llama-stack/distributions/tgi && docker compose up
@@ -144,7 +165,11 @@ docker compose down
 
 :::{tab-item} ollama
 ```
-$ cd llama-stack/distributions/ollama/cpu && docker compose up
+$ cd llama-stack/distributions/ollama && docker compose up
+
+# OR
+
+$ cd llama-stack/distributions/ollama-gpu && docker compose up
 ```
 
 You will see outputs similar to following ---
diff --git a/llama_stack/providers/registry/inference.py b/llama_stack/providers/registry/inference.py
index dc6fa9592..1d3eabe0d 100644
--- a/llama_stack/providers/registry/inference.py
+++ b/llama_stack/providers/registry/inference.py
@@ -45,7 +45,7 @@ def available_providers() -> List[ProviderSpec]:
         ),
         InlineProviderSpec(
             api=Api.inference,
-            provider_type="vllm",
+            provider_type="inline::vllm",
             pip_packages=[
                 "vllm",
             ],
diff --git a/llama_stack/templates/inline-vllm/build.yaml b/llama_stack/templates/inline-vllm/build.yaml
new file mode 100644
index 000000000..d0fe93aa3
--- /dev/null
+++ b/llama_stack/templates/inline-vllm/build.yaml
@@ -0,0 +1,13 @@
+name: inline-vllm
+distribution_spec:
+  docker_image: pytorch/pytorch:2.5.0-cuda12.4-cudnn9-runtime
+  description: Use an inline vLLM engine within the Llama Stack process for running LLM inference
+  providers:
+    inference: inline::vllm
+    memory:
+    - meta-reference
+    - remote::chromadb
+    - remote::pgvector
+    safety: meta-reference
+    agents: meta-reference
+    telemetry: meta-reference
diff --git a/llama_stack/templates/remote-vllm/build.yaml b/llama_stack/templates/remote-vllm/build.yaml
new file mode 100644
index 000000000..ea95992f3
--- /dev/null
+++ b/llama_stack/templates/remote-vllm/build.yaml
@@ -0,0 +1,12 @@
+name: remote-vllm
+distribution_spec:
+  description: Use (an external) vLLM server for running LLM inference
+  providers:
+    inference: remote::vllm
+    memory:
+    - meta-reference
+    - remote::chromadb
+    - remote::pgvector
+    safety: meta-reference
+    agents: meta-reference
+    telemetry: meta-reference
diff --git a/llama_stack/templates/vllm/build.yaml b/llama_stack/templates/vllm/build.yaml
deleted file mode 100644
index d842896db..000000000
--- a/llama_stack/templates/vllm/build.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-name: vllm
-distribution_spec:
-  description: Like local, but use vLLM for running LLM inference
-  providers:
-    inference: vllm
-    memory: meta-reference
-    safety: meta-reference
-    agents: meta-reference
-    telemetry: meta-reference
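Once one of these Compose stacks is up, a quick end-to-end check is to confirm that both the inference backend and the Llama Stack server respond. A minimal sketch for the `remote-vllm` distribution above, assuming vLLM on port 8000, the Llama Stack server on port 5000, and the `llama-stack-client` CLI installed locally (the same command the ollama guide above uses):

```bash
# Sketch: end-to-end check for the remote-vllm stack started above.
# The model served by vLLM should be listed by its OpenAI-compatible API ...
curl -s http://127.0.0.1:8000/v1/models

# ... and the same model should be visible through the Llama Stack server.
# (The client may need to be pointed at http://localhost:5000 first.)
llama-stack-client models list
```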