diff --git a/distributions/remote-vllm/compose.yaml b/distributions/remote-vllm/compose.yaml index a370df619..09701e099 100644 --- a/distributions/remote-vllm/compose.yaml +++ b/distributions/remote-vllm/compose.yaml @@ -1,13 +1,3 @@ -# NOTES: -# -# This Docker Compose (and the associated run.yaml) assumes you will be -# running in the default "bridged" network mode. -# -# If you need "host" network mode, please uncomment -# - network_mode: "host" -# -# Similarly change "host.docker.internal" to "localhost" in the run.yaml file -# services: vllm-inference: image: vllm/vllm-openai:latest diff --git a/distributions/remote-vllm/run-with-safety.yaml b/distributions/remote-vllm/run-with-safety.yaml index 7d401322b..d3e2ffcdc 100644 --- a/distributions/remote-vllm/run-with-safety.yaml +++ b/distributions/remote-vllm/run-with-safety.yaml @@ -1,14 +1,14 @@ version: '2' -built_at: 2024-11-17 14:07:24.568750 +built_at: 2024-11-17 14:48:55.487270 image_name: remote-vllm docker_image: llamastack/distribution-remote-vllm:test-0.0.52rc3 conda_env: null apis: +- safety - agents - telemetry -- safety -- inference - memory +- inference providers: inference: - provider_id: vllm-inference diff --git a/distributions/remote-vllm/run.yaml b/distributions/remote-vllm/run.yaml index 18f27cb20..21e58fbd8 100644 --- a/distributions/remote-vllm/run.yaml +++ b/distributions/remote-vllm/run.yaml @@ -1,14 +1,14 @@ version: '2' -built_at: 2024-11-17 14:07:24.563541 +built_at: 2024-11-17 14:48:55.476058 image_name: remote-vllm docker_image: llamastack/distribution-remote-vllm:test-0.0.52rc3 conda_env: null apis: +- safety - agents - telemetry -- safety -- inference - memory +- inference providers: inference: - provider_id: vllm-inference diff --git a/distributions/tgi/compose.yaml b/distributions/tgi/compose.yaml index bea7eb907..753b7880b 100644 --- a/distributions/tgi/compose.yaml +++ b/distributions/tgi/compose.yaml @@ -1,51 +1,89 @@ services: - text-generation-inference: + tgi-inference: image: ghcr.io/huggingface/text-generation-inference:latest - network_mode: "host" volumes: - $HOME/.cache/huggingface:/data + network_mode: ${NETWORK_MODE:-bridged} ports: - - "5009:5009" + - "${TGI_INFERENCE_PORT:-8080}:${TGI_INFERENCE_PORT:-8080}" devices: - nvidia.com/gpu=all environment: - - CUDA_VISIBLE_DEVICES=0 + - CUDA_VISIBLE_DEVICES=${TGI_INFERENCE_GPU:-0} + - HF_TOKEN=$HF_TOKEN - HF_HOME=/data - HF_DATASETS_CACHE=/data - HF_MODULES_CACHE=/data - HF_HUB_CACHE=/data - command: ["--dtype", "bfloat16", "--usage-stats", "on", "--sharded", "false", "--model-id", "meta-llama/Llama-3.1-8B-Instruct", "--port", "5009", "--cuda-memory-fraction", "0.3"] + command: > + --dtype bfloat16 + --usage-stats off + --sharded false + --model-id ${TGI_INFERENCE_MODEL:-meta-llama/Llama-3.2-3B-Instruct} + --port ${TGI_INFERENCE_PORT:-8080} + --cuda-memory-fraction 0.75 + healthcheck: + test: ["CMD", "curl", "-f", "http://tgi-inference:${TGI_INFERENCE_PORT:-8080}/health"] + interval: 5s + timeout: 5s + retries: 30 deploy: resources: reservations: devices: - driver: nvidia - # that's the closest analogue to --gpus; provide - # an integer amount of devices or 'all' - count: 1 - # Devices are reserved using a list of capabilities, making - # capabilities the only required field. A device MUST - # satisfy all the requested capabilities for a successful - # reservation. 
capabilities: [gpu] runtime: nvidia + + tgi-${TGI_SAFETY_MODEL:+safety}: + image: ghcr.io/huggingface/text-generation-inference:latest + volumes: + - $HOME/.cache/huggingface:/data + network_mode: ${NETWORK_MODE:-bridged} + ports: + - "${TGI_SAFETY_PORT:-8081}:${TGI_SAFETY_PORT:-8081}" + devices: + - nvidia.com/gpu=all + environment: + - CUDA_VISIBLE_DEVICES=${TGI_SAFETY_GPU:-1} + - HF_TOKEN=$HF_TOKEN + - HF_HOME=/data + - HF_DATASETS_CACHE=/data + - HF_MODULES_CACHE=/data + - HF_HUB_CACHE=/data + command: > + --dtype bfloat16 + --usage-stats off + --sharded false + --model-id ${TGI_SAFETY_MODEL:-meta-llama/Llama-Guard-3-1B} + --port ${TGI_SAFETY_PORT:-8081} + --cuda-memory-fraction 0.75 healthcheck: - test: ["CMD", "curl", "-f", "http://text-generation-inference:5009/health"] + test: ["CMD", "curl", "-f", "http://tgi-safety:${TGI_SAFETY_PORT:-8081}/health"] interval: 5s timeout: 5s retries: 30 + deploy: + resources: + reservations: + devices: + - driver: nvidia + capabilities: [gpu] + runtime: nvidia + llamastack: depends_on: - text-generation-inference: + tgi-inference: condition: service_healthy - image: llamastack/distribution-tgi - network_mode: "host" + tgi-${TGI_SAFETY_MODEL:+safety}: + condition: service_healthy + image: llamastack/distribution-tgi:test-0.0.52rc3 + network_mode: ${NETWORK_MODE:-bridged} volumes: - ~/.llama:/root/.llama - # Link to TGI run.yaml file - - ./run.yaml:/root/my-run.yaml + - ./run${TGI_SAFETY_MODEL:+-with-safety}.yaml:/root/my-run.yaml ports: - - "5000:5000" + - "${LLAMA_STACK_PORT:-5001}:${LLAMA_STACK_PORT:-5001}" # Hack: wait for TGI server to start before starting docker entrypoint: bash -c "sleep 60; python -m llama_stack.distribution.server.server --yaml_config /root/my-run.yaml" restart_policy: @@ -53,3 +91,13 @@ services: delay: 3s max_attempts: 5 window: 60s + environment: + - TGI_URL=http://tgi-inference:${TGI_INFERENCE_PORT:-8080} + - SAFETY_TGI_URL=http://tgi-safety:${TGI_SAFETY_PORT:-8081} + - INFERENCE_MODEL=${INFERENCE_MODEL:-meta-llama/Llama-3.2-3B-Instruct} + - SAFETY_MODEL=${SAFETY_MODEL:-meta-llama/Llama-Guard-3-1B} + +volumes: + tgi-inference: + tgi-safety: + llamastack: diff --git a/distributions/tgi/run-with-safety.yaml b/distributions/tgi/run-with-safety.yaml new file mode 100644 index 000000000..504bf3e33 --- /dev/null +++ b/distributions/tgi/run-with-safety.yaml @@ -0,0 +1,67 @@ +version: '2' +built_at: 2024-11-17 14:48:56.991119 +image_name: tgi +docker_image: llamastack/distribution-remote-tgi:test-0.0.52rc3 +conda_env: null +apis: +- safety +- agents +- telemetry +- memory +- inference +providers: + inference: + - provider_id: tgi-inference + provider_type: remote::tgi + config: + url: ${env.TGI_URL} + - provider_id: tgi-safety + provider_type: remote::tgi + config: + url: ${env.SAFETY_TGI_URL} + memory: + - provider_id: faiss + provider_type: inline::faiss + config: + kvstore: + type: sqlite + namespace: null + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/faiss_store.db + safety: + - provider_id: llama-guard + provider_type: inline::llama-guard + config: {} + agents: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: + persistence_store: + type: sqlite + namespace: null + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/agents_store.db + telemetry: + - provider_id: meta-reference + provider_type: inline::meta-reference + config: {} +metadata_store: + namespace: null + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/registry.db +models: +- 
metadata: {} + model_id: ${env.INFERENCE_MODEL} + provider_id: tgi-inference + provider_model_id: null +- metadata: {} + model_id: ${env.SAFETY_MODEL} + provider_id: tgi-safety + provider_model_id: null +shields: +- params: null + shield_id: ${env.SAFETY_MODEL} + provider_id: null + provider_shield_id: null +memory_banks: [] +datasets: [] +scoring_fns: [] +eval_tasks: [] diff --git a/distributions/tgi/run.yaml b/distributions/tgi/run.yaml index 84ec536f8..8c45832e1 100644 --- a/distributions/tgi/run.yaml +++ b/distributions/tgi/run.yaml @@ -1,45 +1,55 @@ version: '2' -built_at: '2024-10-08T17:40:45.325529' -image_name: local -docker_image: null -conda_env: local +built_at: 2024-11-17 14:48:56.975663 +image_name: tgi +docker_image: llamastack/distribution-remote-tgi:test-0.0.52rc3 +conda_env: null apis: -- shields -- agents -- models -- memory -- memory_banks -- inference - safety +- agents +- telemetry +- memory +- inference providers: inference: - - provider_id: tgi0 + - provider_id: tgi-inference provider_type: remote::tgi config: - url: http://127.0.0.1:5009 - safety: - - provider_id: meta0 - provider_type: inline::llama-guard - config: - model: Llama-Guard-3-1B - excluded_categories: [] - - provider_id: meta1 - provider_type: inline::prompt-guard - config: - model: Prompt-Guard-86M + url: ${env.TGI_URL} memory: - - provider_id: meta0 - provider_type: inline::meta-reference + - provider_id: faiss + provider_type: inline::faiss + config: + kvstore: + type: sqlite + namespace: null + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/faiss_store.db + safety: + - provider_id: llama-guard + provider_type: inline::llama-guard config: {} agents: - - provider_id: meta0 + - provider_id: meta-reference provider_type: inline::meta-reference config: persistence_store: - namespace: null type: sqlite - db_path: ~/.llama/runtime/kvstore.db + namespace: null + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/agents_store.db telemetry: - - provider_id: meta0 + - provider_id: meta-reference provider_type: inline::meta-reference config: {} +metadata_store: + namespace: null + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/registry.db +models: +- metadata: {} + model_id: ${env.INFERENCE_MODEL} + provider_id: tgi-inference + provider_model_id: null +shields: [] +memory_banks: [] +datasets: [] +scoring_fns: [] +eval_tasks: [] diff --git a/docs/source/getting_started/distributions/self_hosted_distro/tgi.md b/docs/source/getting_started/distributions/self_hosted_distro/tgi.md index 8ad9de181..bae0a19ac 100644 --- a/docs/source/getting_started/distributions/self_hosted_distro/tgi.md +++ b/docs/source/getting_started/distributions/self_hosted_distro/tgi.md @@ -2,35 +2,55 @@ The `llamastack/distribution-tgi` distribution consists of the following provider configurations. 
- -| **API** | **Inference** | **Agents** | **Memory** | **Safety** | **Telemetry** | -|----------------- |--------------- |---------------- |-------------------------------------------------- |---------------- |---------------- | -| **Provider(s)** | remote::tgi | meta-reference | meta-reference, remote::pgvector, remote::chroma | meta-reference | meta-reference | + Provider Configuration +┏━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ +┃ API ┃ Provider(s) ┃ +┡━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +│ agents │ `inline::meta-reference` │ +│ inference │ `remote::tgi` │ +│ memory │ `inline::faiss`, `remote::chromadb`, `remote::pgvector` │ +│ safety │ `inline::llama-guard` │ +│ telemetry │ `inline::meta-reference` │ +└───────────┴─────────────────────────────────────────────────────────┘ -### Docker: Start the Distribution (Single Node GPU) +You can use this distribution if you have GPUs and want to run an independent TGI server container for running inference. + +### Environment Variables -> [!NOTE] -> This assumes you have access to GPU to start a TGI server with access to your GPU. +The following environment variables can be configured: + +- `LLAMASTACK_PORT`: Port for the Llama Stack distribution server (default: `5001`) +- `INFERENCE_MODEL`: Inference model loaded into the TGI server (default: `meta-llama/Llama-3.2-3B-Instruct`) +- `TGI_URL`: URL of the TGI server with the main inference model (default: `http://host.docker.internal:8080/v1`) +- `SAFETY_TGI_URL`: URL of the TGI server with the safety model (default: `http://host.docker.internal:8081/v1`) +- `SAFETY_MODEL`: Name of the safety (Llama-Guard) model to use (default: `meta-llama/Llama-Guard-3-1B`) +### Models + +The following models are configured by default: +- `${env.INFERENCE_MODEL}` +- `${env.SAFETY_MODEL}` -``` -$ cd distributions/tgi && docker compose up +## Using Docker Compose + +You can use `docker compose` to start a TGI container and Llama Stack server container together. + +```bash +$ cd distributions/tgi; docker compose up ``` The script will first start up TGI server, then start up Llama Stack distribution server hooking up to the remote TGI provider for inference. You should be able to see the following outputs -- -``` +```bash [text-generation-inference] | 2024-10-15T18:56:33.810397Z INFO text_generation_router::server: router/src/server.rs:1813: Using config Some(Llama) [text-generation-inference] | 2024-10-15T18:56:33.810448Z WARN text_generation_router::server: router/src/server.rs:1960: Invalid hostname, defaulting to 0.0.0.0 [text-generation-inference] | 2024-10-15T18:56:33.864143Z INFO text_generation_router::server: router/src/server.rs:2353: Connected INFO: Started server process [1] INFO: Waiting for application startup. INFO: Application startup complete. -INFO: Uvicorn running on http://[::]:5000 (Press CTRL+C to quit) +INFO: Uvicorn running on http://[::]:5001 (Press CTRL+C to quit) ``` To kill the server -``` +```bash docker compose down ``` @@ -42,8 +62,12 @@ If you wish to separately spin up a TGI server, and connect with Llama Stack, yo #### Start TGI server locally - Please check the [TGI Getting Started Guide](https://github.com/huggingface/text-generation-inference?tab=readme-ov-file#get-started) to get a TGI endpoint.
-``` -docker run --rm -it -v $HOME/.cache/huggingface:/data -p 5009:5009 --gpus all ghcr.io/huggingface/text-generation-inference:latest --dtype bfloat16 --usage-stats on --sharded false --model-id meta-llama/Llama-3.1-8B-Instruct --port 5009 +```bash +docker run --rm -it -v $HOME/.cache/huggingface:/data \ + -p 5009:5009 --gpus all \ + ghcr.io/huggingface/text-generation-inference:latest \ + --dtype bfloat16 --usage-stats on --sharded false \ + --model-id meta-llama/Llama-3.2-3B-Instruct --port 5009 ``` #### Start Llama Stack server pointing to TGI server @@ -57,12 +81,15 @@ llama stack run ./gpu/run.yaml ``` **Via Docker** -``` -docker run --network host -it -p 5000:5000 -v ./run.yaml:/root/my-run.yaml --gpus=all llamastack/distribution-tgi --yaml_config /root/my-run.yaml +```bash +docker run --network host -it -p 5001:5001 \ + -v ./run.yaml:/root/my-run.yaml --gpus=all \ + llamastack/distribution-tgi \ + --yaml_config /root/my-run.yaml ``` -Make sure in you `run.yaml` file, you inference provider is pointing to the correct TGI server endpoint. E.g. -``` +We have provided a template `run.yaml` file in the `distributions/tgi` directory. Make sure in your `run.yaml` file, your inference provider is pointing to the correct TGI server endpoint. E.g. +```yaml inference: - provider_id: tgi0 provider_type: remote::tgi @@ -76,17 +103,24 @@ To serve a new model with `tgi`, change the docker command flag `--model-id + --dtype bfloat16 --usage-stats on --sharded false + --model-id meta-llama/Llama-3.2-1B-Instruct + --port 5009 --cuda-memory-fraction 0.7 ``` or by changing the docker run command's `--model-id` flag -``` -docker run --rm -it -v $HOME/.cache/huggingface:/data -p 5009:5009 --gpus all ghcr.io/huggingface/text-generation-inference:latest --dtype bfloat16 --usage-stats on --sharded false --model-id meta-llama/Llama-3.2-1B-Instruct --port 5009 +```bash +docker run --rm -it -v $HOME/.cache/huggingface:/data \ + -p 5009:5009 --gpus all \ + ghcr.io/huggingface/text-generation-inference:latest \ + --dtype bfloat16 --usage-stats off --sharded false \ + --model-id meta-llama/Llama-3.2-3B-Instruct --port 5009 ``` In `run.yaml`, make sure you point the correct server endpoint to the TGI server endpoint serving your model.
-``` +```yaml inference: - provider_id: tgi0 provider_type: remote::tgi diff --git a/llama_stack/providers/remote/inference/tgi/config.py b/llama_stack/providers/remote/inference/tgi/config.py index 863f81bf7..4441b1352 100644 --- a/llama_stack/providers/remote/inference/tgi/config.py +++ b/llama_stack/providers/remote/inference/tgi/config.py @@ -12,19 +12,20 @@ from pydantic import BaseModel, Field @json_schema_type class TGIImplConfig(BaseModel): - host: str = "localhost" - port: int = 8080 - protocol: str = "http" - - @property - def url(self) -> str: - return f"{self.protocol}://{self.host}:{self.port}" - + url: str = Field( + description="The URL for the TGI serving endpoint", + ) api_token: Optional[str] = Field( default=None, description="A bearer token if your TGI endpoint is protected.", ) + @classmethod + def sample_run_config(cls, url: str = "${env.TGI_URL}"): + return { + "url": url, + } + @json_schema_type class InferenceEndpointImplConfig(BaseModel): diff --git a/llama_stack/providers/remote/inference/tgi/docker_compose.yaml b/llama_stack/providers/remote/inference/tgi/docker_compose.yaml deleted file mode 100644 index 06638c28c..000000000 --- a/llama_stack/providers/remote/inference/tgi/docker_compose.yaml +++ /dev/null @@ -1,35 +0,0 @@ -services: - ${SERVICE_NAME:-tgi}: - image: ghcr.io/huggingface/text-generation-inference:2.3.1 - network_mode: "host" - volumes: - - $HOME/.cache/huggingface:/data - ports: - - ${TGI_PORT:-8000}:${TGI_PORT:-8000} - devices: - - nvidia.com/gpu=all - environment: - - CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES:-0} - - HF_HOME=/data - - HF_DATASETS_CACHE=/data - - HF_MODULES_CACHE=/data - - HF_HUB_CACHE=/data - command: > - --dtype bfloat16 - --usage-stats off - --sharded false - --model-id ${TGI_MODEL:-meta-llama/Llama-3.2-3B-Instruct} - --port ${TGI_PORT:-8000} - --cuda-memory-fraction ${TGI_CUDA_MEMORY_FRACTION:-0.8} - deploy: - resources: - reservations: - devices: - - driver: nvidia - capabilities: [gpu] - runtime: nvidia - healthcheck: - test: ["CMD", "curl", "-f", "http://${SERVICE_NAME:-tgi}:${TGI_PORT:-8000}/health"] - interval: 5s - timeout: 5s - retries: 30 diff --git a/llama_stack/providers/remote/inference/vllm/docker_compose.yaml b/llama_stack/providers/remote/inference/vllm/docker_compose.yaml deleted file mode 100644 index 227842272..000000000 --- a/llama_stack/providers/remote/inference/vllm/docker_compose.yaml +++ /dev/null @@ -1,26 +0,0 @@ -services: - ${SERVICE_NAME:-vllm}: - image: vllm/vllm-openai:latest - ports: - - ${VLLM_PORT:-5100}:${VLLM_PORT:-5100} - volumes: - - $HOME/.cache/huggingface:/root/.cache/huggingface - devices: - - nvidia.com/gpu=all - deploy: - resources: - reservations: - devices: - - driver: nvidia - capabilities: [gpu] - runtime: nvidia - environment: - - CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES:-0} - - HUGGING_FACE_HUB_TOKEN=${HF_TOKEN} - command: > - --gpu-memory-utilization 0.75 - --model ${VLLM_MODEL:-meta-llama/Llama-3.2-3B-Instruct} - --enforce-eager - --max-model-len 8192 - --max-num-seqs 16 - --port ${VLLM_PORT:-5100} diff --git a/llama_stack/templates/tgi/__init__.py b/llama_stack/templates/tgi/__init__.py new file mode 100644 index 000000000..fa1932f6a --- /dev/null +++ b/llama_stack/templates/tgi/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. 
+ +from .tgi import get_distribution_template # noqa: F401 diff --git a/llama_stack/templates/tgi/build.yaml b/llama_stack/templates/tgi/build.yaml index 5500361c4..9817d90c7 100644 --- a/llama_stack/templates/tgi/build.yaml +++ b/llama_stack/templates/tgi/build.yaml @@ -1,12 +1,19 @@ +version: '2' name: tgi distribution_spec: - description: Use TGI for running LLM inference + description: Use (an external) TGI server for running LLM inference + docker_image: llamastack/distribution-remote-tgi:test-0.0.52rc3 providers: - inference: remote::tgi + inference: + - remote::tgi memory: - inline::faiss - remote::chromadb - remote::pgvector - safety: inline::llama-guard - agents: inline::meta-reference - telemetry: inline::meta-reference + safety: + - inline::llama-guard + agents: + - inline::meta-reference + telemetry: + - inline::meta-reference +image_type: conda diff --git a/llama_stack/templates/tgi/doc_template.md b/llama_stack/templates/tgi/doc_template.md new file mode 100644 index 000000000..95963861a --- /dev/null +++ b/llama_stack/templates/tgi/doc_template.md @@ -0,0 +1,125 @@ +# TGI Distribution + +The `llamastack/distribution-{{ name }}` distribution consists of the following provider configurations. + +{{ providers_table }} + +You can use this distribution if you have GPUs and want to run an independent TGI server container for running inference. + +{%- if docker_compose_env_vars %} +### Environment Variables + +The following environment variables can be configured: + +{% for var, (default_value, description) in docker_compose_env_vars.items() %} +- `{{ var }}`: {{ description }} (default: `{{ default_value }}`) +{% endfor %} +{% endif %} + +{%- if default_models %} +### Models + +The following models are configured by default: +{% for model in default_models %} +- `{{ model.model_id }}` +{% endfor %} +{% endif %} + + +## Using Docker Compose + +You can use `docker compose` to start a TGI container and Llama Stack server container together. + +```bash +$ cd distributions/{{ name }}; docker compose up +``` + +The script will first start up TGI server, then start up Llama Stack distribution server hooking up to the remote TGI provider for inference. You should be able to see the following outputs -- +```bash +[text-generation-inference] | 2024-10-15T18:56:33.810397Z INFO text_generation_router::server: router/src/server.rs:1813: Using config Some(Llama) +[text-generation-inference] | 2024-10-15T18:56:33.810448Z WARN text_generation_router::server: router/src/server.rs:1960: Invalid hostname, defaulting to 0.0.0.0 +[text-generation-inference] | 2024-10-15T18:56:33.864143Z INFO text_generation_router::server: router/src/server.rs:2353: Connected +INFO: Started server process [1] +INFO: Waiting for application startup. +INFO: Application startup complete. +INFO: Uvicorn running on http://[::]:5001 (Press CTRL+C to quit) +``` + +To kill the server +```bash +docker compose down +``` + + +### Conda: TGI server + llama stack run + +If you wish to separately spin up a TGI server, and connect with Llama Stack, you may use the following commands. + +#### Start TGI server locally +- Please check the [TGI Getting Started Guide](https://github.com/huggingface/text-generation-inference?tab=readme-ov-file#get-started) to get a TGI endpoint. 
+ +```bash +docker run --rm -it -v $HOME/.cache/huggingface:/data \ + -p 5009:5009 --gpus all \ + ghcr.io/huggingface/text-generation-inference:latest \ + --dtype bfloat16 --usage-stats on --sharded false \ + --model-id meta-llama/Llama-3.2-3B-Instruct --port 5009 +``` + +#### Start Llama Stack server pointing to TGI server + +**Via Conda** + +```bash +llama stack build --template {{ name }} --image-type conda +# -- start a TGI server endpoint +llama stack run ./gpu/run.yaml +``` + +**Via Docker** +```bash +docker run --network host -it -p 5001:5001 \ + -v ./run.yaml:/root/my-run.yaml --gpus=all \ + llamastack/distribution-{{ name }} \ + --yaml_config /root/my-run.yaml +``` + +We have provided a template `run.yaml` file in the `distributions/{{ name }}` directory. Make sure in your `run.yaml` file, your inference provider is pointing to the correct TGI server endpoint. E.g. +```yaml +inference: + - provider_id: tgi0 + provider_type: remote::tgi + config: + url: http://127.0.0.1:5009 +``` + + +### (Optional) Update Model Serving Configuration +To serve a new model with `tgi`, change the docker command flag `--model-id `. + +This can be done by editing the `command` args in `compose.yaml`. E.g. Replace "Llama-3.2-1B-Instruct" with the model you want to serve. + +```yaml +command: > + --dtype bfloat16 --usage-stats on --sharded false + --model-id meta-llama/Llama-3.2-1B-Instruct + --port 5009 --cuda-memory-fraction 0.7 +``` + +or by changing the docker run command's `--model-id` flag +```bash +docker run --rm -it -v $HOME/.cache/huggingface:/data \ + -p 5009:5009 --gpus all \ + ghcr.io/huggingface/text-generation-inference:latest \ + --dtype bfloat16 --usage-stats off --sharded false \ + --model-id meta-llama/Llama-3.2-3B-Instruct --port 5009 +``` + +In `run.yaml`, make sure you point the correct server endpoint to the TGI server endpoint serving your model. +```yaml +inference: + - provider_id: tgi0 + provider_type: remote::tgi + config: + url: http://127.0.0.1:5009 +``` diff --git a/llama_stack/templates/tgi/tgi.py b/llama_stack/templates/tgi/tgi.py new file mode 100644 index 000000000..0987bc0b8 --- /dev/null +++ b/llama_stack/templates/tgi/tgi.py @@ -0,0 +1,97 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree.
+ +from pathlib import Path + +from llama_stack.distribution.datatypes import ModelInput, Provider, ShieldInput +from llama_stack.providers.remote.inference.tgi import TGIImplConfig +from llama_stack.templates.template import DistributionTemplate, RunConfigSettings + + +def get_distribution_template() -> DistributionTemplate: + providers = { + "inference": ["remote::tgi"], + "memory": ["inline::faiss", "remote::chromadb", "remote::pgvector"], + "safety": ["inline::llama-guard"], + "agents": ["inline::meta-reference"], + "telemetry": ["inline::meta-reference"], + } + + inference_provider = Provider( + provider_id="tgi-inference", + provider_type="remote::tgi", + config=TGIImplConfig.sample_run_config( + url="${env.TGI_URL}", + ), + ) + + inference_model = ModelInput( + model_id="${env.INFERENCE_MODEL}", + provider_id="tgi-inference", + ) + safety_model = ModelInput( + model_id="${env.SAFETY_MODEL}", + provider_id="tgi-safety", + ) + + return DistributionTemplate( + name="tgi", + distro_type="self_hosted", + description="Use (an external) TGI server for running LLM inference", + docker_image="llamastack/distribution-remote-tgi:test-0.0.52rc3", + template_path=Path(__file__).parent / "doc_template.md", + providers=providers, + default_models=[inference_model, safety_model], + run_configs={ + "run.yaml": RunConfigSettings( + provider_overrides={ + "inference": [inference_provider], + }, + default_models=[inference_model], + ), + "run-with-safety.yaml": RunConfigSettings( + provider_overrides={ + "inference": [ + inference_provider, + Provider( + provider_id="tgi-safety", + provider_type="remote::tgi", + config=TGIImplConfig.sample_run_config( + url="${env.SAFETY_TGI_URL}", + ), + ), + ], + }, + default_models=[ + inference_model, + safety_model, + ], + default_shields=[ShieldInput(shield_id="${env.SAFETY_MODEL}")], + ), + }, + docker_compose_env_vars={ + "LLAMASTACK_PORT": ( + "5001", + "Port for the Llama Stack distribution server", + ), + "INFERENCE_MODEL": ( + "meta-llama/Llama-3.2-3B-Instruct", + "Inference model loaded into the TGI server", + ), + "TGI_URL": ( + "http://host.docker.internal:8080/v1", + "URL of the TGI server with the main inference model", + ), + "SAFETY_TGI_URL": ( + "http://host.docker.internal:8081/v1", + "URL of the TGI server with the safety model", + ), + "SAFETY_MODEL": ( + "meta-llama/Llama-Guard-3-1B", + "Name of the safety (Llama-Guard) model to use", + ), + }, + )
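
To try the new compose setup end to end, here is a minimal bring-up sketch using the environment variables wired through `compose.yaml` (`HF_TOKEN`, `TGI_INFERENCE_MODEL`, `TGI_SAFETY_MODEL`, `LLAMA_STACK_PORT`); the values below are simply the defaults from that file.

```bash
# Assumes a GPU-enabled Docker runtime and that you start from the repo root.
export HF_TOKEN=hf_xxx                                   # Hugging Face token for pulling gated Llama weights
export TGI_INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct
export TGI_SAFETY_MODEL=meta-llama/Llama-Guard-3-1B      # also switches the mounted config to run-with-safety.yaml
export LLAMA_STACK_PORT=5001

cd distributions/tgi
docker compose up
```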
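The compose file relies on `${VAR:+word}` parameter expansion to toggle the safety pieces (the second TGI service name and which run config gets mounted). The expansion follows plain shell semantics, assuming your Compose version interpolates these expressions the same way:

```bash
# ${VAR:+word} expands to "word" only when VAR is set and non-empty.
unset TGI_SAFETY_MODEL
echo "run${TGI_SAFETY_MODEL:+-with-safety}.yaml"   # -> run.yaml
echo "tgi-${TGI_SAFETY_MODEL:+safety}"             # -> tgi-

export TGI_SAFETY_MODEL=meta-llama/Llama-Guard-3-1B
echo "run${TGI_SAFETY_MODEL:+-with-safety}.yaml"   # -> run-with-safety.yaml
echo "tgi-${TGI_SAFETY_MODEL:+safety}"             # -> tgi-safety
```

Note that with `TGI_SAFETY_MODEL` unset the second service name collapses to `tgi-`, so the safety container is still defined; only the mounted run config changes.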
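Once the containers report healthy, a quick smoke test against the published ports can confirm the wiring. This sketch assumes the default port mappings from `compose.yaml` (8080 for the inference TGI, 8081 for the safety TGI) and uses TGI's standard `/health` and `/generate` routes:

```bash
# TGI health checks (the same endpoints the compose healthchecks poll)
curl -f http://localhost:8080/health && echo "inference TGI up"
curl -f http://localhost:8081/health && echo "safety TGI up"

# Minimal TGI generation request against the inference server
curl -s http://localhost:8080/generate \
  -H 'Content-Type: application/json' \
  -d '{"inputs": "What is the capital of France?", "parameters": {"max_new_tokens": 32}}'
```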
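The generated `run.yaml` files resolve their endpoints from the environment (`${env.TGI_URL}`, `${env.INFERENCE_MODEL}`, `${env.SQLITE_STORE_DIR:...}`), so when running the stack outside Docker against a TGI server you started yourself, those variables need to be exported first. A rough sketch following the conda flow in the docs above; the URL, model name, and config path are illustrative placeholders:

```bash
# Point the stack at a TGI server you launched yourself (see "Start TGI server locally")
export TGI_URL=http://127.0.0.1:5009
export INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct
export SQLITE_STORE_DIR=~/.llama/distributions/tgi   # optional; matches the default in run.yaml

llama stack build --template tgi --image-type conda
llama stack run distributions/tgi/run.yaml
```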