From 4cf1952c32b74be607fd1aefb026a65e70d8b7ef Mon Sep 17 00:00:00 2001
From: ehhuang
Date: Thu, 10 Jul 2025 14:40:17 -0700
Subject: [PATCH] chore: update vllm k8s command to support tool calling (#2717)

# What does this PR do?

## Test Plan
---
 docs/source/distributions/k8s/apply.sh               | 10 +++++-----
 docs/source/distributions/k8s/vllm-k8s.yaml.template |  2 +-
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/docs/source/distributions/k8s/apply.sh b/docs/source/distributions/k8s/apply.sh
index 06b1ea10c..7b403d34e 100755
--- a/docs/source/distributions/k8s/apply.sh
+++ b/docs/source/distributions/k8s/apply.sh
@@ -6,12 +6,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-export POSTGRES_USER=${POSTGRES_USER:-llamastack}
-export POSTGRES_DB=${POSTGRES_DB:-llamastack}
-export POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-llamastack}
+export POSTGRES_USER=llamastack
+export POSTGRES_DB=llamastack
+export POSTGRES_PASSWORD=llamastack
 
-export INFERENCE_MODEL=${INFERENCE_MODEL:-meta-llama/Llama-3.2-3B-Instruct}
-export SAFETY_MODEL=${SAFETY_MODEL:-meta-llama/Llama-Guard-3-1B}
+export INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct
+export SAFETY_MODEL=meta-llama/Llama-Guard-3-1B
 
 # HF_TOKEN should be set by the user; base64 encode it for the secret
 if [ -n "${HF_TOKEN:-}" ]; then
diff --git a/docs/source/distributions/k8s/vllm-k8s.yaml.template b/docs/source/distributions/k8s/vllm-k8s.yaml.template
index 03f3759c3..22bee4bbc 100644
--- a/docs/source/distributions/k8s/vllm-k8s.yaml.template
+++ b/docs/source/distributions/k8s/vllm-k8s.yaml.template
@@ -32,7 +32,7 @@ spec:
         image: vllm/vllm-openai:latest
         command: ["/bin/sh", "-c"]
         args:
-          - "vllm serve ${INFERENCE_MODEL} --dtype float16 --enforce-eager --max-model-len 4096 --gpu-memory-utilization 0.6"
+          - "vllm serve ${INFERENCE_MODEL} --dtype float16 --enforce-eager --max-model-len 4096 --gpu-memory-utilization 0.6 --enable-auto-tool-choice --tool-call-parser llama4_pythonic"
         env:
           - name: INFERENCE_MODEL
             value: "${INFERENCE_MODEL}"