chore: update vllm k8s command to support tool calling (#2717)

# What does this PR do?

Updates the vLLM `serve` command in the Kubernetes deployment to enable tool calling by adding the `--enable-auto-tool-choice` and `--tool-call-parser llama4_pythonic` flags, and adjusts the related environment-variable exports.

## Test Plan
This commit is contained in:
ehhuang 2025-07-10 14:40:17 -07:00 committed by GitHub
parent 5fe3027cbf
commit 4cf1952c32
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
2 changed files with 6 additions and 6 deletions

View file

@@ -6,12 +6,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-export POSTGRES_USER=${POSTGRES_USER:-llamastack}
-export POSTGRES_DB=${POSTGRES_DB:-llamastack}
-export POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-llamastack}
-export INFERENCE_MODEL=${INFERENCE_MODEL:-meta-llama/Llama-3.2-3B-Instruct}
-export SAFETY_MODEL=${SAFETY_MODEL:-meta-llama/Llama-Guard-3-1B}
+export POSTGRES_USER=llamastack
+export POSTGRES_DB=llamastack
+export POSTGRES_PASSWORD=llamastack
+export INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct
+export SAFETY_MODEL=meta-llama/Llama-Guard-3-1B
 # HF_TOKEN should be set by the user; base64 encode it for the secret
 if [ -n "${HF_TOKEN:-}" ]; then

View file

@@ -32,7 +32,7 @@ spec:
 image: vllm/vllm-openai:latest
 command: ["/bin/sh", "-c"]
 args:
-  - "vllm serve ${INFERENCE_MODEL} --dtype float16 --enforce-eager --max-model-len 4096 --gpu-memory-utilization 0.6"
+  - "vllm serve ${INFERENCE_MODEL} --dtype float16 --enforce-eager --max-model-len 4096 --gpu-memory-utilization 0.6 --enable-auto-tool-choice --tool-call-parser llama4_pythonic"
 env:
 - name: INFERENCE_MODEL
   value: "${INFERENCE_MODEL}"