chore: update vllm k8s command to support tool calling

# What does this PR do?

Updates the vLLM Kubernetes deployment so the server can handle tool calling: adds `--enable-auto-tool-choice` and `--tool-call-parser llama4_pythonic` to the `vllm serve` args.
## Test Plan
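A minimal way to verify tool calling end to end, sketched under the assumption that the vLLM service is port-forwarded to `localhost:8000` and that `INFERENCE_MODEL` points at a Llama 4 model; the `get_weather` tool and model name below are placeholders, not part of this change:

```python
from openai import OpenAI

# vLLM exposes an OpenAI-compatible API; assumes the k8s service is
# port-forwarded to localhost:8000 (e.g. kubectl port-forward).
client = OpenAI(base_url="http://localhost:8000/v1", api_key="not-needed")

# Hypothetical tool definition used only to exercise tool calling.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather for a city",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }
]

resp = client.chat.completions.create(
    model="meta-llama/Llama-4-Scout-17B-16E-Instruct",  # placeholder; use the value of INFERENCE_MODEL
    messages=[{"role": "user", "content": "What's the weather in Seattle?"}],
    tools=tools,
    tool_choice="auto",
)

print(resp.choices[0].message.tool_calls)
```

With `--enable-auto-tool-choice` and the `llama4_pythonic` parser in place, the response should carry structured `tool_calls` rather than raw pythonic tool-call text in the message content.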
Eric Huang, 2025-07-10 14:39:01 -07:00
parent b18f4d1ccf, commit dbde805743
2 changed files with 6 additions and 6 deletions


@@ -32,7 +32,7 @@ spec:
image: vllm/vllm-openai:latest
command: ["/bin/sh", "-c"]
args:
- "vllm serve ${INFERENCE_MODEL} --dtype float16 --enforce-eager --max-model-len 4096 --gpu-memory-utilization 0.6"
- "vllm serve ${INFERENCE_MODEL} --dtype float16 --enforce-eager --max-model-len 4096 --gpu-memory-utilization 0.6 --enable-auto-tool-choice --tool-call-parser llama4_pythonic"
env:
- name: INFERENCE_MODEL
value: "${INFERENCE_MODEL}"