apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: vllm-models
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem
  storageClassName: gp2
  resources:
    requests:
      storage: 50Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: vllm-server
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: vllm
  template:
    metadata:
      labels:
        app.kubernetes.io/name: vllm
        workload-type: inference
    spec:
      nodeSelector:
        eks.amazonaws.com/nodegroup: gpu
      containers:
      - name: vllm
        image: vllm/vllm-openai:latest
        command: ["/bin/sh", "-c"]
        args:
          - "vllm serve ${INFERENCE_MODEL} --dtype float16 --enforce-eager --max-model-len 4096 --gpu-memory-utilization 0.6"
        env:
        - name: HUGGING_FACE_HUB_TOKEN
          valueFrom:
            secretKeyRef:
              name: hf-token-secret
              key: token
        ports:
        - containerPort: 8000
        resources:
          requests:
            nvidia.com/gpu: 1
          limits:
            nvidia.com/gpu: 1
        volumeMounts:
        - name: llama-storage
          mountPath: /root/.cache/huggingface
      volumes:
      - name: llama-storage
        persistentVolumeClaim:
          claimName: vllm-models
---
apiVersion: v1
kind: Service
metadata:
  name: vllm-server
spec:
  selector:
    app.kubernetes.io/name: vllm
  ports:
  - protocol: TCP
    port: 8000
    targetPort: 8000
  type: ClusterIP
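# Usage note (a sketch, not part of the manifest): this assumes the referenced
# hf-token-secret already exists in the namespace, and that ${INFERENCE_MODEL}
# is either substituted before applying (e.g. with envsubst) or otherwise set
# in the container environment. The file name vllm-k8s.yaml below is only a
# placeholder for wherever this manifest is saved.
#
#   kubectl create secret generic hf-token-secret --from-literal=token=<your HF token>
#   envsubst < vllm-k8s.yaml | kubectl apply -f -
#   kubectl port-forward svc/vllm-server 8000:8000
#
# The vLLM container serves an OpenAI-compatible API on port 8000, so any
# OpenAI-compatible client pointed at http://localhost:8000/v1 (or at
# http://vllm-server:8000/v1 from inside the cluster) can be used to verify
# the deployment.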