apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: llama-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: llama-stack-server
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: llama-stack
      app.kubernetes.io/component: server
  template:
    metadata:
      labels:
        app.kubernetes.io/name: llama-stack
        app.kubernetes.io/component: server
    spec:
      initContainers:
        # Block startup until the vLLM inference server is reachable.
        - name: wait-for-vllm-server
          image: busybox:1.28
          command: ['sh', '-c', 'until nc -z vllm-server.default.svc.cluster.local 8001; do echo waiting for vllm-server on port 8001; sleep 2; done;']
        # Block startup until the NIM code-model server is reachable.
        - name: wait-for-llm-nim-code
          image: busybox:1.28
          command: ['sh', '-c', 'until nc -z llm-nim-code.default.svc.cluster.local 8000; do echo waiting for llm-nim-code on port 8000; sleep 2; done;']
      containers:
        - name: llama-stack
          image: llamastack/distribution-starter:0.2.15
          resources:
            requests:
              memory: "512Mi"
              cpu: "500m"
              ephemeral-storage: "6Gi"
            limits:
              memory: "1Gi"
              cpu: "1000m"
              ephemeral-storage: "6Gi"
          env:
            - name: ENABLE_CHROMADB
              value: "true"
            - name: CHROMADB_URL
              value: http://chromadb.default.svc.cluster.local:6000
            - name: VLLM_URL
              value: http://vllm-server.default.svc.cluster.local:8001/v1
            - name: VLLM_MAX_TOKENS
              value: "3072"
            - name: NVIDIA_BASE_URL
              value: http://llm-nim-code.default.svc.cluster.local:8000
            - name: OLLAMA_BASE_URL
              value: http://ollama-safety.default.svc.cluster.local:8000
            - name: POSTGRES_HOST
              value: postgres-server.default.svc.cluster.local
            - name: POSTGRES_PORT
              value: "5432"
            - name: VLLM_TLS_VERIFY
              value: "false"
            - name: INFERENCE_MODEL
              value: "${INFERENCE_MODEL}"
            - name: CODE_MODEL
              value: "${CODE_MODEL}"
            - name: TAVILY_SEARCH_API_KEY
              value: "${TAVILY_SEARCH_API_KEY}"
            - name: OLLAMA_MODEL
              value: "${OLLAMA_MODEL}"
          command: ["/bin/sh"]
          args:
            - -c
            - |
              # Install pip and git
              /usr/local/bin/python -m pip install --upgrade pip
              apt-get update && apt-get install -y git
              # Clone the repository and switch to the demo branch
              git clone https://github.com/meta-llama/llama-stack.git /app
              cd /app
              git checkout k8s_demo
              # Install llama-stack from source (the project root holds the package metadata)
              pip install -e .
              # Run the llama-stack server against the mounted run config
              python -m llama_stack.distribution.server.server --config /etc/config/stack_run_config.yaml --port 8321
          ports:
            - containerPort: 8321
          volumeMounts:
            - name: llama-storage
              mountPath: /root/.llama
            - name: llama-config
              mountPath: /etc/config
      volumes:
        - name: llama-storage
          persistentVolumeClaim:
            claimName: llama-pvc
        - name: llama-config
          configMap:
            name: llama-stack-config
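---
# The Deployment above mounts a ConfigMap named llama-stack-config that is not
# defined in this manifest. A hypothetical skeleton follows, assuming the run
# config is keyed as stack_run_config.yaml to match the --config path used by
# the server; the actual config contents (providers, models, APIs) come from
# the k8s_demo branch of https://github.com/meta-llama/llama-stack and are not
# reproduced here.
apiVersion: v1
kind: ConfigMap
metadata:
  name: llama-stack-config
data:
  stack_run_config.yaml: |
    # Placeholder: replace with the full llama-stack run config
    # (providers, models, APIs) from the k8s_demo branch.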
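---
# The Deployment exposes containerPort 8321, but no Service is declared in this
# section. Below is a minimal sketch, assuming in-cluster clients should reach
# the server at llama-stack-server.default.svc.cluster.local:8321; the Service
# name and port mapping are assumptions, not part of the original manifest.
# For local testing you could port-forward it, e.g.:
#   kubectl port-forward svc/llama-stack-server 8321:8321
apiVersion: v1
kind: Service
metadata:
  name: llama-stack-server
spec:
  selector:
    app.kubernetes.io/name: llama-stack
    app.kubernetes.io/component: server
  ports:
    - name: http
      protocol: TCP
      port: 8321
      targetPort: 8321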