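# Kubernetes manifest for the Llama Stack server: a PersistentVolumeClaim for
# model/state storage and a Deployment that waits for its backing inference
# services (vLLM, the NIM code model, Ollama) before starting the stack server.
#
# Assumed usage (not defined in this file): the "${...}" placeholders below are
# substituted before applying, e.g.
#
#   export INFERENCE_MODEL=... CODE_MODEL=... OLLAMA_MODEL=... TAVILY_SEARCH_API_KEY=...
#   envsubst < stack-k8s.yaml | kubectl apply -f -   # file name is illustrative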
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: llama-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
---
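# Deployment running a single replica of the Llama Stack server.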
apiVersion: apps/v1
kind: Deployment
metadata:
  name: llama-stack-server
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: llama-stack
      app.kubernetes.io/component: server
  template:
    metadata:
      labels:
        app.kubernetes.io/name: llama-stack
        app.kubernetes.io/component: server
    spec:
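      # Init container: block pod startup until every dependent service accepts
      # TCP connections (vLLM on 8001, the NIM code model on 8000, Ollama on 11434).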
      initContainers:
        - name: wait-for-services
          image: busybox:1.28
          command: ['sh', '-c', '
            echo "Waiting for all required services to be ready...";

            echo "Checking vllm-server...";
            until nc -z vllm-server.default.svc.cluster.local 8001; do
              echo "waiting for vllm-server on port 8001";
              sleep 2;
            done;
            echo "vllm-server is ready!";

            echo "Checking llm-nim-code...";
            until nc -z llm-nim-code.default.svc.cluster.local 8000; do
              echo "waiting for llm-nim-code on port 8000";
              sleep 2;
            done;
            echo "llm-nim-code is ready!";

            echo "Checking ollama-safety...";
            until nc -z ollama-safety.default.svc.cluster.local 11434; do
              echo "waiting for ollama-safety on port 11434";
              sleep 2;
            done;
            echo "ollama-safety is ready!";

            echo "All services are ready!";
          ']
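      # Main container: the published starter distribution image, pinned to 0.2.15.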
      containers:
        - name: llama-stack
          image: llamastack/distribution-starter:0.2.15
          resources:
            requests:
              memory: "2Gi"
              cpu: "4000m"
              ephemeral-storage: "6Gi"
            limits:
              memory: "2Gi"
              cpu: "4000m"
              ephemeral-storage: "6Gi"
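          # Provider endpoints and settings: vLLM, the NIM code model, Ollama,
          # ChromaDB, Postgres, and OpenTelemetry export to Jaeger.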
          env:
            - name: ENABLE_CHROMADB
              value: "true"
            - name: CHROMADB_URL
              value: http://chromadb.default.svc.cluster.local:6000
            - name: VLLM_URL
              value: http://vllm-server.default.svc.cluster.local:8001/v1
            - name: VLLM_MAX_TOKENS
              value: "80000"
            - name: OTEL_EXPORTER_OTLP_ENDPOINT
              value: http://jaeger-dev-collector.observability:4318
            - name: OTEL_SERVICE_NAME
              value: llama-stack
            - name: NVIDIA_BASE_URL
              value: http://llm-nim-code.default.svc.cluster.local:8000
            - name: OLLAMA_BASE_URL
              value: http://ollama-safety.default.svc.cluster.local:11434
            - name: POSTGRES_HOST
              value: postgres-server.default.svc.cluster.local
            - name: POSTGRES_PORT
              value: "5432"
            - name: VLLM_TLS_VERIFY
              value: "false"
            - name: INFERENCE_MODEL
              value: "${INFERENCE_MODEL}"
            - name: CODE_MODEL
              value: "${CODE_MODEL}"
            - name: TAVILY_SEARCH_API_KEY
              value: "${TAVILY_SEARCH_API_KEY}"
            - name: OLLAMA_MODEL
              value: "${OLLAMA_MODEL}"
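          # Instead of launching the image's baked-in entrypoint, install the
          # stack from source at a pinned commit and run the server against the
          # run config mounted from the ConfigMap below.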
command: ["/bin/sh"]
|
|
args:
|
|
- -c
|
|
- |
|
|
# Install pip and git
|
|
/usr/local/bin/python -m pip install --upgrade pip
|
|
apt-get update && apt-get install -y git
|
|
# Clone the repository
|
|
git clone https://github.com/meta-llama/llama-stack.git /app
|
|
git checkout 7f83433
|
|
|
|
cd /app/llama_stack/
|
|
# Install llama-stack
|
|
pip install -e .
|
|
# Run the llama-stack server
|
|
python -m llama_stack.distribution.server.server --config /etc/config/stack_run_config.yaml --port 8321
|
|
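          # 8321 is the Llama Stack HTTP API port. The PVC backs /root/.llama
          # (model and provider state); the llama-stack-config ConfigMap is
          # expected to exist separately and provide stack_run_config.yaml.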
          ports:
            - containerPort: 8321
          volumeMounts:
            - name: llama-storage
              mountPath: /root/.llama
            - name: llama-config
              mountPath: /etc/config
      volumes:
        - name: llama-storage
          persistentVolumeClaim:
            claimName: llama-pvc
        - name: llama-config
          configMap:
            name: llama-stack-config