# What does this PR do?

1. Added a simple mock openai-compat server that serves chat completions
2. Added a benchmark server setup in EKS that includes the mock inference server
3. Added a locust (https://locust.io/) file for load testing

## Test Plan

```bash
bash apply.sh
kubectl port-forward service/locust-web-ui 8089:8089
```

Go to localhost:8089 to start a load test.

<img width="1392" height="334" alt="image" src="https://github.com/user-attachments/assets/d6aa3deb-583a-42ed-889b-751262b8e91c" />
<img width="1362" height="881" alt="image" src="https://github.com/user-attachments/assets/6a28b9b4-05e6-44e2-b504-07e60c12d35e" />
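The mock server's implementation isn't reproduced in this description. As a rough sketch only, an OpenAI-compatible chat-completions stub could look like the following; the FastAPI app, the canned response shape, and the module name `mock_server.py` are all assumptions here, not the PR's actual code:

```python
# mock_server.py - hypothetical sketch of an OpenAI-compatible stub; the PR's
# actual mock server may differ. Run with: uvicorn mock_server:app --port 8080
from fastapi import FastAPI

app = FastAPI()


@app.post("/v1/chat/completions")
async def chat_completions(request: dict) -> dict:
    # Return a canned completion so load tests exercise the serving path
    # without needing a GPU-backed model.
    return {
        "id": "chatcmpl-mock",
        "object": "chat.completion",
        "model": request.get("model", "mock-model"),
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": "Hello from the mock server."},
                "finish_reason": "stop",
            }
        ],
        "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
    }
```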
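The locust file itself also isn't shown above. A minimal sketch of a locustfile that drives a chat-completions endpoint might look like this (the endpoint path, payload, and wait times are assumptions):

```python
# locustfile.py - hypothetical sketch; point --host at the mock/inference service,
# e.g.: locust -f locustfile.py --host http://<service>:<port>
from locust import HttpUser, task, between


class ChatCompletionUser(HttpUser):
    wait_time = between(1, 2)  # pause 1-2s between requests per simulated user

    @task
    def chat_completion(self):
        self.client.post(
            "/v1/chat/completions",
            json={
                "model": "mock-model",
                "messages": [{"role": "user", "content": "Hello!"}],
            },
        )
```

With the web UI from the test plan above, user count and spawn rate are set in the form at localhost:8089; the same file also works with locust's `--headless` mode.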
The Kubernetes manifest for the Llama Stack server (a PersistentVolumeClaim plus the server Deployment):
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: llama-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: llama-stack-server
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: llama-stack
      app.kubernetes.io/component: server
  template:
    metadata:
      labels:
        app.kubernetes.io/name: llama-stack
        app.kubernetes.io/component: server
    spec:
      containers:
        - name: llama-stack
          image: llamastack/distribution-starter:latest
          imagePullPolicy: Always # since we have specified latest instead of a version
          env:
            - name: ENABLE_CHROMADB
              value: "true"
            - name: CHROMADB_URL
              value: http://chromadb.default.svc.cluster.local:6000
            - name: VLLM_URL
              value: http://vllm-server.default.svc.cluster.local:8000/v1
            - name: VLLM_MAX_TOKENS
              value: "3072"
            - name: VLLM_SAFETY_URL
              value: http://vllm-server-safety.default.svc.cluster.local:8001/v1
            - name: VLLM_TLS_VERIFY
              value: "false"
            - name: POSTGRES_HOST
              value: postgres-server.default.svc.cluster.local
            - name: POSTGRES_PORT
              value: "5432"
            - name: INFERENCE_MODEL
              value: "${INFERENCE_MODEL}"
            - name: SAFETY_MODEL
              value: "${SAFETY_MODEL}"
            - name: TAVILY_SEARCH_API_KEY
              value: "${TAVILY_SEARCH_API_KEY}"
          command: ["python", "-m", "llama_stack.core.server.server", "/etc/config/stack_run_config.yaml", "--port", "8321"]
          ports:
            - containerPort: 8321
          volumeMounts:
            - name: llama-storage
              mountPath: /root/.llama
            - name: llama-config
              mountPath: /etc/config
      volumes:
        - name: llama-storage
          persistentVolumeClaim:
            claimName: llama-pvc
        - name: llama-config
          configMap:
            name: llama-stack-config
```
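Note that `${INFERENCE_MODEL}`, `${SAFETY_MODEL}`, and `${TAVILY_SEARCH_API_KEY}` are shell-style placeholders, so this manifest is a template that has to be rendered before `kubectl apply`. The PR's apply.sh isn't shown here; one plausible rendering step uses envsubst (the filename and model values below are illustrative):

```bash
# Illustrative only; apply.sh may handle this differently.
export INFERENCE_MODEL=meta-llama/Llama-3.1-8B-Instruct
export SAFETY_MODEL=meta-llama/Llama-Guard-3-1B
export TAVILY_SEARCH_API_KEY=...   # optional web-search key
envsubst < stack-k8s.yaml.template | kubectl apply -f -
```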