Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-08-21 17:33:12 +00:00)
# What does this PR do?

1. Add our own benchmark script instead of locust (locust doesn't support measuring streaming latency well)
2. Simplify the k8s deployment
3. Add a simple profile script for a locally running server

## Test Plan

```
❮ ./run-benchmark.sh --target stack --duration 180 --concurrent 10
============================================================
BENCHMARK RESULTS
============================================================
Total time: 180.00s
Concurrent users: 10
Total requests: 1636
Successful requests: 1636
Failed requests: 0
Success rate: 100.0%
Requests per second: 9.09

Response Time Statistics:
  Mean: 1.095s
  Median: 1.721s
  Min: 0.136s
  Max: 3.218s
  Std Dev: 0.762s

Percentiles:
  P50: 1.721s
  P90: 1.751s
  P95: 1.756s
  P99: 1.796s

Time to First Token (TTFT) Statistics:
  Mean: 0.037s
  Median: 0.037s
  Min: 0.023s
  Max: 0.211s
  Std Dev: 0.011s

TTFT Percentiles:
  P50: 0.037s
  P90: 0.040s
  P95: 0.044s
  P99: 0.055s

Streaming Statistics:
  Mean chunks per response: 64.0
  Total chunks received: 104775
```
83 lines · 2.3 KiB · Text
# PersistentVolumeClaim backing the benchmark server's /root/.llama storage
# (mounted by the llama-stack-benchmark-server Deployment below).
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: llama-benchmark-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
---
# Benchmark llama-stack server: runs the stack server against the in-cluster
# vLLM, ChromaDB, and Postgres services, serving on port 8323.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: llama-stack-benchmark-server
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: llama-stack-benchmark
      app.kubernetes.io/component: server
  template:
    metadata:
      labels:
        app.kubernetes.io/name: llama-stack-benchmark
        app.kubernetes.io/component: server
    spec:
      containers:
        - name: llama-stack-benchmark
          image: llamastack/distribution-starter:latest
          # "latest" tag instead of a pinned version, so always re-pull.
          imagePullPolicy: Always
          env:
            # Env values must be strings — keep boolean/numeric-looking
            # values quoted so YAML does not retype them.
            - name: ENABLE_CHROMADB
              value: "true"
            - name: CHROMADB_URL
              value: http://chromadb.default.svc.cluster.local:6000
            - name: POSTGRES_HOST
              value: postgres-server.default.svc.cluster.local
            - name: POSTGRES_PORT
              value: "5432"
            # ${...} placeholders are substituted by the deploy script
            # (e.g. envsubst) before kubectl apply.
            - name: INFERENCE_MODEL
              value: "${INFERENCE_MODEL}"
            - name: SAFETY_MODEL
              value: "${SAFETY_MODEL}"
            - name: TAVILY_SEARCH_API_KEY
              value: "${TAVILY_SEARCH_API_KEY}"
            - name: VLLM_URL
              value: http://vllm-server.default.svc.cluster.local:8000/v1
            - name: VLLM_MAX_TOKENS
              value: "3072"
            - name: VLLM_SAFETY_URL
              value: http://vllm-server-safety.default.svc.cluster.local:8001/v1
            - name: VLLM_TLS_VERIFY
              value: "false"
          # Run the stack server with the ConfigMap-provided run config.
          command: ["python", "-m", "llama_stack.core.server.server", "/etc/config/stack_run_config.yaml", "--port", "8323"]
          ports:
            - containerPort: 8323
          volumeMounts:
            - name: llama-storage
              mountPath: /root/.llama
            - name: llama-config
              mountPath: /etc/config
      volumes:
        - name: llama-storage
          persistentVolumeClaim:
            claimName: llama-benchmark-pvc
        - name: llama-config
          configMap:
            name: llama-stack-config
---
# ClusterIP Service exposing the benchmark server inside the cluster
# on port 8323 (matches the Deployment's containerPort).
apiVersion: v1
kind: Service
metadata:
  name: llama-stack-benchmark-service
spec:
  selector:
    app.kubernetes.io/name: llama-stack-benchmark
    app.kubernetes.io/component: server
  ports:
    - name: http
      port: 8323
      targetPort: 8323
  type: ClusterIP