# What does this PR do?

- Mostly AI-generated scripts to run guidellm (https://github.com/vllm-project/guidellm) benchmarks on a k8s setup
- The stack uses an image built from `main` on 9/11

## Test Plan

See the updated README.md.
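For context, a load-generation run against the deployed stack might look like the sketch below. This is a hypothetical invocation, not the PR's script: the flags follow guidellm's README and may differ by version, and the port-forwarded target assumes the Service defined in the manifest that follows.

```bash
# Expose the benchmark Service locally (defined in the manifest below)
kubectl port-forward svc/llama-stack-benchmark-service 8323:8323 &

# Drive synthetic load; flag names follow guidellm's README and may vary by version
guidellm benchmark \
  --target "http://localhost:8323" \
  --rate-type sweep \
  --max-seconds 60 \
  --data "prompt_tokens=256,output_tokens=128"
```

The Kubernetes manifest for the benchmark server follows.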
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: llama-benchmark-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: llama-stack-benchmark-server
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: llama-stack-benchmark
      app.kubernetes.io/component: server
  template:
    metadata:
      labels:
        app.kubernetes.io/name: llama-stack-benchmark
        app.kubernetes.io/component: server
    spec:
      containers:
        - name: llama-stack-benchmark
          image: llamastack/distribution-starter:latest
          imagePullPolicy: Always # since we have specified latest instead of a version
          env:
            - name: ENABLE_CHROMADB
              value: "true"
            - name: CHROMADB_URL
              value: http://chromadb.default.svc.cluster.local:6000
            - name: POSTGRES_HOST
              value: postgres-server.default.svc.cluster.local
            - name: POSTGRES_PORT
              value: "5432"
            - name: INFERENCE_MODEL
              value: "${INFERENCE_MODEL}"
            - name: SAFETY_MODEL
              value: "${SAFETY_MODEL}"
            - name: TAVILY_SEARCH_API_KEY
              value: "${TAVILY_SEARCH_API_KEY}"
            - name: VLLM_URL
              value: http://vllm-server.default.svc.cluster.local:8000/v1
            - name: VLLM_MAX_TOKENS
              value: "3072"
            - name: VLLM_SAFETY_URL
              value: http://vllm-server-safety.default.svc.cluster.local:8001/v1
            - name: VLLM_TLS_VERIFY
              value: "false"
            - name: LLAMA_STACK_LOGGING
              value: "all=WARNING"
            - name: LLAMA_STACK_CONFIG
              value: "/etc/config/stack_run_config.yaml"
            - name: LLAMA_STACK_WORKERS
              value: "${LLAMA_STACK_WORKERS}"
          command: ["uvicorn", "llama_stack.core.server.server:create_app", "--host", "0.0.0.0", "--port", "8323", "--workers", "$(LLAMA_STACK_WORKERS)", "--factory"]
          ports:
            - containerPort: 8323
          resources:
            requests:
              cpu: "4"
            limits:
              cpu: "4"
          volumeMounts:
            - name: llama-storage
              mountPath: /root/.llama
            - name: llama-config
              mountPath: /etc/config
      volumes:
        - name: llama-storage
          persistentVolumeClaim:
            claimName: llama-benchmark-pvc
        - name: llama-config
          configMap:
            name: llama-stack-config
---
apiVersion: v1
kind: Service
metadata:
  name: llama-stack-benchmark-service
spec:
  selector:
    app.kubernetes.io/name: llama-stack-benchmark
    app.kubernetes.io/component: server
  ports:
    - name: http
      port: 8323
      targetPort: 8323
  type: ClusterIP
```
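Note that the `${INFERENCE_MODEL}`-style placeholders are shell-style, not something Kubernetes resolves: they must be substituted before the manifest is applied. A minimal sketch, assuming `envsubst` and a hypothetical file name `benchmark.yaml`:

```bash
# Example values only; set SAFETY_MODEL, TAVILY_SEARCH_API_KEY, etc. the same way
export INFERENCE_MODEL="meta-llama/Llama-3.1-8B-Instruct"
export LLAMA_STACK_WORKERS="4"

# envsubst expands the ${...} placeholders; the file name is hypothetical
envsubst < benchmark.yaml | kubectl apply -f -
```

By contrast, the `$(LLAMA_STACK_WORKERS)` reference inside `command` uses Kubernetes' own `$(VAR)` expansion against the container's `env` at runtime, so it passes through `envsubst` untouched.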