Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-07-20 11:47:00 +00:00.
Some checks failed
Integration Auth Tests / test-matrix (oauth2_token) (push) Failing after 1s
Integration Tests / discover-tests (push) Successful in 3s
Vector IO Integration Tests / test-matrix (3.12, inline::sqlite-vec) (push) Failing after 4s
Python Package Build Test / build (3.12) (push) Failing after 3s
Vector IO Integration Tests / test-matrix (3.12, inline::faiss) (push) Failing after 8s
Vector IO Integration Tests / test-matrix (3.12, remote::pgvector) (push) Failing after 7s
Vector IO Integration Tests / test-matrix (3.13, inline::milvus) (push) Failing after 7s
Vector IO Integration Tests / test-matrix (3.12, remote::chromadb) (push) Failing after 10s
Vector IO Integration Tests / test-matrix (3.13, inline::sqlite-vec) (push) Failing after 8s
Vector IO Integration Tests / test-matrix (3.13, remote::chromadb) (push) Failing after 8s
SqlStore Integration Tests / test-postgres (3.12) (push) Failing after 14s
Unit Tests / unit-tests (3.12) (push) Failing after 5s
Update ReadTheDocs / update-readthedocs (push) Failing after 3s
Python Package Build Test / build (3.13) (push) Failing after 7s
Vector IO Integration Tests / test-matrix (3.12, inline::milvus) (push) Failing after 14s
Vector IO Integration Tests / test-matrix (3.13, inline::faiss) (push) Failing after 12s
Vector IO Integration Tests / test-matrix (3.13, remote::pgvector) (push) Failing after 11s
Test External Providers / test-external-providers (venv) (push) Failing after 50s
SqlStore Integration Tests / test-postgres (3.13) (push) Failing after 58s
Unit Tests / unit-tests (3.13) (push) Failing after 54s
Integration Tests / test-matrix (push) Failing after 53s
Pre-commit / pre-commit (push) Successful in 1m40s
# What does this PR do?

- Enables auth
- Updates to use the distribution-starter docker image

## Test Plan

```bash
bash apply.sh
```
69 lines · 2 KiB · Text
# PersistentVolumeClaim backing the llama-stack server's model/state
# directory (/root/.llama, mounted by the Deployment below).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: llama-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
---
# Deployment running the llama-stack server from the distribution-starter
# image, wired to in-cluster vLLM, Chroma, and Postgres services.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: llama-stack-server
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: llama-stack
      app.kubernetes.io/component: server
  template:
    metadata:
      labels:
        app.kubernetes.io/name: llama-stack
        app.kubernetes.io/component: server
    spec:
      containers:
        - name: llama-stack
          image: llamastack/distribution-starter:latest
          imagePullPolicy: Always  # since we have specified latest instead of a version
          env:
            # Vector-IO backend (Chroma) — boolean-like values quoted so the
            # consumer receives strings, not YAML booleans.
            - name: ENABLE_CHROMADB
              value: "true"
            - name: CHROMADB_URL
              value: http://chromadb.default.svc.cluster.local:6000
            # Inference backend (vLLM) and its safety-model twin.
            - name: VLLM_URL
              value: http://vllm-server.default.svc.cluster.local:8000/v1
            - name: VLLM_MAX_TOKENS
              value: "3072"
            - name: VLLM_SAFETY_URL
              value: http://vllm-server-safety.default.svc.cluster.local:8001/v1
            - name: VLLM_TLS_VERIFY
              value: "false"
            # SQL store (Postgres).
            - name: POSTGRES_HOST
              value: postgres-server.default.svc.cluster.local
            - name: POSTGRES_PORT
              value: "5432"
            # Model identifiers are substituted by the deploy script
            # (e.g. apply.sh / envsubst) before kubectl apply.
            - name: INFERENCE_MODEL
              value: "${INFERENCE_MODEL}"
            - name: SAFETY_MODEL
              value: "${SAFETY_MODEL}"
            - name: TAVILY_SEARCH_API_KEY
              value: "${TAVILY_SEARCH_API_KEY}"
          command: ["python", "-m", "llama_stack.distribution.server.server", "--config", "/etc/config/stack_run_config.yaml", "--port", "8321"]
          ports:
            - containerPort: 8321
          volumeMounts:
            # Persistent model/state storage (PVC llama-pvc).
            - name: llama-storage
              mountPath: /root/.llama
            # Run config mounted from the llama-stack-config ConfigMap.
            - name: llama-config
              mountPath: /etc/config
      volumes:
        - name: llama-storage
          persistentVolumeClaim:
            claimName: llama-pvc
        - name: llama-config
          configMap:
            name: llama-stack-config