Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-06-28 10:54:19 +00:00)
more fixes, gah
commit 6cbb3366f2
parent 6f4f51f8d9
3 changed files with 16 additions and 1 deletion
@@ -31,6 +31,8 @@ spec:
         env:
         - name: ENABLE_CHROMADB
           value: "true"
+        - name: CHROMADB_URL
+          value: http://chromadb.default.svc.cluster.local:6000
         - name: VLLM_URL
           value: http://vllm-server.default.svc.cluster.local:8000/v1
         - name: VLLM_MAX_TOKENS
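Note: the new CHROMADB_URL assumes a ChromaDB Service resolvable at chromadb.default.svc.cluster.local:6000. That manifest is not part of this diff; a minimal sketch of what it would look like, where the selector label and targetPort are assumptions (only the name, namespace, and port are fixed by the URL above):

apiVersion: v1
kind: Service
metadata:
  name: chromadb                         # name and namespace are fixed by CHROMADB_URL
  namespace: default
spec:
  selector:
    app.kubernetes.io/name: chromadb     # assumed pod label, not shown in this diff
  ports:
  - protocol: TCP
    port: 6000                           # matches the port in CHROMADB_URL
    targetPort: 8000                     # assumption: container listens on Chroma's default 8000
  type: ClusterIP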
@@ -57,3 +57,16 @@ spec:
       - name: llama-storage
         persistentVolumeClaim:
           claimName: vllm-models
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: vllm-server
+spec:
+  selector:
+    app.kubernetes.io/name: vllm
+  ports:
+  - protocol: TCP
+    port: 8000
+    targetPort: 8000
+  type: ClusterIP
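Note: this new Service is what makes the VLLM_URL set above (http://vllm-server.default.svc.cluster.local:8000/v1) resolvable in-cluster, via the standard <service>.<namespace>.svc.cluster.local DNS pattern. For traffic to actually flow, the vLLM Deployment's pod template must carry the label the selector matches on; a minimal sketch of that pairing, where the Deployment name and container details are assumptions (only the label and image appear in this diff):

apiVersion: apps/v1
kind: Deployment
metadata:
  name: vllm-server                      # assumed name, not shown in this diff
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: vllm       # must match the Service selector above
  template:
    metadata:
      labels:
        app.kubernetes.io/name: vllm
    spec:
      containers:
      - name: vllm
        image: vllm/vllm-openai:latest
        ports:
        - containerPort: 8000            # the Service's targetPort routes here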
@@ -41,7 +41,7 @@ spec:
         image: vllm/vllm-openai:latest
         command: ["/bin/sh", "-c"]
         args: [
-          "vllm serve ${SAFETY_MODEL} --dtype float16 --enforce-eager --max-model-len 4096 --gpu-memory-utilization 0.3"
+          "vllm serve ${SAFETY_MODEL} --dtype float16 --enforce-eager --max-model-len 4096 --port 8001 --gpu-memory-utilization 0.3"
         ]
         env:
         - name: HUGGING_FACE_HUB_TOKEN
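Note: the only functional change in this hunk is the added --port 8001. vLLM's OpenAI-compatible server defaults to port 8000, so the flag moves the safety-model server to 8001, keeping it distinct from the main vllm-server endpoint on port 8000 configured above. Any Service fronting this pod would need to target 8001; a minimal sketch, where the name vllm-server-safety and the selector label are hypothetical (no such Service appears in this diff):

apiVersion: v1
kind: Service
metadata:
  name: vllm-server-safety               # hypothetical name
spec:
  selector:
    app.kubernetes.io/name: vllm-safety  # assumed label on the safety pods
  ports:
  - protocol: TCP
    port: 8001
    targetPort: 8001                     # matches the new --port 8001 flag
  type: ClusterIP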