# What does this PR do?

1. Added a simple mock openai-compat server that serves chat/completion requests.
2. Added a benchmark setup in EKS that includes the mock inference server.
3. Added a locust (https://locust.io/) file for load testing.

## Test Plan

```bash
bash apply.sh
kubectl port-forward service/locust-web-ui 8089:8089
```

Go to localhost:8089 to start a load test.

<img width="1392" height="334" alt="image" src="https://github.com/user-attachments/assets/d6aa3deb-583a-42ed-889b-751262b8e91c" />
<img width="1362" height="881" alt="image" src="https://github.com/user-attachments/assets/6a28b9b4-05e6-44e2-b504-07e60c12d35e" />
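The locustfile itself isn't reproduced in this section. As a rough sketch of what driving the mock server with locust can look like, the example below posts OpenAI-compatible chat/completion requests; the endpoint path, payload shape, and model name are assumptions rather than the PR's actual file.

```python
# Hypothetical locustfile sketch -- the real one lives in the PR; the route,
# payload, and model name below are assumptions, not copied from it.
from locust import HttpUser, task, between


class ChatCompletionUser(HttpUser):
    # Each simulated user waits 1-3 seconds between requests.
    wait_time = between(1, 3)

    @task
    def chat_completion(self):
        # POST an OpenAI-compatible chat/completions request to the mock server.
        self.client.post(
            "/v1/chat/completions",
            json={
                "model": "mock-model",  # assumed model name
                "messages": [{"role": "user", "content": "Hello!"}],
            },
        )
```

It would be pointed at the mock service with something like `locust -f locustfile.py --host http://openai-mock-service:8080`, using the service name and port from the manifest below.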
The mock openai-compat server is deployed and exposed inside the cluster with the following manifest:

```yaml
# ${...} placeholders are substituted before the manifest is applied
# (e.g., by apply.sh from the test plan above).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: openai-mock
  labels:
    app: openai-mock
spec:
  replicas: 1
  selector:
    matchLabels:
      app: openai-mock
  template:
    metadata:
      labels:
        app: openai-mock
    spec:
      containers:
        - name: openai-mock
          image: python:3.12-slim
          ports:
            - containerPort: ${MOCK_INFERENCE_PORT}
          env:
            - name: PORT
              value: "${MOCK_INFERENCE_PORT}"
            - name: MOCK_MODELS
              value: "${MOCK_INFERENCE_MODEL}"
            - name: STREAM_DELAY_SECONDS
              value: "${STREAM_DELAY_SECONDS}"
          # Install Flask at startup, then run the mock server script
          # mounted from the ConfigMap below.
          command: ["sh", "-c"]
          args:
            - |
              pip install flask &&
              python /app/openai-mock-server.py --port ${MOCK_INFERENCE_PORT}
          volumeMounts:
            - name: openai-mock-script
              mountPath: /app
      volumes:
        - name: openai-mock-script
          configMap:
            # The openai-mock ConfigMap, which carries openai-mock-server.py,
            # is defined elsewhere in the PR.
            name: openai-mock
---
apiVersion: v1
kind: Service
metadata:
  name: openai-mock-service
spec:
  selector:
    app: openai-mock
  ports:
    # NOTE: targetPort is hardcoded to 8080, so this Service assumes
    # MOCK_INFERENCE_PORT is set to 8080.
    - port: 8080
      targetPort: 8080
  type: ClusterIP
```
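The script the container runs, /app/openai-mock-server.py, ships in the openai-mock ConfigMap and isn't reproduced here. For orientation, a minimal Flask server satisfying this Deployment's expectations (a `--port` flag plus the `MOCK_MODELS` and `STREAM_DELAY_SECONDS` env vars) might look like the sketch below; the routes and response shape follow the OpenAI chat/completions API by assumption, not the PR's actual script.

```python
# Hypothetical sketch of what /app/openai-mock-server.py could look like.
# In the real script, STREAM_DELAY_SECONDS presumably paces streamed chunks;
# here it is modeled as a flat per-request delay.
import argparse
import os
import time

from flask import Flask, jsonify, request

app = Flask(__name__)

# Env vars the Deployment sets; the defaults are assumptions.
MOCK_MODELS = os.environ.get("MOCK_MODELS", "mock-model").split(",")
STREAM_DELAY_SECONDS = float(os.environ.get("STREAM_DELAY_SECONDS", "0"))


@app.route("/v1/models", methods=["GET"])
def list_models():
    return jsonify({
        "object": "list",
        "data": [{"id": m, "object": "model"} for m in MOCK_MODELS],
    })


@app.route("/v1/chat/completions", methods=["POST"])
def chat_completions():
    body = request.get_json(force=True)
    # Simulate generation latency before answering.
    time.sleep(STREAM_DELAY_SECONDS)
    return jsonify({
        "id": "chatcmpl-mock",
        "object": "chat.completion",
        "created": int(time.time()),
        "model": body.get("model", MOCK_MODELS[0]),
        "choices": [{
            "index": 0,
            "message": {"role": "assistant", "content": "This is a mock response."},
            "finish_reason": "stop",
        }],
        "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
    })


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--port", type=int, default=int(os.environ.get("PORT", 8080))
    )
    args = parser.parse_args()
    app.run(host="0.0.0.0", port=args.port)
```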