Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-18 02:49:49 +00:00
benchmark, local test, ttft, duration
# What does this PR do?

## Test Plan
This commit is contained in: parent f66ae3b3b1, commit 244ff9efbd
13 changed files with 633 additions and 328 deletions
```diff
@@ -8,7 +8,6 @@
 # Deploys the benchmark-specific components on top of the base k8s deployment (../k8s/apply.sh).

 export MOCK_INFERENCE_PORT=8080
 export STREAM_DELAY_SECONDS=0.005

 export POSTGRES_USER=llamastack
```
```diff
@@ -20,14 +19,7 @@ export SAFETY_MODEL=meta-llama/Llama-Guard-3-1B
 export MOCK_INFERENCE_MODEL=mock-inference

 # Use llama-stack-benchmark-service as the benchmark server
 export LOCUST_HOST=http://llama-stack-benchmark-service:8323
 export LOCUST_BASE_PATH=/v1/openai/v1

 # Use vllm-service as the benchmark server
 # export LOCUST_HOST=http://vllm-server:8000
 # export LOCUST_BASE_PATH=/v1

 export MOCK_INFERENCE_URL=openai-mock-service:8080

 export BENCHMARK_INFERENCE_MODEL=$INFERENCE_MODEL
```
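
For reference, here is a minimal, hypothetical sketch of what a Locust script driving this setup could look like. The real `locustfile.py` ships with this PR but is not visible in these hunks, so everything below is illustrative: Locust consumes `LOCUST_HOST` natively, while `LOCUST_BASE_PATH` and `BENCHMARK_INFERENCE_MODEL` are assumed to be read inside the script, and TTFT is approximated as time-to-first-chunk on the streaming endpoint.

```python
import os
import time

from locust import HttpUser, between, task

# Not built-in Locust settings; assumed to be read by the locustfile itself.
BASE_PATH = os.environ.get("LOCUST_BASE_PATH", "/v1/openai/v1")
MODEL = os.environ.get("BENCHMARK_INFERENCE_MODEL", "mock-inference")


class ChatCompletionUser(HttpUser):
    wait_time = between(0.5, 1.5)

    @task
    def chat_completion_streaming(self):
        payload = {
            "model": MODEL,
            "messages": [{"role": "user", "content": "Hello"}],
            "stream": True,
        }
        start = time.perf_counter()
        first_chunk_at = None
        # stream=True lets us time the first chunk (a TTFT proxy)
        # separately from the total request duration.
        with self.client.post(
            f"{BASE_PATH}/chat/completions",
            json=payload,
            stream=True,
            name="chat_completions_stream",
        ) as response:
            for line in response.iter_lines():
                if line and first_chunk_at is None:
                    first_chunk_at = time.perf_counter()
        total_ms = (time.perf_counter() - start) * 1000
        if first_chunk_at is not None:
            # Surface TTFT as its own entry in Locust's stats table.
            self.environment.events.request.fire(
                request_type="TTFT",
                name="chat_completions_ttft",
                response_time=(first_chunk_at - start) * 1000,
                response_length=0,
                exception=None,
                context={},
            )
        # Locust's built-in timing for a streamed request stops at the
        # response headers, so report full duration as a custom metric too.
        self.environment.events.request.fire(
            request_type="DURATION",
            name="chat_completions_duration",
            response_time=total_ms,
            response_length=0,
            exception=None,
            context={},
        )
```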
```diff
@@ -35,13 +27,6 @@ set -euo pipefail
 set -x

 # Deploy benchmark-specific components
 # Deploy OpenAI mock server
 kubectl create configmap openai-mock --from-file=openai-mock-server.py \
   --dry-run=client -o yaml | kubectl apply --validate=false -f -

 envsubst < openai-mock-deployment.yaml | kubectl apply --validate=false -f -

 # Create configmap with our custom stack config
 kubectl create configmap llama-stack-config --from-file=stack_run_config.yaml \
   --dry-run=client -o yaml > stack-configmap.yaml
```
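
The actual `openai-mock-server.py` is added elsewhere in this PR; the sketch below only illustrates the idea behind it: an OpenAI-compatible endpoint that streams canned chunks, listening on `MOCK_INFERENCE_PORT` and sleeping `STREAM_DELAY_SECONDS` between chunks so TTFT and duration are deterministic rather than bound by real inference. Flask is assumed here purely for brevity; the real server may be implemented differently.

```python
import json
import os
import time

from flask import Flask, Response, request

app = Flask(__name__)
PORT = int(os.environ.get("MOCK_INFERENCE_PORT", "8080"))
STREAM_DELAY = float(os.environ.get("STREAM_DELAY_SECONDS", "0.005"))


@app.post("/v1/chat/completions")  # assumed route; mirrors the OpenAI API shape
def chat_completions():
    body = request.get_json(force=True)

    def sse_chunks():
        for word in ["This", "is", "a", "mock", "response."]:
            chunk = {
                "object": "chat.completion.chunk",
                "model": body.get("model", "mock-inference"),
                "choices": [{"index": 0, "delta": {"content": word + " "}}],
            }
            yield f"data: {json.dumps(chunk)}\n\n"
            time.sleep(STREAM_DELAY)  # simulated per-token latency
        yield "data: [DONE]\n\n"

    return Response(sse_chunks(), mimetype="text/event-stream")


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=PORT)
```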
```diff
@@ -49,9 +34,3 @@ kubectl apply --validate=false -f stack-configmap.yaml
 # Deploy our custom llama stack server (overriding the base one)
 envsubst < stack-k8s.yaml.template | kubectl apply --validate=false -f -

 # Deploy Locust load testing
 kubectl create configmap locust-script --from-file=locustfile.py \
   --dry-run=client -o yaml | kubectl apply --validate=false -f -

 envsubst < locust-k8s.yaml | kubectl apply --validate=false -f -
```
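
Finally, a hypothetical smoke test (not part of the PR): after the apply completes, TTFT and duration can be spot-checked against the benchmark service with the `openai` client, mirroring the `LOCUST_HOST` and `LOCUST_BASE_PATH` exports above. From outside the cluster, run `kubectl port-forward service/llama-stack-benchmark-service 8323:8323` and swap the host for `localhost:8323`; the model name assumes the mock backend is being exercised.

```python
import time

from openai import OpenAI

# In-cluster URL built from the LOCUST_HOST/LOCUST_BASE_PATH exports above;
# the API key is a placeholder for a local benchmarking stack.
client = OpenAI(
    base_url="http://llama-stack-benchmark-service:8323/v1/openai/v1",
    api_key="placeholder",
)

start = time.perf_counter()
first_chunk_at = None
stream = client.chat.completions.create(
    model="mock-inference",  # assumed mock model; use your INFERENCE_MODEL otherwise
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
)
for _chunk in stream:
    if first_chunk_at is None:
        first_chunk_at = time.perf_counter()

print(f"ttft:     {first_chunk_at - start:.3f}s")
print(f"duration: {time.perf_counter() - start:.3f}s")
```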