# What does this PR do?

1. Add our own benchmark script instead of locust, which doesn't support measuring streaming latency well.
2. Simplify the k8s deployment.
3. Add a simple profile script for a locally running server.

## Test Plan

    ❮ ./run-benchmark.sh --target stack --duration 180 --concurrent 10

    ============================================================
    BENCHMARK RESULTS
    ============================================================
    Total time: 180.00s
    Concurrent users: 10
    Total requests: 1636
    Successful requests: 1636
    Failed requests: 0
    Success rate: 100.0%
    Requests per second: 9.09

    Response Time Statistics:
      Mean: 1.095s
      Median: 1.721s
      Min: 0.136s
      Max: 3.218s
      Std Dev: 0.762s

    Percentiles:
      P50: 1.721s
      P90: 1.751s
      P95: 1.756s
      P99: 1.796s

    Time to First Token (TTFT) Statistics:
      Mean: 0.037s
      Median: 0.037s
      Min: 0.023s
      Max: 0.211s
      Std Dev: 0.011s

    TTFT Percentiles:
      P50: 0.037s
      P90: 0.040s
      P95: 0.044s
      P99: 0.055s

    Streaming Statistics:
      Mean chunks per response: 64.0
      Total chunks received: 104775
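The custom script exists because locust makes it hard to measure streaming latency. As a rough illustration of what a TTFT measurement involves, here is a minimal curl-based sketch — illustrative only, not the script from this PR; the endpoint path, port, model name, and request payload are assumptions:

```bash
#!/usr/bin/env bash
# Minimal sketch of a time-to-first-token (TTFT) measurement for one streaming request.
# Illustrative only: ENDPOINT, MODEL, and the payload are placeholders, not values from this PR.
ENDPOINT="${ENDPOINT:-http://localhost:8321/v1/chat/completions}"
MODEL="${MODEL:-meta-llama/Llama-3.2-3B-Instruct}"

start=$(date +%s.%N)   # GNU date; %N (nanoseconds) is not available in BSD/macOS date

curl -sN "$ENDPOINT" \
  -H 'Content-Type: application/json' \
  -d '{"model": "'"$MODEL"'", "stream": true, "messages": [{"role": "user", "content": "Hi"}]}' |
while IFS= read -r line; do
  # Treat the first SSE "data:" line as the first token of the streamed response.
  if [[ "$line" == data:* ]]; then
    printf 'TTFT: %.3fs\n' "$(echo "$(date +%s.%N) - $start" | bc -l)"
    break
  fi
done
```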
36 lines · 1.1 KiB · Bash · Executable file
#!/usr/bin/env bash
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
# Deploys the benchmark-specific components on top of the base k8s deployment (../k8s/apply.sh).
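# (The base deployment in ../k8s is expected to be applied first; this script only layers the
# benchmark-specific ConfigMap and llama-stack server on top of it.)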
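# Delay (in seconds) between streamed chunks; assumed to be read by the mock OpenAI service
# below so that streamed responses take a realistic amount of time.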
export STREAM_DELAY_SECONDS=0.005
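# Postgres settings for the stack's persistence (the database itself is assumed to come from
# the base deployment).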
export POSTGRES_USER=llamastack
export POSTGRES_DB=llamastack
export POSTGRES_PASSWORD=llamastack
export INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct
export SAFETY_MODEL=meta-llama/Llama-Guard-3-1B
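# Mock inference provider (assumed to be served by openai-mock-service below) so benchmark runs
# can measure stack overhead without real model latency.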
export MOCK_INFERENCE_MODEL=mock-inference
export MOCK_INFERENCE_URL=openai-mock-service:8080

export BENCHMARK_INFERENCE_MODEL=$INFERENCE_MODEL
set -euo pipefail
set -x
# Deploy benchmark-specific components
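# --dry-run=client -o yaml renders the ConfigMap manifest locally without contacting the
# cluster; the generated stack-configmap.yaml is then applied explicitly below.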
kubectl create configmap llama-stack-config --from-file=stack_run_config.yaml \
  --dry-run=client -o yaml > stack-configmap.yaml
kubectl apply --validate=false -f stack-configmap.yaml
# Deploy our custom llama stack server (overriding the base one)
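# envsubst replaces the ${VAR} references in the template with the values exported above.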
envsubst < stack-k8s.yaml.template | kubectl apply --validate=false -f -
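For reference, the envsubst step works by substituting exported environment variables into the template before kubectl sees it. A minimal, self-contained sketch of that pattern follows; the template fragment is made up for illustration, while the real manifest is stack-k8s.yaml.template in this directory:

```bash
#!/usr/bin/env bash
# Minimal sketch of the envsubst pattern used above; the template fragment is hypothetical.
export INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct

# The quoted 'EOF' keeps ${INFERENCE_MODEL} literal in the file so envsubst, not the shell, expands it.
cat > /tmp/example-template.yaml <<'EOF'
apiVersion: v1
kind: ConfigMap
metadata:
  name: example
data:
  inference_model: ${INFERENCE_MODEL}
EOF

# envsubst reads the template and replaces ${VAR} references with the exported values;
# the rendered manifest could then be piped to kubectl, as the script above does.
envsubst < /tmp/example-template.yaml
```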