# What does this PR do?

Adds a write worker queue for writes to the inference store. This avoids overwhelming request processing with slow inference writes.

## Test Plan

Benchmark:

```
cd docs/source/distributions/k8s-benchmark

# start mock server
python openai-mock-server.py --port 8000

# start stack server
uv run --with llama-stack python -m llama_stack.core.server.server docs/source/distributions/k8s-benchmark/stack_run_config.yaml

# run benchmark script
uv run python3 benchmark.py --duration 120 --concurrent 50 --base-url=http://localhost:8321/v1/openai/v1 --model=vllm-inference/meta-llama/Llama-3.2-3B-Instruct
```

Before:

```
============================================================
BENCHMARK RESULTS

Response Time Statistics:
  Mean:    1.111s
  Median:  0.982s
  Min:     0.466s
  Max:     15.190s
  Std Dev: 1.091s

Percentiles:
  P50: 0.982s
  P90: 1.281s
  P95: 1.439s
  P99: 5.476s

Time to First Token (TTFT) Statistics:
  Mean:    0.474s
  Median:  0.347s
  Min:     0.175s
  Max:     15.129s
  Std Dev: 0.819s

TTFT Percentiles:
  P50: 0.347s
  P90: 0.661s
  P95: 0.762s
  P99: 2.788s

Streaming Statistics:
  Mean chunks per response: 67.2
  Total chunks received: 122154
============================================================
Total time: 120.00s
Concurrent users: 50
Total requests: 1919
Successful requests: 1819
Failed requests: 100
Success rate: 94.8%
Requests per second: 15.16

Errors (showing first 5):
  Request error:
  Request error:
  Request error:
  Request error:
  Request error:

Benchmark completed. Stopping server (PID: 679)...
Server stopped.
```

After:

```
============================================================
BENCHMARK RESULTS

Response Time Statistics:
  Mean:    1.085s
  Median:  1.089s
  Min:     0.451s
  Max:     2.002s
  Std Dev: 0.212s

Percentiles:
  P50: 1.089s
  P90: 1.343s
  P95: 1.409s
  P99: 1.617s

Time to First Token (TTFT) Statistics:
  Mean:    0.407s
  Median:  0.361s
  Min:     0.182s
  Max:     1.178s
  Std Dev: 0.175s

TTFT Percentiles:
  P50: 0.361s
  P90: 0.644s
  P95: 0.744s
  P99: 0.932s

Streaming Statistics:
  Mean chunks per response: 66.8
  Total chunks received: 367240
============================================================
Total time: 120.00s
Concurrent users: 50
Total requests: 5495
Successful requests: 5495
Failed requests: 0
Success rate: 100.0%
Requests per second: 45.79

Benchmark completed. Stopping server (PID: 97169)...
Server stopped.
```
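For intuition, here is a minimal sketch of the write-worker-queue pattern the PR describes, assuming asyncio and an illustrative store exposing an async `write_chat_completion` method; the class and parameter names are hypothetical, not the PR's actual code:

```python
import asyncio


class InferenceWriteQueue:
    """Sketch: decouple request handling from slow inference-store writes.

    Handlers enqueue completed records and return immediately; background
    workers drain the queue and perform the actual database writes.
    Must be constructed inside a running event loop.
    """

    def __init__(self, store, num_workers: int = 4, max_pending: int = 1000):
        self._store = store  # hypothetical: exposes async write_chat_completion()
        self._queue: asyncio.Queue = asyncio.Queue(maxsize=max_pending)
        self._workers = [
            asyncio.create_task(self._worker()) for _ in range(num_workers)
        ]

    async def enqueue(self, record) -> None:
        # Backpressure: only blocks once max_pending writes are outstanding,
        # instead of every request waiting on its own slow write.
        await self._queue.put(record)

    async def _worker(self) -> None:
        while True:
            record = await self._queue.get()
            try:
                await self._store.write_chat_completion(record)
            except Exception:
                pass  # a real implementation would log and possibly retry
            finally:
                self._queue.task_done()

    async def shutdown(self) -> None:
        await self._queue.join()  # flush pending writes
        for worker in self._workers:
            worker.cancel()
```

The benchmark numbers above are consistent with that design: the 15s-class tail latencies (P99 response 5.476s, max 15.190s) disappear once writes stop competing with request processing, and throughput roughly triples (15.16 to 45.79 requests/second) with zero failed requests.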
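For reading the test plan: `benchmark.py` drives concurrent streaming requests and reports TTFT (time to the first streamed chunk) separately from total response time. A rough, hypothetical approximation of that measurement loop using httpx, not the repo's actual script:

```python
import asyncio
import statistics
import time

import httpx  # assumed HTTP client; the real benchmark.py may use another


async def timed_request(client: httpx.AsyncClient, base_url: str, model: str):
    """One streaming chat completion; returns (ttft, total) in seconds."""
    start = time.perf_counter()
    ttft = None
    async with client.stream(
        "POST",
        f"{base_url}/chat/completions",
        json={"model": model, "stream": True,
              "messages": [{"role": "user", "content": "Hello"}]},
        timeout=60.0,
    ) as resp:
        async for _line in resp.aiter_lines():  # streamed chunks
            if ttft is None:
                ttft = time.perf_counter() - start
    return ttft, time.perf_counter() - start


async def run(base_url: str, model: str, concurrent: int = 50,
              duration: float = 120.0):
    results, failures = [], 0
    deadline = time.monotonic() + duration

    async def worker():
        nonlocal failures
        while time.monotonic() < deadline:
            try:
                results.append(await timed_request(client, base_url, model))
            except httpx.HTTPError:
                failures += 1

    async with httpx.AsyncClient() as client:
        await asyncio.gather(*(worker() for _ in range(concurrent)))

    totals = sorted(total for _ttft, total in results)
    print(f"requests={len(results)} failed={failures} "
          f"mean={statistics.mean(totals):.3f}s "
          f"p99={totals[int(0.99 * (len(totals) - 1))]:.3f}s")


# asyncio.run(run("http://localhost:8321/v1/openai/v1",
#                 "vllm-inference/meta-llama/Llama-3.2-3B-Instruct"))
```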
The stack config used by the benchmark, `docs/source/distributions/k8s-benchmark/stack_run_config.yaml`:
```yaml
version: '2'
image_name: kubernetes-benchmark-demo
apis:
- agents
- files
- inference
- safety
- telemetry
- tool_runtime
- vector_io
providers:
  inference:
  - provider_id: vllm-inference
    provider_type: remote::vllm
    config:
      url: ${env.VLLM_URL:=http://localhost:8000/v1}
      max_tokens: ${env.VLLM_MAX_TOKENS:=4096}
      api_token: ${env.VLLM_API_TOKEN:=fake}
      tls_verify: ${env.VLLM_TLS_VERIFY:=true}
  - provider_id: sentence-transformers
    provider_type: inline::sentence-transformers
    config: {}
  files:
  - provider_id: meta-reference-files
    provider_type: inline::localfs
    config:
      storage_dir: ${env.FILES_STORAGE_DIR:=~/.llama/distributions/starter/files}
      metadata_store:
        type: sqlite
        db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/files_metadata.db
  vector_io:
  - provider_id: ${env.ENABLE_CHROMADB:+chromadb}
    provider_type: remote::chromadb
    config:
      url: ${env.CHROMADB_URL:=}
      kvstore:
        type: postgres
        host: ${env.POSTGRES_HOST:=localhost}
        port: ${env.POSTGRES_PORT:=5432}
        db: ${env.POSTGRES_DB:=llamastack}
        user: ${env.POSTGRES_USER:=llamastack}
        password: ${env.POSTGRES_PASSWORD:=llamastack}
  safety:
  - provider_id: llama-guard
    provider_type: inline::llama-guard
    config:
      excluded_categories: []
  agents:
  - provider_id: meta-reference
    provider_type: inline::meta-reference
    config:
      persistence_store:
        type: postgres
        host: ${env.POSTGRES_HOST:=localhost}
        port: ${env.POSTGRES_PORT:=5432}
        db: ${env.POSTGRES_DB:=llamastack}
        user: ${env.POSTGRES_USER:=llamastack}
        password: ${env.POSTGRES_PASSWORD:=llamastack}
      responses_store:
        type: postgres
        host: ${env.POSTGRES_HOST:=localhost}
        port: ${env.POSTGRES_PORT:=5432}
        db: ${env.POSTGRES_DB:=llamastack}
        user: ${env.POSTGRES_USER:=llamastack}
        password: ${env.POSTGRES_PASSWORD:=llamastack}
  telemetry:
  - provider_id: meta-reference
    provider_type: inline::meta-reference
    config:
      service_name: "${env.OTEL_SERVICE_NAME:=\u200B}"
      sinks: ${env.TELEMETRY_SINKS:=console}
  tool_runtime:
  - provider_id: brave-search
    provider_type: remote::brave-search
    config:
      api_key: ${env.BRAVE_SEARCH_API_KEY:+}
      max_results: 3
  - provider_id: tavily-search
    provider_type: remote::tavily-search
    config:
      api_key: ${env.TAVILY_SEARCH_API_KEY:+}
      max_results: 3
  - provider_id: rag-runtime
    provider_type: inline::rag-runtime
    config: {}
  - provider_id: model-context-protocol
    provider_type: remote::model-context-protocol
    config: {}
metadata_store:
  type: postgres
  host: ${env.POSTGRES_HOST:=localhost}
  port: ${env.POSTGRES_PORT:=5432}
  db: ${env.POSTGRES_DB:=llamastack}
  user: ${env.POSTGRES_USER:=llamastack}
  password: ${env.POSTGRES_PASSWORD:=llamastack}
  table_name: llamastack_kvstore
inference_store:
  type: postgres
  host: ${env.POSTGRES_HOST:=localhost}
  port: ${env.POSTGRES_PORT:=5432}
  db: ${env.POSTGRES_DB:=llamastack}
  user: ${env.POSTGRES_USER:=llamastack}
  password: ${env.POSTGRES_PASSWORD:=llamastack}
models:
- metadata:
    embedding_dimension: 384
  model_id: all-MiniLM-L6-v2
  provider_id: sentence-transformers
  model_type: embedding
- model_id: ${env.INFERENCE_MODEL}
  provider_id: vllm-inference
  model_type: llm
shields:
- shield_id: ${env.SAFETY_MODEL:=meta-llama/Llama-Guard-3-1B}
vector_dbs: []
datasets: []
scoring_fns: []
benchmarks: []
tool_groups:
- toolgroup_id: builtin::websearch
  provider_id: tavily-search
- toolgroup_id: builtin::rag
  provider_id: rag-runtime
server:
  port: 8323
```
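The config leans on two substitution forms: `${env.VAR:=default}` (use the variable if set, else the default) and `${env.VAR:+value}` (substitute `value` only when the variable is set, which is how `ENABLE_CHROMADB` gates the chromadb provider). That reading mirrors bash parameter expansion and is an assumption about llama-stack's config loader; a small Python sketch of the assumed semantics:

```python
import os
import re

# Assumed semantics of the placeholders used in the YAML above
# (bash-style; the authoritative resolution is llama-stack's loader).
_PLACEHOLDER = re.compile(r"\$\{env\.(\w+):([=+])([^}]*)\}")


def expand(text: str) -> str:
    def repl(match: re.Match) -> str:
        name, op, arg = match.groups()
        value = os.environ.get(name)
        if op == "=":
            return value if value else arg  # ':=' falls back to the default
        return arg if value else ""         # ':+' substitutes only when set

    return _PLACEHOLDER.sub(repl, text)


# With POSTGRES_HOST unset:
print(expand("host: ${env.POSTGRES_HOST:=localhost}"))          # host: localhost
# With ENABLE_CHROMADB unset, the chromadb provider_id stays empty:
print(expand("provider_id: ${env.ENABLE_CHROMADB:+chromadb}"))  # provider_id:
```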