# What does this PR do?

Refactor `main` to split out the app construction so that we can use `uvicorn --workers` to run a multi-process stack.

## Test Plan

CI

> uv run --with llama-stack python -m llama_stack.core.server.server benchmarking/k8s-benchmark/stack_run_config.yaml

works.

> LLAMA_STACK_CONFIG=benchmarking/k8s-benchmark/stack_run_config.yaml uv run uvicorn llama_stack.core.server.server:create_app --port 8321 --workers 4

works.
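For context, here is a minimal sketch of the app-factory pattern this refactor enables. It assumes the server is a FastAPI app; `create_app` is the name from the PR, everything else (module layout, the `/health` route) is illustrative, not llama-stack's actual implementation:

```python
# Illustrative sketch only, not llama-stack's actual server module.
# With an app factory, uvicorn imports this module in every worker
# process and calls the factory there, so no constructed app object
# has to be shared across processes.
from fastapi import FastAPI


def create_app() -> FastAPI:
    # Each worker builds its own app: load config, register routes, etc.
    app = FastAPI()

    @app.get("/health")
    def health():
        return {"status": "ok"}

    return app


if __name__ == "__main__":
    # Single-process path, roughly what running the module directly does.
    import uvicorn

    uvicorn.run(create_app())
```

Run it multi-process with e.g. `uvicorn my_module:create_app --workers 4`; uvicorn detects that the target is a factory rather than an app instance, and the `--factory` flag makes this explicit.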
33 lines · 1 KiB · Bash · Executable file
#!/usr/bin/env bash

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

# Deploys the benchmark-specific components on top of the base k8s deployment (../k8s/apply.sh).

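# Presumably the per-chunk delay (in seconds) applied to streamed responses in this benchmark setup.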
export STREAM_DELAY_SECONDS=0.005

export POSTGRES_USER=llamastack
export POSTGRES_DB=llamastack
export POSTGRES_PASSWORD=llamastack

export INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct
export SAFETY_MODEL=meta-llama/Llama-Guard-3-1B

export BENCHMARK_INFERENCE_MODEL=$INFERENCE_MODEL
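# Worker process count for the stack server; substituted into
# stack-k8s.yaml.template by envsubst below, presumably to drive `uvicorn --workers`.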
export LLAMA_STACK_WORKERS=4

set -euo pipefail
set -x

# Deploy benchmark-specific components
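# --dry-run=client -o yaml renders the ConfigMap manifest locally (no API
# call), so it can be inspected before being applied.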
kubectl create configmap llama-stack-config --from-file=stack_run_config.yaml \
  --dry-run=client -o yaml > stack-configmap.yaml

kubectl apply --validate=false -f stack-configmap.yaml

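# envsubst fills in the exported variables above (models, Postgres settings,
# worker count) before the rendered template is applied.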
# Deploy our custom llama stack server (overriding the base one)
envsubst < stack-k8s.yaml.template | kubectl apply --validate=false -f -