- Added new template `dell` and its documentation
- Updated docs
- [minor] uv fix I came across
- Ran codegen for all templates

Tested with

```bash
export INFERENCE_PORT=8181
export DEH_URL=http://0.0.0.0:$INFERENCE_PORT
export INFERENCE_MODEL=meta-llama/Llama-3.1-8B-Instruct
export CHROMADB_HOST=localhost
export CHROMADB_PORT=6601
export CHROMA_URL=http://$CHROMADB_HOST:$CHROMADB_PORT
export CUDA_VISIBLE_DEVICES=0
export LLAMA_STACK_PORT=8321

# build the stack template
llama stack build --template=dell

# start the TGI inference server
podman run --rm -it --network host \
  -v $HOME/.cache/huggingface:/data \
  -e HF_TOKEN=$HF_TOKEN \
  -p $INFERENCE_PORT:$INFERENCE_PORT \
  --gpus $CUDA_VISIBLE_DEVICES \
  ghcr.io/huggingface/text-generation-inference \
  --dtype bfloat16 --usage-stats off --sharded false \
  --cuda-memory-fraction 0.7 --model-id $INFERENCE_MODEL \
  --port $INFERENCE_PORT --hostname 0.0.0.0

# start chroma-db for vector-io (aka RAG)
podman run --rm -it --network host --name chromadb \
  -v .:/chroma/chroma -e IS_PERSISTENT=TRUE \
  chromadb/chroma:latest --port $CHROMADB_PORT --host $(hostname)

# build the docker image
llama stack build --template=dell --image-type=container

# run the llama stack server (via docker)
# NOTE: mount the llama-stack / llama-models directories if testing local changes
podman run -it \
  --network host \
  -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
  -v ~/.llama:/root/.llama \
  -v /home/hjshah/git/llama-stack:/app/llama-stack-source \
  -v /home/hjshah/git/llama-models:/app/llama-models-source \
  localhost/distribution-dell:dev \
  --port $LLAMA_STACK_PORT \
  --env INFERENCE_MODEL=$INFERENCE_MODEL \
  --env DEH_URL=$DEH_URL \
  --env CHROMA_URL=$CHROMA_URL

# test the server
cd <PATH_TO_LLAMA_STACK_REPO>
LLAMA_STACK_BASE_URL=http://0.0.0.0:$LLAMA_STACK_PORT pytest -s -v tests/client-sdk/agents/test_agents.py
```

---------

Co-authored-by: Hardik Shah <hjshah@fb.com>
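As a lighter-weight check than the full agents suite, the REST API can also be queried directly once the server is up (a minimal sketch; it assumes the stack's `/v1/models` listing route and that `curl` and `jq` are available):

```bash
# quick smoke test: the configured INFERENCE_MODEL should appear in the served model list
curl -s http://0.0.0.0:$LLAMA_STACK_PORT/v1/models | jq .
```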
#!/bin/bash

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

# TODO: combine this with build_conda_env.sh since it is almost identical;
# the only difference is that we don't do any conda-specific setup

LLAMA_MODELS_DIR=${LLAMA_MODELS_DIR:-}
LLAMA_STACK_DIR=${LLAMA_STACK_DIR:-}
TEST_PYPI_VERSION=${TEST_PYPI_VERSION:-}

# This timeout (in seconds) is necessary when installing PyTorch via uv, since it is likely to time out otherwise.
# Reference: https://github.com/astral-sh/uv/pull/1694
UV_HTTP_TIMEOUT=${UV_HTTP_TIMEOUT:-500}
# uv reads this from the environment, so export it for the child `uv` processes to see
export UV_HTTP_TIMEOUT
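
# On very slow connections the default may still be too low; it can be raised
# per invocation, e.g. (value and script name are illustrative):
#
#   UV_HTTP_TIMEOUT=1000 ./build_venv.sh mybuild ./my-stack-build.yaml 'numpy pandas scipy'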
if [ -n "$LLAMA_STACK_DIR" ]; then
|
|
echo "Using llama-stack-dir=$LLAMA_STACK_DIR"
|
|
fi
|
|
if [ -n "$LLAMA_MODELS_DIR" ]; then
|
|
echo "Using llama-models-dir=$LLAMA_MODELS_DIR"
|
|
fi
|
|
|
|
if [ "$#" -lt 3 ]; then
|
|
echo "Usage: $0 <distribution_type> <build_name> <build_file_path> <pip_dependencies> [<special_pip_deps>]" >&2
|
|
echo "Example: $0 <distribution_type> mybuild ./my-stack-build.yaml 'numpy pandas scipy'" >&2
|
|
exit 1
|
|
fi
|
|
|
|
# read the optional 4th argument before `set -u` starts treating unset
# parameters as errors
special_pip_deps="$4"

set -euo pipefail

build_name="$1"
env_name="llamastack-$build_name"
build_file_path="$2"
pip_dependencies="$3"
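
# The optional <special_pip_deps> argument bundles several installs into a
# single string, with '#' as the separator (see the install loop in run()
# below). For example, a hypothetical value of
#
#   'torch --index-url https://download.pytorch.org/whl/cpu#sentence-transformers'
#
# results in two separate `uv pip install` invocations, one per part.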

# Define color codes
RED='\033[0;31m'
GREEN='\033[0;32m'
NC='\033[0m' # No Color

# this is set if we actually create a new conda env, in which case we need to clean up
ENVNAME=""

SCRIPT_DIR=$(dirname "$(readlink -f "$0")")
source "$SCRIPT_DIR/common.sh"

run() {
  local env_name="$1"
  local pip_dependencies="$2"
  local special_pip_deps="$3"

  pip install uv
  if [ -n "$TEST_PYPI_VERSION" ]; then
    # these packages are damaged in test-pypi, so install them first
    uv pip install fastapi libcst
    uv pip install --extra-index-url https://test.pypi.org/simple/ \
      llama-models=="$TEST_PYPI_VERSION" llama-stack=="$TEST_PYPI_VERSION" \
      $pip_dependencies
    if [ -n "$special_pip_deps" ]; then
      IFS='#' read -ra parts <<<"$special_pip_deps"
      for part in "${parts[@]}"; do
        echo "$part"
        uv pip install $part
      done
    fi
  else
    # Re-installing llama-stack in the new virtual environment
    if [ -n "$LLAMA_STACK_DIR" ]; then
      if [ ! -d "$LLAMA_STACK_DIR" ]; then
        printf "${RED}Error: LLAMA_STACK_DIR is set but directory does not exist: $LLAMA_STACK_DIR${NC}\n" >&2
        exit 1
      fi

      printf "Installing from LLAMA_STACK_DIR: $LLAMA_STACK_DIR\n"
      uv pip install --no-cache-dir -e "$LLAMA_STACK_DIR"
    else
      uv pip install --no-cache-dir llama-stack
    fi

    if [ -n "$LLAMA_MODELS_DIR" ]; then
      if [ ! -d "$LLAMA_MODELS_DIR" ]; then
        printf "${RED}Error: LLAMA_MODELS_DIR is set but directory does not exist: $LLAMA_MODELS_DIR${NC}\n" >&2
        exit 1
      fi

      printf "Installing from LLAMA_MODELS_DIR: $LLAMA_MODELS_DIR\n"
      uv pip uninstall llama-models
      uv pip install --no-cache-dir -e "$LLAMA_MODELS_DIR"
    fi

    # Install pip dependencies (left unquoted on purpose so the string splits
    # into individual packages)
    printf "Installing pip dependencies\n"
    uv pip install $pip_dependencies
    if [ -n "$special_pip_deps" ]; then
      # '#'-separated entries may carry their own flags, so install them one at a time
      IFS='#' read -ra parts <<<"$special_pip_deps"
      for part in "${parts[@]}"; do
        echo "$part"
        uv pip install $part
      done
    fi
  fi
}

run "$env_name" "$pip_dependencies" "$special_pip_deps"
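
# For reference, a manual invocation mirroring the dell template from the PR
# above might look like the following (arguments are illustrative; normally
# `llama stack build` derives them from the build config rather than you
# calling this script by hand):
#
#   ./build_venv.sh dell ./dell-build.yaml 'chromadb-client fastapi uvicorn'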