diff --git a/llama_stack/cli/stack/configure.py b/llama_stack/cli/stack/configure.py
index 0263166ce..d739aad50 100644
--- a/llama_stack/cli/stack/configure.py
+++ b/llama_stack/cli/stack/configure.py
@@ -107,7 +107,7 @@ class StackConfigure(Subcommand):
 
         if run_config_file.exists():
             cprint(
-                f"Configuration already exists for {build_config.name}. Will overwrite...",
+                f"Configuration already exists at `{str(run_config_file)}`. Will overwrite...",
                 "yellow",
                 attrs=["bold"],
             )
diff --git a/llama_stack/configs/distributions/conda/local-conda-example-build.yaml b/llama_stack/configs/distributions/conda/local-conda-example-build.yaml
index 143335f51..f98e48570 100644
--- a/llama_stack/configs/distributions/conda/local-conda-example-build.yaml
+++ b/llama_stack/configs/distributions/conda/local-conda-example-build.yaml
@@ -3,8 +3,8 @@ distribution_spec:
   description: Use code from `llama_stack` itself to serve all llama stack APIs
   providers:
     inference: meta-reference
-    memory: meta-reference-faiss
+    memory: meta-reference
     safety: meta-reference
     agents: meta-reference
-    telemetry: console
+    telemetry: meta-reference
 image_type: conda
diff --git a/llama_stack/configs/distributions/conda/local-fireworks-conda-example-build.yaml b/llama_stack/configs/distributions/conda/local-fireworks-conda-example-build.yaml
index 157b6d011..5e17f83c0 100644
--- a/llama_stack/configs/distributions/conda/local-fireworks-conda-example-build.yaml
+++ b/llama_stack/configs/distributions/conda/local-fireworks-conda-example-build.yaml
@@ -3,8 +3,8 @@ distribution_spec:
   description: Use Fireworks.ai for running LLM inference
   providers:
     inference: remote::fireworks
-    memory: meta-reference-faiss
+    memory: meta-reference
     safety: meta-reference
     agents: meta-reference
-    telemetry: console
+    telemetry: meta-reference
 image_type: conda
diff --git a/llama_stack/configs/distributions/conda/local-ollama-conda-example-build.yaml b/llama_stack/configs/distributions/conda/local-ollama-conda-example-build.yaml
index ad4966aa9..1c43e5998 100644
--- a/llama_stack/configs/distributions/conda/local-ollama-conda-example-build.yaml
+++ b/llama_stack/configs/distributions/conda/local-ollama-conda-example-build.yaml
@@ -3,8 +3,8 @@ distribution_spec:
   description: Like local, but use ollama for running LLM inference
   providers:
     inference: remote::ollama
-    memory: meta-reference-faiss
+    memory: meta-reference
     safety: meta-reference
     agents: meta-reference
-    telemetry: console
+    telemetry: meta-reference
 image_type: conda
diff --git a/llama_stack/configs/distributions/conda/local-tgi-conda-example-build.yaml b/llama_stack/configs/distributions/conda/local-tgi-conda-example-build.yaml
index 3439682ed..07848b130 100644
--- a/llama_stack/configs/distributions/conda/local-tgi-conda-example-build.yaml
+++ b/llama_stack/configs/distributions/conda/local-tgi-conda-example-build.yaml
@@ -3,8 +3,8 @@ distribution_spec:
   description: Use TGI (local or with Hugging Face Inference Endpoints for running LLM inference. When using HF Inference Endpoints, you must provide the name of the endpoint).
   providers:
     inference: remote::tgi
-    memory: meta-reference-faiss
+    memory: meta-reference
     safety: meta-reference
     agents: meta-reference
-    telemetry: console
+    telemetry: meta-reference
 image_type: conda
diff --git a/llama_stack/configs/distributions/conda/local-together-conda-example-build.yaml b/llama_stack/configs/distributions/conda/local-together-conda-example-build.yaml
index df6d8b3bf..df11bd0ed 100644
--- a/llama_stack/configs/distributions/conda/local-together-conda-example-build.yaml
+++ b/llama_stack/configs/distributions/conda/local-together-conda-example-build.yaml
@@ -3,8 +3,8 @@ distribution_spec:
   description: Use Together.ai for running LLM inference
   providers:
     inference: remote::together
-    memory: meta-reference-faiss
+    memory: meta-reference
     safety: meta-reference
     agents: meta-reference
-    telemetry: console
+    telemetry: meta-reference
 image_type: conda
diff --git a/llama_stack/configs/distributions/docker/local-docker-example-build.yaml b/llama_stack/configs/distributions/docker/local-docker-example-build.yaml
index b94203d49..885b6e58c 100644
--- a/llama_stack/configs/distributions/docker/local-docker-example-build.yaml
+++ b/llama_stack/configs/distributions/docker/local-docker-example-build.yaml
@@ -3,8 +3,8 @@ distribution_spec:
   description: Use code from `llama_stack` itself to serve all llama stack APIs
   providers:
     inference: meta-reference
-    memory: meta-reference-faiss
+    memory: meta-reference
     safety: meta-reference
     agents: meta-reference
-    telemetry: console
+    telemetry: meta-reference
 image_type: docker
diff --git a/llama_stack/distribution/build_conda_env.sh b/llama_stack/distribution/build_conda_env.sh
index 09969325d..b210a8c8b 100755
--- a/llama_stack/distribution/build_conda_env.sh
+++ b/llama_stack/distribution/build_conda_env.sh
@@ -7,11 +7,11 @@
 # the root directory of this source tree.
 
 LLAMA_MODELS_DIR=${LLAMA_MODELS_DIR:-}
-LLAMA_TOOLCHAIN_DIR=${LLAMA_TOOLCHAIN_DIR:-}
+LLAMA_STACK_DIR=${LLAMA_STACK_DIR:-}
 TEST_PYPI_VERSION=${TEST_PYPI_VERSION:-}
 
-if [ -n "$LLAMA_TOOLCHAIN_DIR" ]; then
-  echo "Using llama-stack-dir=$LLAMA_TOOLCHAIN_DIR"
+if [ -n "$LLAMA_STACK_DIR" ]; then
+  echo "Using llama-stack-dir=$LLAMA_STACK_DIR"
 fi
 if [ -n "$LLAMA_MODELS_DIR" ]; then
   echo "Using llama-models-dir=$LLAMA_MODELS_DIR"
@@ -81,14 +81,14 @@ ensure_conda_env_python310() {
     pip install --extra-index-url https://test.pypi.org/simple/ llama-models==$TEST_PYPI_VERSION llama-stack==$TEST_PYPI_VERSION $pip_dependencies
   else
     # Re-installing llama-stack in the new conda environment
-    if [ -n "$LLAMA_TOOLCHAIN_DIR" ]; then
-      if [ ! -d "$LLAMA_TOOLCHAIN_DIR" ]; then
-        printf "${RED}Warning: LLAMA_TOOLCHAIN_DIR is set but directory does not exist: $LLAMA_TOOLCHAIN_DIR${NC}\n" >&2
+    if [ -n "$LLAMA_STACK_DIR" ]; then
+      if [ ! -d "$LLAMA_STACK_DIR" ]; then
+        printf "${RED}Warning: LLAMA_STACK_DIR is set but directory does not exist: $LLAMA_STACK_DIR${NC}\n" >&2
         exit 1
       fi
 
-      printf "Installing from LLAMA_TOOLCHAIN_DIR: $LLAMA_TOOLCHAIN_DIR\n"
-      pip install --no-cache-dir -e "$LLAMA_TOOLCHAIN_DIR"
+      printf "Installing from LLAMA_STACK_DIR: $LLAMA_STACK_DIR\n"
+      pip install --no-cache-dir -e "$LLAMA_STACK_DIR"
     else
       pip install --no-cache-dir llama-stack
     fi
diff --git a/llama_stack/distribution/build_container.sh b/llama_stack/distribution/build_container.sh
index c993fcec5..836f9bf19 100755
--- a/llama_stack/distribution/build_container.sh
+++ b/llama_stack/distribution/build_container.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 
 LLAMA_MODELS_DIR=${LLAMA_MODELS_DIR:-}
-LLAMA_TOOLCHAIN_DIR=${LLAMA_TOOLCHAIN_DIR:-}
+LLAMA_STACK_DIR=${LLAMA_STACK_DIR:-}
 TEST_PYPI_VERSION=${TEST_PYPI_VERSION:-}
 
 if [ "$#" -ne 4 ]; then
@@ -55,15 +55,15 @@ RUN apt-get update && apt-get install -y \
 
 EOF
 
-toolchain_mount="/app/llama-stack-source"
+stack_mount="/app/llama-stack-source"
 models_mount="/app/llama-models-source"
 
-if [ -n "$LLAMA_TOOLCHAIN_DIR" ]; then
-  if [ ! -d "$LLAMA_TOOLCHAIN_DIR" ]; then
-    echo "${RED}Warning: LLAMA_TOOLCHAIN_DIR is set but directory does not exist: $LLAMA_TOOLCHAIN_DIR${NC}" >&2
+if [ -n "$LLAMA_STACK_DIR" ]; then
+  if [ ! -d "$LLAMA_STACK_DIR" ]; then
+    echo "${RED}Warning: LLAMA_STACK_DIR is set but directory does not exist: $LLAMA_STACK_DIR${NC}" >&2
     exit 1
   fi
-  add_to_docker "RUN pip install $toolchain_mount"
+  add_to_docker "RUN pip install $stack_mount"
 else
   add_to_docker "RUN pip install llama-stack"
 fi
@@ -90,7 +90,7 @@ add_to_docker <