diff --git a/README.md b/README.md
index 0e3efde71..d27eb718f 100644
--- a/README.md
+++ b/README.md
@@ -55,7 +55,7 @@
 conda create -n stack python=3.10
 conda activate stack
 cd llama-stack
-pip install -e .
+$CONDA_PREFIX/bin/pip install -e .
 ```
 
 ## The Llama CLI
diff --git a/llama_stack/distribution/build_conda_env.sh b/llama_stack/distribution/build_conda_env.sh
index b210a8c8b..abe59d978 100755
--- a/llama_stack/distribution/build_conda_env.sh
+++ b/llama_stack/distribution/build_conda_env.sh
@@ -77,8 +77,8 @@ ensure_conda_env_python310() {
 
   if [ -n "$TEST_PYPI_VERSION" ]; then
     # these packages are damaged in test-pypi, so install them first
-    pip install fastapi libcst
-    pip install --extra-index-url https://test.pypi.org/simple/ llama-models==$TEST_PYPI_VERSION llama-stack==$TEST_PYPI_VERSION $pip_dependencies
+    $CONDA_PREFIX/bin/pip install fastapi libcst
+    $CONDA_PREFIX/bin/pip install --extra-index-url https://test.pypi.org/simple/ llama-models==$TEST_PYPI_VERSION llama-stack==$TEST_PYPI_VERSION $pip_dependencies
   else
     # Re-installing llama-stack in the new conda environment
     if [ -n "$LLAMA_STACK_DIR" ]; then
@@ -88,9 +88,9 @@ ensure_conda_env_python310() {
       fi
 
       printf "Installing from LLAMA_STACK_DIR: $LLAMA_STACK_DIR\n"
-      pip install --no-cache-dir -e "$LLAMA_STACK_DIR"
+      $CONDA_PREFIX/bin/pip install --no-cache-dir -e "$LLAMA_STACK_DIR"
     else
-      pip install --no-cache-dir llama-stack
+      $CONDA_PREFIX/bin/pip install --no-cache-dir llama-stack
     fi
 
     if [ -n "$LLAMA_MODELS_DIR" ]; then
@@ -100,14 +100,14 @@ ensure_conda_env_python310() {
       fi
 
       printf "Installing from LLAMA_MODELS_DIR: $LLAMA_MODELS_DIR\n"
-      pip uninstall -y llama-models
-      pip install --no-cache-dir -e "$LLAMA_MODELS_DIR"
+      $CONDA_PREFIX/bin/pip uninstall -y llama-models
+      $CONDA_PREFIX/bin/pip install --no-cache-dir -e "$LLAMA_MODELS_DIR"
     fi
 
     # Install pip dependencies
     if [ -n "$pip_dependencies" ]; then
       printf "Installing pip dependencies: $pip_dependencies\n"
-      pip install $pip_dependencies
+      $CONDA_PREFIX/bin/pip install $pip_dependencies
     fi
   fi
 }
diff --git a/llama_stack/distribution/distribution.py b/llama_stack/distribution/distribution.py
index b641b6582..035febb80 100644
--- a/llama_stack/distribution/distribution.py
+++ b/llama_stack/distribution/distribution.py
@@ -8,6 +8,8 @@
 import importlib
 import inspect
 from typing import Dict, List
+from pydantic import BaseModel
+
 from llama_stack.apis.agents import Agents
 from llama_stack.apis.inference import Inference
 from llama_stack.apis.memory import Memory
@@ -17,8 +19,6 @@
 from llama_stack.apis.safety import Safety
 from llama_stack.apis.shields import Shields
 from llama_stack.apis.telemetry import Telemetry
-from pydantic import BaseModel
-
 from .datatypes import Api, ApiEndpoint, ProviderSpec, remote_provider_spec
 
 # These are the dependencies needed by the distribution server.
@@ -26,6 +26,7 @@ from .datatypes import Api, ApiEndpoint, ProviderSpec, remote_provider_spec
 SERVER_DEPENDENCIES = [
     "fastapi",
     "fire",
+    "httpx",
     "uvicorn",
 ]
 
diff --git a/requirements.txt b/requirements.txt
index 3351b9c6f..2b2f3fea1 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,7 +2,7 @@ blobfile
 fire
 httpx
 huggingface-hub
-llama-models>=0.0.21
+llama-models>=0.0.24
 prompt-toolkit
 python-dotenv
 pydantic
diff --git a/setup.py b/setup.py
index 4f01fceb8..f389d5364 100644
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,7 @@ def read_requirements():
 
 setup(
     name="llama_stack",
-    version="0.0.21",
+    version="0.0.24",
     author="Meta Llama",
     author_email="llama-oss@meta.com",
     description="Llama Stack",