Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-07-30 07:39:38 +00:00
Commit 3eb03da7b5: Merge branch 'main' of https://github.com/zainhas/weaviate-memory-adapter

5 changed files with 13 additions and 12 deletions
@@ -55,7 +55,7 @@ conda create -n stack python=3.10
 conda activate stack
 
 cd llama-stack
-pip install -e .
+$CONDA_PREFIX/bin/pip install -e .
 ```
 
 ## The Llama CLI
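The hunk above replaces a bare `pip` with `$CONDA_PREFIX/bin/pip`, so the editable install always lands inside the active conda environment, even when `pip` on `PATH` resolves to a base-environment or system pip. A minimal sketch of the difference, assuming the `stack` environment from the snippet:

```
conda activate stack
which pip                          # may resolve to a base-env or system pip via PATH
echo "$CONDA_PREFIX"               # active env prefix, e.g. .../envs/stack
"$CONDA_PREFIX/bin/pip" --version  # unambiguously the pip inside that environment
```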
@@ -77,8 +77,8 @@ ensure_conda_env_python310() {
 
   if [ -n "$TEST_PYPI_VERSION" ]; then
     # these packages are damaged in test-pypi, so install them first
-    pip install fastapi libcst
-    pip install --extra-index-url https://test.pypi.org/simple/ llama-models==$TEST_PYPI_VERSION llama-stack==$TEST_PYPI_VERSION $pip_dependencies
+    $CONDA_PREFIX/bin/pip install fastapi libcst
+    $CONDA_PREFIX/bin/pip install --extra-index-url https://test.pypi.org/simple/ llama-models==$TEST_PYPI_VERSION llama-stack==$TEST_PYPI_VERSION $pip_dependencies
   else
     # Re-installing llama-stack in the new conda environment
     if [ -n "$LLAMA_STACK_DIR" ]; then
@@ -88,9 +88,9 @@ ensure_conda_env_python310() {
       fi
 
       printf "Installing from LLAMA_STACK_DIR: $LLAMA_STACK_DIR\n"
-      pip install --no-cache-dir -e "$LLAMA_STACK_DIR"
+      $CONDA_PREFIX/bin/pip install --no-cache-dir -e "$LLAMA_STACK_DIR"
     else
-      pip install --no-cache-dir llama-stack
+      $CONDA_PREFIX/bin/pip install --no-cache-dir llama-stack
     fi
 
     if [ -n "$LLAMA_MODELS_DIR" ]; then
@@ -100,14 +100,14 @@ ensure_conda_env_python310() {
       fi
 
       printf "Installing from LLAMA_MODELS_DIR: $LLAMA_MODELS_DIR\n"
-      pip uninstall -y llama-models
-      pip install --no-cache-dir -e "$LLAMA_MODELS_DIR"
+      $CONDA_PREFIX/bin/pip uninstall -y llama-models
+      $CONDA_PREFIX/bin/pip install --no-cache-dir -e "$LLAMA_MODELS_DIR"
     fi
 
     # Install pip dependencies
     if [ -n "$pip_dependencies" ]; then
       printf "Installing pip dependencies: $pip_dependencies\n"
-      pip install $pip_dependencies
+      $CONDA_PREFIX/bin/pip install $pip_dependencies
     fi
   fi
 }
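All three hunks in the build script make the same substitution: every bare `pip` inside `ensure_conda_env_python310` becomes `$CONDA_PREFIX/bin/pip`, pinning installs to the freshly created environment rather than whichever pip is first on `PATH`. A hypothetical refactor, not part of this commit, showing how the repeated prefix could be factored into one helper:

```
# pip_in_env is an illustrative name, not from the repo: it routes every
# pip invocation through the active conda environment's own pip binary.
pip_in_env() {
  "$CONDA_PREFIX/bin/pip" "$@"
}

pip_in_env uninstall -y llama-models
pip_in_env install --no-cache-dir llama-stack
```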
@@ -8,6 +8,8 @@ import importlib
 import inspect
 from typing import Dict, List
 
+from pydantic import BaseModel
+
 from llama_stack.apis.agents import Agents
 from llama_stack.apis.inference import Inference
 from llama_stack.apis.memory import Memory
@@ -17,8 +19,6 @@ from llama_stack.apis.safety import Safety
 from llama_stack.apis.shields import Shields
 from llama_stack.apis.telemetry import Telemetry
 
-from pydantic import BaseModel
-
 from .datatypes import Api, ApiEndpoint, ProviderSpec, remote_provider_spec
 
 # These are the dependencies needed by the distribution server.
@@ -26,6 +26,7 @@ from .datatypes import Api, ApiEndpoint, ProviderSpec, remote_provider_spec
 SERVER_DEPENDENCIES = [
     "fastapi",
     "fire",
+    "httpx",
     "uvicorn",
 ]
 
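The Python module sees two changes: `from pydantic import BaseModel` moves up to sit with the other third-party imports, and `"httpx"` joins `SERVER_DEPENDENCIES`, the list of packages the distribution server needs at runtime alongside `fastapi` and `uvicorn`. A quick check, again assuming the `stack` environment, that all four server dependencies resolve:

```
# Each entry in SERVER_DEPENDENCIES doubles as an importable module name here.
for dep in fastapi fire httpx uvicorn; do
  "$CONDA_PREFIX/bin/python" -c "import $dep" && echo "$dep OK"
done
```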
@@ -2,7 +2,7 @@ blobfile
 fire
 httpx
 huggingface-hub
-llama-models>=0.0.21
+llama-models>=0.0.24
 prompt-toolkit
 python-dotenv
 pydantic
setup.py (2 changes)

@@ -16,7 +16,7 @@ def read_requirements():
 
 setup(
     name="llama_stack",
-    version="0.0.21",
+    version="0.0.24",
     author="Meta Llama",
     author_email="llama-oss@meta.com",
     description="Llama Stack",
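The last two files move in lockstep: requirements.txt raises the `llama-models` floor from 0.0.21 to 0.0.24, and setup.py bumps the `llama_stack` package version to match. A sanity-check sketch for the new pin, assuming the same environment:

```
"$CONDA_PREFIX/bin/pip" install "llama-models>=0.0.24"
"$CONDA_PREFIX/bin/pip" show llama-models | grep '^Version'
```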