Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-07-29 15:23:51 +00:00
Merge branch 'main' of https://github.com/zainhas/weaviate-memory-adapter

Commit 3eb03da7b5: 5 changed files with 13 additions and 12 deletions
@@ -55,7 +55,7 @@ conda create -n stack python=3.10
 conda activate stack
 
 cd llama-stack
-pip install -e .
+$CONDA_PREFIX/bin/pip install -e .
 ```
 
 ## The Llama CLI
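The hunk above pins the docs to the environment's own pip binary. Bare `pip` resolves through `PATH` and can belong to a different Python than the conda env that was just created, while `$CONDA_PREFIX/bin/pip` always installs into the active environment. A minimal sketch for checking which pip an install would use (the env name `stack` comes from the docs above; the commands themselves are standard conda/pip):

```bash
# Activate the env created earlier in the docs.
conda activate stack

# Bare `pip` resolves via PATH and may point at another Python.
which pip

# The env-local binary lives under the environment prefix.
echo "$CONDA_PREFIX/bin/pip"

# Shows exactly which Python this pip installs into.
"$CONDA_PREFIX/bin/pip" --version
```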
@@ -77,8 +77,8 @@ ensure_conda_env_python310() {
 
   if [ -n "$TEST_PYPI_VERSION" ]; then
     # these packages are damaged in test-pypi, so install them first
-    pip install fastapi libcst
-    pip install --extra-index-url https://test.pypi.org/simple/ llama-models==$TEST_PYPI_VERSION llama-stack==$TEST_PYPI_VERSION $pip_dependencies
+    $CONDA_PREFIX/bin/pip install fastapi libcst
+    $CONDA_PREFIX/bin/pip install --extra-index-url https://test.pypi.org/simple/ llama-models==$TEST_PYPI_VERSION llama-stack==$TEST_PYPI_VERSION $pip_dependencies
   else
     # Re-installing llama-stack in the new conda environment
     if [ -n "$LLAMA_STACK_DIR" ]; then
@@ -88,9 +88,9 @@ ensure_conda_env_python310() {
       fi
 
       printf "Installing from LLAMA_STACK_DIR: $LLAMA_STACK_DIR\n"
-      pip install --no-cache-dir -e "$LLAMA_STACK_DIR"
+      $CONDA_PREFIX/bin/pip install --no-cache-dir -e "$LLAMA_STACK_DIR"
     else
-      pip install --no-cache-dir llama-stack
+      $CONDA_PREFIX/bin/pip install --no-cache-dir llama-stack
     fi
 
     if [ -n "$LLAMA_MODELS_DIR" ]; then
@@ -100,14 +100,14 @@ ensure_conda_env_python310() {
       fi
 
       printf "Installing from LLAMA_MODELS_DIR: $LLAMA_MODELS_DIR\n"
-      pip uninstall -y llama-models
-      pip install --no-cache-dir -e "$LLAMA_MODELS_DIR"
+      $CONDA_PREFIX/bin/pip uninstall -y llama-models
+      $CONDA_PREFIX/bin/pip install --no-cache-dir -e "$LLAMA_MODELS_DIR"
     fi
 
     # Install pip dependencies
     if [ -n "$pip_dependencies" ]; then
       printf "Installing pip dependencies: $pip_dependencies\n"
-      pip install $pip_dependencies
+      $CONDA_PREFIX/bin/pip install $pip_dependencies
     fi
   fi
 }
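Every install path in `ensure_conda_env_python310()` now goes through the same env-local pip, whether the source is test-pypi, a local checkout, or the published package. A hedged usage sketch of how those branches are selected; the script name `build_conda_env.sh` is illustrative, not taken from this diff:

```bash
# Hypothetical invocations; only the environment variables are from the diff.

# Take the TEST_PYPI_VERSION branch: install a pre-release from test.pypi.
TEST_PYPI_VERSION=0.0.24 ./build_conda_env.sh

# Take the LLAMA_STACK_DIR branch: editable install from a local checkout.
LLAMA_STACK_DIR="$HOME/src/llama-stack" ./build_conda_env.sh

# With neither set, the script falls through to `pip install llama-stack`.
./build_conda_env.sh
```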
@@ -8,6 +8,8 @@ import importlib
 import inspect
 from typing import Dict, List
 
+from pydantic import BaseModel
+
 from llama_stack.apis.agents import Agents
 from llama_stack.apis.inference import Inference
 from llama_stack.apis.memory import Memory
@@ -17,8 +19,6 @@ from llama_stack.apis.safety import Safety
 from llama_stack.apis.shields import Shields
 from llama_stack.apis.telemetry import Telemetry
 
-from pydantic import BaseModel
-
 from .datatypes import Api, ApiEndpoint, ProviderSpec, remote_provider_spec
 
 # These are the dependencies needed by the distribution server.
@@ -26,6 +26,7 @@ from .datatypes import Api, ApiEndpoint, ProviderSpec, remote_provider_spec
 SERVER_DEPENDENCIES = [
+    "fastapi",
     "fire",
     "httpx",
     "uvicorn",
 ]
 
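Two things happen in this file: the third-party `pydantic` import moves up next to the standard-library imports, ahead of the `llama_stack.apis.*` group, and `fastapi` joins `SERVER_DEPENDENCIES`, matching the `pip install fastapi libcst` workaround in the shell script above. A minimal sketch of installing that list by hand, using only the four packages visible in the hunk:

```bash
# Server-side dependencies as listed in SERVER_DEPENDENCIES after this change.
"$CONDA_PREFIX/bin/pip" install fastapi fire httpx uvicorn
```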
@@ -2,7 +2,7 @@ blobfile
 fire
 httpx
 huggingface-hub
-llama-models>=0.0.21
+llama-models>=0.0.24
 prompt-toolkit
 python-dotenv
 pydantic
setup.py
@@ -16,7 +16,7 @@ def read_requirements():
 
 setup(
     name="llama_stack",
-    version="0.0.21",
+    version="0.0.24",
     author="Meta Llama",
     author_email="llama-oss@meta.com",
     description="Llama Stack",
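The `llama-models` floor in `requirements.txt` and the package version in `setup.py` move to 0.0.24 together, keeping the released `llama_stack` in lockstep with the `llama-models` release it depends on. A quick sketch for confirming an environment picked up the bump (standard `pip show` output; nothing here is specific to this repo):

```bash
# Both should report a version of at least 0.0.24 after reinstalling.
"$CONDA_PREFIX/bin/pip" show llama-models | grep '^Version'
"$CONDA_PREFIX/bin/pip" show llama-stack | grep '^Version'
```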