mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-06-27 18:50:41 +00:00
fix: update pyproject to include recursive LS deps (#2404)
trying to run `llama` cli after installing wheel fails with this error ``` Traceback (most recent call last): File "/tmp/tmp.wdZath9U6j/.venv/bin/llama", line 4, in <module> from llama_stack.cli.llama import main File "/tmp/tmp.wdZath9U6j/.venv/lib/python3.10/site-packages/llama_stack/__init__.py", line 7, in <module> from llama_stack.distribution.library_client import ( # noqa: F401 ModuleNotFoundError: No module named 'llama_stack.distribution.library_client' ``` This PR fixes it by ensuring that all sub-directories of `llama_stack` are also included. Also, fixes the missing `fastapi` dependency issue.
This commit is contained in:
parent
4fb228a1d8
commit
04592b9590
3 changed files with 2317 additions and 2308 deletions
|
@ -22,6 +22,7 @@ classifiers = [
|
||||||
]
|
]
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"aiohttp",
|
"aiohttp",
|
||||||
|
"fastapi",
|
||||||
"fire",
|
"fire",
|
||||||
"httpx",
|
"httpx",
|
||||||
"huggingface-hub",
|
"huggingface-hub",
|
||||||
|
@ -67,7 +68,6 @@ dev = [
|
||||||
"types-setuptools",
|
"types-setuptools",
|
||||||
"pre-commit",
|
"pre-commit",
|
||||||
"uvicorn",
|
"uvicorn",
|
||||||
"fastapi",
|
|
||||||
"ruamel.yaml", # needed for openapi generator
|
"ruamel.yaml", # needed for openapi generator
|
||||||
]
|
]
|
||||||
# These are the dependencies required for running unit tests.
|
# These are the dependencies required for running unit tests.
|
||||||
|
@ -132,7 +132,8 @@ llama = "llama_stack.cli.llama:main"
|
||||||
install-wheel-from-presigned = "llama_stack.cli.scripts.run:install_wheel_from_presigned"
|
install-wheel-from-presigned = "llama_stack.cli.scripts.run:install_wheel_from_presigned"
|
||||||
|
|
||||||
[tool.setuptools.packages.find]
|
[tool.setuptools.packages.find]
|
||||||
include = ["llama_stack"]
|
where = ["."]
|
||||||
|
include = ["llama_stack", "llama_stack.*"]
|
||||||
|
|
||||||
[[tool.uv.index]]
|
[[tool.uv.index]]
|
||||||
name = "pytorch-cpu"
|
name = "pytorch-cpu"
|
||||||
|
|
|
@ -42,6 +42,8 @@ ecdsa==0.19.1
|
||||||
# via python-jose
|
# via python-jose
|
||||||
exceptiongroup==1.2.2 ; python_full_version < '3.11'
|
exceptiongroup==1.2.2 ; python_full_version < '3.11'
|
||||||
# via anyio
|
# via anyio
|
||||||
|
fastapi==0.115.8
|
||||||
|
# via llama-stack
|
||||||
filelock==3.17.0
|
filelock==3.17.0
|
||||||
# via huggingface-hub
|
# via huggingface-hub
|
||||||
fire==0.7.0
|
fire==0.7.0
|
||||||
|
@ -117,6 +119,7 @@ pyasn1==0.4.8
|
||||||
# rsa
|
# rsa
|
||||||
pydantic==2.10.6
|
pydantic==2.10.6
|
||||||
# via
|
# via
|
||||||
|
# fastapi
|
||||||
# llama-stack
|
# llama-stack
|
||||||
# llama-stack-client
|
# llama-stack-client
|
||||||
# openai
|
# openai
|
||||||
|
@ -171,7 +174,9 @@ sniffio==1.3.1
|
||||||
# llama-stack-client
|
# llama-stack-client
|
||||||
# openai
|
# openai
|
||||||
starlette==0.45.3
|
starlette==0.45.3
|
||||||
# via llama-stack
|
# via
|
||||||
|
# fastapi
|
||||||
|
# llama-stack
|
||||||
termcolor==2.5.0
|
termcolor==2.5.0
|
||||||
# via
|
# via
|
||||||
# fire
|
# fire
|
||||||
|
@ -187,6 +192,7 @@ tqdm==4.67.1
|
||||||
typing-extensions==4.12.2
|
typing-extensions==4.12.2
|
||||||
# via
|
# via
|
||||||
# anyio
|
# anyio
|
||||||
|
# fastapi
|
||||||
# huggingface-hub
|
# huggingface-hub
|
||||||
# llama-stack-client
|
# llama-stack-client
|
||||||
# multidict
|
# multidict
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue