Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-07-29 07:14:20 +00:00
chore: remove requirements.txt
We use pyproject.toml to maintain our dependencies, and uv.lock to pin their versions, so a checked-in requirements.txt should not be needed. Keeping it around also seems to prevent Dependabot from updating our uv.lock. Until https://github.com/meta-llama/llama-stack/issues/2908 is implemented, let's see if we can at least get some updates from pyproject.toml.

Signed-off-by: Sébastien Han <seb@redhat.com>
This commit is contained in:
parent 85223ccc4d
commit bc1ab78417

2 changed files with 1 addition and 271 deletions
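The workflow the message describes can be sketched with stock uv commands. This is a minimal illustration, not part of the commit; only the uv export invocation appears verbatim in the header of the deleted file below:

    # Declare dependencies in pyproject.toml, then resolve and pin them:
    uv lock            # write/update uv.lock from pyproject.toml
    uv sync --frozen   # install exactly what uv.lock pins

    # A consumer who still needs a requirements.txt can generate one on
    # demand (the command recorded in the deleted file's header):
    uv export --frozen --no-hashes --no-emit-project --no-default-groups \
        --output-file=requirements.txt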
.pre-commit-config.yaml (1 addition, 2 deletions)
@@ -61,8 +61,7 @@ repos:
           "--frozen",
           "--no-hashes",
           "--no-emit-project",
-          "--no-default-groups",
-          "--output-file=requirements.txt"
+          "--no-default-groups"
         ]

 - repo: https://github.com/pre-commit/mirrors-mypy
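For context, the args list edited above belongs to a pre-commit hook entry. A plausible reconstruction of the stanza after this change is shown below; the repo, rev, and hook id lines are assumptions, since the diff shows only the argument list (uv's official hooks, including uv-export, live at https://github.com/astral-sh/uv-pre-commit):

    # Hypothetical surrounding context; only the args list appears in the diff.
    - repo: https://github.com/astral-sh/uv-pre-commit
      rev: <pinned tag>
      hooks:
        - id: uv-export
          args: [
            "--frozen",
            "--no-hashes",
            "--no-emit-project",
            "--no-default-groups"
          ]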
requirements.txt (269 deletions)
@@ -1,269 +0,0 @@
# This file was autogenerated by uv via the following command:
#    uv export --frozen --no-hashes --no-emit-project --no-default-groups --output-file=requirements.txt
aiohappyeyeballs==2.5.0
    # via aiohttp
aiohttp==3.12.13
    # via llama-stack
aiosignal==1.3.2
    # via aiohttp
aiosqlite==0.21.0
    # via llama-stack
annotated-types==0.7.0
    # via pydantic
anyio==4.8.0
    # via
    #   httpx
    #   llama-api-client
    #   llama-stack-client
    #   openai
    #   starlette
asyncpg==0.30.0
    # via llama-stack
attrs==25.1.0
    # via
    #   aiohttp
    #   jsonschema
    #   referencing
certifi==2025.1.31
    # via
    #   httpcore
    #   httpx
    #   requests
cffi==1.17.1 ; platform_python_implementation != 'PyPy'
    # via cryptography
charset-normalizer==3.4.1
    # via requests
click==8.1.8
    # via
    #   llama-stack-client
    #   uvicorn
colorama==0.4.6 ; sys_platform == 'win32'
    # via
    #   click
    #   tqdm
cryptography==45.0.5
    # via python-jose
deprecated==1.2.18
    # via
    #   opentelemetry-api
    #   opentelemetry-exporter-otlp-proto-http
    #   opentelemetry-semantic-conventions
distro==1.9.0
    # via
    #   llama-api-client
    #   llama-stack-client
    #   openai
ecdsa==0.19.1
    # via python-jose
fastapi==0.115.8
    # via llama-stack
filelock==3.17.0
    # via huggingface-hub
fire==0.7.0
    # via
    #   llama-stack
    #   llama-stack-client
frozenlist==1.5.0
    # via
    #   aiohttp
    #   aiosignal
fsspec==2024.12.0
    # via huggingface-hub
googleapis-common-protos==1.67.0
    # via opentelemetry-exporter-otlp-proto-http
h11==0.16.0
    # via
    #   httpcore
    #   llama-stack
    #   uvicorn
hf-xet==1.1.5 ; platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'
    # via huggingface-hub
httpcore==1.0.9
    # via httpx
httpx==0.28.1
    # via
    #   llama-api-client
    #   llama-stack
    #   llama-stack-client
    #   openai
huggingface-hub==0.33.0
    # via llama-stack
idna==3.10
    # via
    #   anyio
    #   httpx
    #   requests
    #   yarl
importlib-metadata==8.5.0
    # via opentelemetry-api
jinja2==3.1.6
    # via llama-stack
jiter==0.8.2
    # via openai
jsonschema==4.23.0
    # via llama-stack
jsonschema-specifications==2024.10.1
    # via jsonschema
llama-api-client==0.1.2
    # via llama-stack
llama-stack-client==0.2.15
    # via llama-stack
markdown-it-py==3.0.0
    # via rich
markupsafe==3.0.2
    # via jinja2
mdurl==0.1.2
    # via markdown-it-py
multidict==6.1.0
    # via
    #   aiohttp
    #   yarl
numpy==2.2.3
    # via pandas
openai==1.71.0
    # via llama-stack
opentelemetry-api==1.30.0
    # via
    #   opentelemetry-exporter-otlp-proto-http
    #   opentelemetry-sdk
    #   opentelemetry-semantic-conventions
opentelemetry-exporter-otlp-proto-common==1.30.0
    # via opentelemetry-exporter-otlp-proto-http
opentelemetry-exporter-otlp-proto-http==1.30.0
    # via llama-stack
opentelemetry-proto==1.30.0
    # via
    #   opentelemetry-exporter-otlp-proto-common
    #   opentelemetry-exporter-otlp-proto-http
opentelemetry-sdk==1.30.0
    # via
    #   llama-stack
    #   opentelemetry-exporter-otlp-proto-http
opentelemetry-semantic-conventions==0.51b0
    # via opentelemetry-sdk
packaging==24.2
    # via huggingface-hub
pandas==2.2.3
    # via llama-stack-client
pillow==11.1.0
    # via llama-stack
prompt-toolkit==3.0.50
    # via
    #   llama-stack
    #   llama-stack-client
propcache==0.3.0
    # via
    #   aiohttp
    #   yarl
protobuf==5.29.5
    # via
    #   googleapis-common-protos
    #   opentelemetry-proto
pyaml==25.1.0
    # via llama-stack-client
pyasn1==0.4.8
    # via
    #   python-jose
    #   rsa
pycparser==2.22 ; platform_python_implementation != 'PyPy'
    # via cffi
pydantic==2.10.6
    # via
    #   fastapi
    #   llama-api-client
    #   llama-stack
    #   llama-stack-client
    #   openai
pydantic-core==2.27.2
    # via pydantic
pygments==2.19.1
    # via rich
python-dateutil==2.9.0.post0
    # via pandas
python-dotenv==1.0.1
    # via llama-stack
python-jose==3.4.0
    # via llama-stack
python-multipart==0.0.20
    # via llama-stack
pytz==2025.1
    # via pandas
pyyaml==6.0.2
    # via
    #   huggingface-hub
    #   pyaml
referencing==0.36.2
    # via
    #   jsonschema
    #   jsonschema-specifications
regex==2024.11.6
    # via tiktoken
requests==2.32.4
    # via
    #   huggingface-hub
    #   llama-stack-client
    #   opentelemetry-exporter-otlp-proto-http
    #   tiktoken
rich==13.9.4
    # via
    #   llama-stack
    #   llama-stack-client
rpds-py==0.22.3
    # via
    #   jsonschema
    #   referencing
rsa==4.9
    # via python-jose
six==1.17.0
    # via
    #   ecdsa
    #   python-dateutil
sniffio==1.3.1
    # via
    #   anyio
    #   llama-api-client
    #   llama-stack-client
    #   openai
starlette==0.45.3
    # via
    #   fastapi
    #   llama-stack
termcolor==2.5.0
    # via
    #   fire
    #   llama-stack
    #   llama-stack-client
tiktoken==0.9.0
    # via llama-stack
tqdm==4.67.1
    # via
    #   huggingface-hub
    #   llama-stack-client
    #   openai
typing-extensions==4.12.2
    # via
    #   aiosqlite
    #   anyio
    #   fastapi
    #   huggingface-hub
    #   llama-api-client
    #   llama-stack-client
    #   openai
    #   opentelemetry-sdk
    #   pydantic
    #   pydantic-core
    #   referencing
tzdata==2025.1
    # via pandas
urllib3==2.5.0
    # via requests
uvicorn==0.34.0
    # via llama-stack
wcwidth==0.2.13
    # via prompt-toolkit
wrapt==1.17.2
    # via deprecated
yarl==1.18.3
    # via aiohttp
zipp==3.21.0
    # via importlib-metadata
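On the Dependabot point in the commit message: until uv.lock support is wired up (the issue linked above), a pip-ecosystem Dependabot config can at least pick up version bumps from pyproject.toml. A hypothetical sketch, not part of this commit:

    # .github/dependabot.yml (illustrative only)
    version: 2
    updates:
      - package-ecosystem: "pip"   # reads pyproject.toml; does not update uv.lock
        directory: "/"
        schedule:
          interval: "weekly"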