# This file was autogenerated by uv via the following command:
# uv export --frozen --no-hashes --no-emit-project --no-default-groups --output-file=requirements.txt
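#
# A minimal usage sketch, kept as comments so this file remains a valid
# requirements.txt. It assumes uv is installed and that the project root
# contains a pyproject.toml with a matching uv.lock:
#
#   # install the pinned dependencies into the current environment
#   pip install -r requirements.txt
#
#   # regenerate this file after changing dependencies
#   uv lock
#   uv export --frozen --no-hashes --no-emit-project --no-default-groups --output-file=requirements.txt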
aiohappyeyeballs==2.6.1
# via aiohttp
aiohttp==3.12.14
# via llama-stack
aiosignal==1.4.0
# via aiohttp
aiosqlite==0.21.0
# via llama-stack
annotated-types==0.7.0
# via pydantic
anyio==4.9.0
# via
# httpx
# llama-api-client
# llama-stack-client
# openai
# starlette
asyncpg==0.30.0
# via llama-stack
attrs==25.3.0
# via
# aiohttp
# jsonschema
# referencing
certifi==2025.7.14
# via
# httpcore
# httpx
# requests
cffi==1.17.1 ; platform_python_implementation != 'PyPy'
# via cryptography
charset-normalizer==3.4.2
# via requests
click==8.2.1
# via
# llama-stack-client
# uvicorn
colorama==0.4.6 ; sys_platform == 'win32'
# via
# click
# tqdm
cryptography==45.0.5
# via python-jose
distro==1.9.0
# via
# llama-api-client
# llama-stack-client
# openai
ecdsa==0.19.1
# via python-jose
fastapi==0.116.1
# via llama-stack
filelock==3.18.0
# via huggingface-hub
fire==0.7.0
# via
# llama-stack
# llama-stack-client
frozenlist==1.7.0
# via
# aiohttp
# aiosignal
fsspec==2025.3.0
# via huggingface-hub
googleapis-common-protos==1.70.0
# via opentelemetry-exporter-otlp-proto-http
h11==0.16.0
# via
# httpcore
# llama-stack
# uvicorn
hf-xet==1.1.5 ; platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'
# via huggingface-hub
httpcore==1.0.9
# via httpx
httpx==0.28.1
# via
# llama-api-client
# llama-stack
# llama-stack-client
# openai
huggingface-hub==0.33.5
# via llama-stack
idna==3.10
# via
# anyio
# httpx
# requests
# yarl
importlib-metadata==8.7.0
# via opentelemetry-api
jinja2==3.1.6
# via llama-stack
jiter==0.10.0
# via openai
jsonschema==4.25.0
# via llama-stack
jsonschema-specifications==2025.4.1
# via jsonschema
llama-api-client==0.1.2
# via llama-stack
llama-stack-client==0.2.15
# via llama-stack
markdown-it-py==3.0.0
# via rich
markupsafe==3.0.2
# via jinja2
mdurl==0.1.2
# via markdown-it-py
multidict==6.6.3
# via
# aiohttp
# yarl
numpy==2.3.1
# via pandas
openai==1.97.1
# via llama-stack
opentelemetry-api==1.35.0
# via
# opentelemetry-exporter-otlp-proto-http
# opentelemetry-sdk
# opentelemetry-semantic-conventions
opentelemetry-exporter-otlp-proto-common==1.35.0
# via opentelemetry-exporter-otlp-proto-http
opentelemetry-exporter-otlp-proto-http==1.35.0
# via llama-stack
opentelemetry-proto==1.35.0
# via
# opentelemetry-exporter-otlp-proto-common
# opentelemetry-exporter-otlp-proto-http
opentelemetry-sdk==1.35.0
# via
# llama-stack
# opentelemetry-exporter-otlp-proto-http
opentelemetry-semantic-conventions==0.56b0
# via opentelemetry-sdk
packaging==25.0
# via huggingface-hub
pandas==2.3.1
# via llama-stack-client
pillow==11.3.0
# via llama-stack
prompt-toolkit==3.0.51
# via
# llama-stack
# llama-stack-client
propcache==0.3.2
# via
# aiohttp
# yarl
protobuf==6.31.1
# via
# googleapis-common-protos
# opentelemetry-proto
pyaml==25.7.0
# via llama-stack-client
pyasn1==0.6.1
# via
# python-jose
# rsa
pycparser==2.22 ; platform_python_implementation != 'PyPy'
# via cffi
pydantic==2.11.7
# via
# fastapi
# llama-api-client
# llama-stack
# llama-stack-client
# openai
pydantic-core==2.33.2
# via pydantic
pygments==2.19.2
# via rich
python-dateutil==2.9.0.post0
# via pandas
python-dotenv==1.1.1
# via llama-stack
python-jose==3.5.0
# via llama-stack
python-multipart==0.0.20
# via llama-stack
pytz==2025.2
# via pandas
pyyaml==6.0.2
# via
# huggingface-hub
# pyaml
referencing==0.36.2
# via
# jsonschema
# jsonschema-specifications
regex==2024.11.6
# via tiktoken
requests==2.32.4
# via
# huggingface-hub
# llama-stack-client
# opentelemetry-exporter-otlp-proto-http
# tiktoken
rich==14.0.0
# via
# llama-stack
# llama-stack-client
rpds-py==0.26.0
# via
# jsonschema
# referencing
rsa==4.9.1
# via python-jose
six==1.17.0
# via
# ecdsa
# python-dateutil
sniffio==1.3.1
# via
# anyio
# llama-api-client
# llama-stack-client
# openai
starlette==0.47.2
# via
# fastapi
# llama-stack
termcolor==3.1.0
# via
# fire
# llama-stack
# llama-stack-client
tiktoken==0.9.0
# via llama-stack
tqdm==4.67.1
# via
# huggingface-hub
# llama-stack-client
# openai
typing-extensions==4.14.1
# via
# aiosignal
# aiosqlite
# anyio
# fastapi
# huggingface-hub
# llama-api-client
# llama-stack-client
# openai
# opentelemetry-api
# opentelemetry-exporter-otlp-proto-http
# opentelemetry-sdk
# opentelemetry-semantic-conventions
# pydantic
# pydantic-core
# referencing
# starlette
# typing-inspection
typing-inspection==0.4.1
# via pydantic
tzdata==2025.2
# via pandas
urllib3==2.5.0
# via requests
uvicorn==0.35.0
# via llama-stack
wcwidth==0.2.13
# via prompt-toolkit
yarl==1.20.1
# via aiohttp
zipp==3.23.0
# via importlib-metadata