Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-07-17 18:38:11 +00:00)
build: Bump version to 0.2.15
parent b096794959
commit 95fdc8ea94
4 changed files with 1527 additions and 1703 deletions
package.json

@@ -20,7 +20,7 @@
     "@radix-ui/react-tooltip": "^1.2.6",
     "class-variance-authority": "^0.7.1",
     "clsx": "^2.1.1",
-    "llama-stack-client": "^0.2.14",
+    "llama-stack-client": "^0.2.15",
     "lucide-react": "^0.510.0",
     "next": "15.3.3",
     "next-auth": "^4.24.11",
pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "llama_stack"
-version = "0.2.14"
+version = "0.2.15"
 authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
 description = "Llama Stack"
 readme = "README.md"

@@ -28,7 +28,7 @@ dependencies = [
     "huggingface-hub>=0.30.0,<1.0",
     "jinja2>=3.1.6",
     "jsonschema",
-    "llama-stack-client>=0.2.14",
+    "llama-stack-client>=0.2.15",
     "openai>=1.66",
     "prompt-toolkit",
     "python-dotenv",

@@ -52,7 +52,7 @@ dependencies = [
 ui = [
     "streamlit",
     "pandas",
-    "llama-stack-client>=0.2.14",
+    "llama-stack-client>=0.2.15",
     "streamlit-option-menu",
 ]
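Note that this bump moves the package version and every llama-stack-client pin in pyproject.toml in lockstep. A minimal sketch of how that invariant could be checked before tagging a release (not part of this commit; assumes Python 3.11+ for tomllib and a repo-root working directory):

```python
# Hypothetical release check, not from this commit: verify that all
# llama-stack-client pins in pyproject.toml match the project version,
# which is the invariant this version bump maintains.
import re
import tomllib  # stdlib since Python 3.11

with open("pyproject.toml", "rb") as f:
    project = tomllib.load(f)["project"]

version = project["version"]  # "0.2.15" after this commit
deps = list(project.get("dependencies", []))
# Optional extras (e.g. the ui group) carry their own pin as well.
for extra in project.get("optional-dependencies", {}).values():
    deps.extend(extra)

for dep in deps:
    if dep.startswith("llama-stack-client"):
        m = re.search(r">=([\d.]+)", dep)
        if m and m.group(1) != version:
            raise SystemExit(f"{dep!r} is out of sync with version {version}")
print(f"all llama-stack-client pins match {version}")
```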
requirements.txt (176 changed lines)
@ -1,261 +1,87 @@
|
|||
# This file was autogenerated by uv via the following command:
|
||||
# uv export --frozen --no-hashes --no-emit-project --no-default-groups --output-file=requirements.txt
|
||||
aiohappyeyeballs==2.5.0
|
||||
# via aiohttp
|
||||
aiohttp==3.12.13
|
||||
# via llama-stack
|
||||
aiosignal==1.3.2
|
||||
# via aiohttp
|
||||
aiosqlite==0.21.0
|
||||
# via llama-stack
|
||||
annotated-types==0.7.0
|
||||
# via pydantic
|
||||
anyio==4.8.0
|
||||
# via
|
||||
# httpx
|
||||
# llama-stack-client
|
||||
# openai
|
||||
# starlette
|
||||
asyncpg==0.30.0
|
||||
# via llama-stack
|
||||
attrs==25.1.0
|
||||
# via
|
||||
# aiohttp
|
||||
# jsonschema
|
||||
# referencing
|
||||
certifi==2025.1.31
|
||||
# via
|
||||
# httpcore
|
||||
# httpx
|
||||
# requests
|
||||
cffi==1.17.1 ; platform_python_implementation != 'PyPy'
|
||||
# via cryptography
|
||||
charset-normalizer==3.4.1
|
||||
# via requests
|
||||
click==8.1.8
|
||||
# via
|
||||
# llama-stack-client
|
||||
# uvicorn
|
||||
colorama==0.4.6 ; sys_platform == 'win32'
|
||||
# via
|
||||
# click
|
||||
# tqdm
|
||||
cryptography==45.0.5
|
||||
# via python-jose
|
||||
deprecated==1.2.18
|
||||
# via
|
||||
# opentelemetry-api
|
||||
# opentelemetry-exporter-otlp-proto-http
|
||||
# opentelemetry-semantic-conventions
|
||||
distro==1.9.0
|
||||
# via
|
||||
# llama-stack-client
|
||||
# openai
|
||||
ecdsa==0.19.1
|
||||
# via python-jose
|
||||
fastapi==0.115.8
|
||||
# via llama-stack
|
||||
filelock==3.17.0
|
||||
# via huggingface-hub
|
||||
fire==0.7.0
|
||||
# via
|
||||
# llama-stack
|
||||
# llama-stack-client
|
||||
frozenlist==1.5.0
|
||||
# via
|
||||
# aiohttp
|
||||
# aiosignal
|
||||
fsspec==2024.12.0
|
||||
# via huggingface-hub
|
||||
googleapis-common-protos==1.67.0
|
||||
# via opentelemetry-exporter-otlp-proto-http
|
||||
h11==0.16.0
|
||||
# via
|
||||
# httpcore
|
||||
# llama-stack
|
||||
# uvicorn
|
||||
hf-xet==1.1.5 ; platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'
|
||||
# via huggingface-hub
|
||||
httpcore==1.0.9
|
||||
# via httpx
|
||||
httpx==0.28.1
|
||||
# via
|
||||
# llama-stack
|
||||
# llama-stack-client
|
||||
# openai
|
||||
huggingface-hub==0.33.0
|
||||
# via llama-stack
|
||||
idna==3.10
|
||||
# via
|
||||
# anyio
|
||||
# httpx
|
||||
# requests
|
||||
# yarl
|
||||
importlib-metadata==8.5.0
|
||||
# via opentelemetry-api
|
||||
jinja2==3.1.6
|
||||
# via llama-stack
|
||||
jiter==0.8.2
|
||||
# via openai
|
||||
jsonschema==4.23.0
|
||||
# via llama-stack
|
||||
jsonschema-specifications==2024.10.1
|
||||
# via jsonschema
|
||||
llama-stack-client==0.2.14
|
||||
# via llama-stack
|
||||
llama-stack-client==0.2.15
|
||||
markdown-it-py==3.0.0
|
||||
# via rich
|
||||
markupsafe==3.0.2
|
||||
# via jinja2
|
||||
mdurl==0.1.2
|
||||
# via markdown-it-py
|
||||
multidict==6.1.0
|
||||
# via
|
||||
# aiohttp
|
||||
# yarl
|
||||
numpy==2.2.3
|
||||
# via pandas
|
||||
openai==1.71.0
|
||||
# via llama-stack
|
||||
opentelemetry-api==1.30.0
|
||||
# via
|
||||
# opentelemetry-exporter-otlp-proto-http
|
||||
# opentelemetry-sdk
|
||||
# opentelemetry-semantic-conventions
|
||||
opentelemetry-exporter-otlp-proto-common==1.30.0
|
||||
# via opentelemetry-exporter-otlp-proto-http
|
||||
opentelemetry-exporter-otlp-proto-http==1.30.0
|
||||
# via llama-stack
|
||||
opentelemetry-proto==1.30.0
|
||||
# via
|
||||
# opentelemetry-exporter-otlp-proto-common
|
||||
# opentelemetry-exporter-otlp-proto-http
|
||||
opentelemetry-sdk==1.30.0
|
||||
# via
|
||||
# llama-stack
|
||||
# opentelemetry-exporter-otlp-proto-http
|
||||
opentelemetry-semantic-conventions==0.51b0
|
||||
# via opentelemetry-sdk
|
||||
packaging==24.2
|
||||
# via huggingface-hub
|
||||
pandas==2.2.3
|
||||
# via llama-stack-client
|
||||
pillow==11.1.0
|
||||
# via llama-stack
|
||||
prompt-toolkit==3.0.50
|
||||
# via
|
||||
# llama-stack
|
||||
# llama-stack-client
|
||||
propcache==0.3.0
|
||||
# via
|
||||
# aiohttp
|
||||
# yarl
|
||||
protobuf==5.29.5
|
||||
# via
|
||||
# googleapis-common-protos
|
||||
# opentelemetry-proto
|
||||
pyaml==25.1.0
|
||||
# via llama-stack-client
|
||||
pyasn1==0.4.8
|
||||
# via
|
||||
# python-jose
|
||||
# rsa
|
||||
pycparser==2.22 ; platform_python_implementation != 'PyPy'
|
||||
# via cffi
|
||||
pydantic==2.10.6
|
||||
# via
|
||||
# fastapi
|
||||
# llama-stack
|
||||
# llama-stack-client
|
||||
# openai
|
||||
pydantic-core==2.27.2
|
||||
# via pydantic
|
||||
pygments==2.19.1
|
||||
# via rich
|
||||
python-dateutil==2.9.0.post0
|
||||
# via pandas
|
||||
python-dotenv==1.0.1
|
||||
# via llama-stack
|
||||
python-jose==3.4.0
|
||||
# via llama-stack
|
||||
python-multipart==0.0.20
|
||||
# via llama-stack
|
||||
pytz==2025.1
|
||||
# via pandas
|
||||
pyyaml==6.0.2
|
||||
# via
|
||||
# huggingface-hub
|
||||
# pyaml
|
||||
referencing==0.36.2
|
||||
# via
|
||||
# jsonschema
|
||||
# jsonschema-specifications
|
||||
regex==2024.11.6
|
||||
# via tiktoken
|
||||
requests==2.32.4
|
||||
# via
|
||||
# huggingface-hub
|
||||
# llama-stack-client
|
||||
# opentelemetry-exporter-otlp-proto-http
|
||||
# tiktoken
|
||||
rich==13.9.4
|
||||
# via
|
||||
# llama-stack
|
||||
# llama-stack-client
|
||||
rpds-py==0.22.3
|
||||
# via
|
||||
# jsonschema
|
||||
# referencing
|
||||
rsa==4.9
|
||||
# via python-jose
|
||||
six==1.17.0
|
||||
# via
|
||||
# ecdsa
|
||||
# python-dateutil
|
||||
sniffio==1.3.1
|
||||
# via
|
||||
# anyio
|
||||
# llama-stack-client
|
||||
# openai
|
||||
starlette==0.45.3
|
||||
# via
|
||||
# fastapi
|
||||
# llama-stack
|
||||
termcolor==2.5.0
|
||||
# via
|
||||
# fire
|
||||
# llama-stack
|
||||
# llama-stack-client
|
||||
tiktoken==0.9.0
|
||||
# via llama-stack
|
||||
tqdm==4.67.1
|
||||
# via
|
||||
# huggingface-hub
|
||||
# llama-stack-client
|
||||
# openai
|
||||
typing-extensions==4.12.2
|
||||
# via
|
||||
# aiosqlite
|
||||
# anyio
|
||||
# fastapi
|
||||
# huggingface-hub
|
||||
# llama-stack-client
|
||||
# openai
|
||||
# opentelemetry-sdk
|
||||
# pydantic
|
||||
# pydantic-core
|
||||
# referencing
|
||||
tzdata==2025.1
|
||||
# via pandas
|
||||
urllib3==2.5.0
|
||||
# via requests
|
||||
uvicorn==0.34.0
|
||||
# via llama-stack
|
||||
wcwidth==0.2.13
|
||||
# via prompt-toolkit
|
||||
wrapt==1.17.2
|
||||
# via deprecated
|
||||
yarl==1.18.3
|
||||
# via aiohttp
|
||||
zipp==3.21.0
|
||||
# via importlib-metadata
|
||||
|
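requirements.txt is autogenerated by the uv export command recorded in its header, so a version bump regenerates the file rather than hand-editing it; the hunk above drops the `# via` annotations and bumps the single llama-stack-client pin. A small sketch of a post-regeneration check (an assumed release step, not from this commit):

```python
# Hypothetical check, not from this commit: confirm the regenerated
# requirements.txt pins llama-stack-client at exactly the new release.
from pathlib import Path

EXPECTED = "llama-stack-client==0.2.15"

pins = [
    line.strip()
    for line in Path("requirements.txt").read_text().splitlines()
    if line.strip().startswith("llama-stack-client==")
]
if pins != [EXPECTED]:
    raise SystemExit(f"unexpected pins: {pins}")
print(f"requirements.txt pins {EXPECTED}")
```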