diff --git a/pyproject.toml b/pyproject.toml
index 4f350e2e9..d74b1dd04 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "llama_stack"
-version = "0.2.2rc1"
+version = "0.2.2"
 authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
 description = "Llama Stack"
 readme = "README.md"
@@ -27,7 +27,7 @@ dependencies = [
     "huggingface-hub",
     "jinja2>=3.1.6",
     "jsonschema",
-    "llama-stack-client>=0.2.2rc1",
+    "llama-stack-client>=0.2.2",
     "openai>=1.66",
     "prompt-toolkit",
     "python-dotenv",
@@ -93,7 +93,7 @@ codegen = ["rich", "pydantic", "jinja2>=3.1.6"]
 ui = [
     "streamlit",
     "pandas",
-    "llama-stack-client>=0.2.2rc1",
+    "llama-stack-client>=0.2.2",
     "streamlit-option-menu",
 ]
 
diff --git a/requirements.txt b/requirements.txt
index ef5782905..947d023d4 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,59 +1,171 @@
 # This file was autogenerated by uv via the following command:
 #    uv export --frozen --no-hashes --no-emit-project --output-file=requirements.txt
 annotated-types==0.7.0
+    # via pydantic
 anyio==4.8.0
+    # via
+    #   httpx
+    #   llama-stack-client
+    #   openai
 attrs==25.1.0
+    # via
+    #   jsonschema
+    #   referencing
 blobfile==3.0.0
+    # via llama-stack
 certifi==2025.1.31
+    # via
+    #   httpcore
+    #   httpx
+    #   requests
 charset-normalizer==3.4.1
+    # via requests
 click==8.1.8
+    # via llama-stack-client
 colorama==0.4.6 ; sys_platform == 'win32'
+    # via
+    #   click
+    #   tqdm
 distro==1.9.0
+    # via
+    #   llama-stack-client
+    #   openai
 exceptiongroup==1.2.2 ; python_full_version < '3.11'
+    # via anyio
 filelock==3.17.0
+    # via
+    #   blobfile
+    #   huggingface-hub
 fire==0.7.0
+    # via llama-stack
 fsspec==2024.12.0
+    # via huggingface-hub
 h11==0.14.0
+    # via httpcore
 httpcore==1.0.7
+    # via httpx
 httpx==0.28.1
+    # via
+    #   llama-stack
+    #   llama-stack-client
+    #   openai
 huggingface-hub==0.29.0
+    # via llama-stack
 idna==3.10
+    # via
+    #   anyio
+    #   httpx
+    #   requests
 jinja2==3.1.6
+    # via llama-stack
 jiter==0.8.2
+    # via openai
 jsonschema==4.23.0
+    # via llama-stack
 jsonschema-specifications==2024.10.1
+    # via jsonschema
 llama-stack-client==0.2.1
+    # via llama-stack
 lxml==5.3.1
+    # via blobfile
 markdown-it-py==3.0.0
+    # via rich
 markupsafe==3.0.2
+    # via jinja2
 mdurl==0.1.2
+    # via markdown-it-py
 numpy==2.2.3
+    # via pandas
 openai==1.71.0
+    # via llama-stack
 packaging==24.2
+    # via huggingface-hub
 pandas==2.2.3
+    # via llama-stack-client
 pillow==11.1.0
+    # via llama-stack
 prompt-toolkit==3.0.50
+    # via
+    #   llama-stack
+    #   llama-stack-client
 pyaml==25.1.0
+    # via llama-stack-client
 pycryptodomex==3.21.0
+    # via blobfile
 pydantic==2.10.6
+    # via
+    #   llama-stack
+    #   llama-stack-client
+    #   openai
 pydantic-core==2.27.2
+    # via pydantic
 pygments==2.19.1
+    # via rich
 python-dateutil==2.9.0.post0
+    # via pandas
 python-dotenv==1.0.1
+    # via llama-stack
 pytz==2025.1
+    # via pandas
 pyyaml==6.0.2
+    # via
+    #   huggingface-hub
+    #   pyaml
 referencing==0.36.2
+    # via
+    #   jsonschema
+    #   jsonschema-specifications
 regex==2024.11.6
+    # via tiktoken
 requests==2.32.3
+    # via
+    #   huggingface-hub
+    #   llama-stack
+    #   tiktoken
 rich==13.9.4
+    # via
+    #   llama-stack
+    #   llama-stack-client
 rpds-py==0.22.3
+    # via
+    #   jsonschema
+    #   referencing
 setuptools==75.8.0
+    # via llama-stack
 six==1.17.0
+    # via python-dateutil
 sniffio==1.3.1
+    # via
+    #   anyio
+    #   llama-stack-client
+    #   openai
 termcolor==2.5.0
+    # via
+    #   fire
+    #   llama-stack
+    #   llama-stack-client
 tiktoken==0.9.0
+    # via llama-stack
 tqdm==4.67.1
+    # via
+    #   huggingface-hub
+    #   llama-stack-client
+    #   openai
 typing-extensions==4.12.2
+    # via
+    #   anyio
+    #   huggingface-hub
+    #   llama-stack-client
+    #   openai
+    #   pydantic
+    #   pydantic-core
+    #   referencing
+    #   rich
 tzdata==2025.1
+    # via pandas
 urllib3==2.3.0
+    # via
+    #   blobfile
+    #   requests
 wcwidth==0.2.13
+    # via prompt-toolkit