diff --git a/pyproject.toml b/pyproject.toml
index 88c331b78..ba7c2300a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "llama_stack"
-version = "0.2.6"
+version = "0.2.7"
 authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
 description = "Llama Stack"
 readme = "README.md"
@@ -27,7 +27,7 @@ dependencies = [
     "huggingface-hub",
     "jinja2>=3.1.6",
     "jsonschema",
-    "llama-stack-client>=0.2.6",
+    "llama-stack-client>=0.2.7",
     "openai>=1.66",
     "prompt-toolkit",
     "python-dotenv",
@@ -106,7 +106,7 @@ codegen = ["rich", "pydantic", "jinja2>=3.1.6"]
 ui = [
     "streamlit",
     "pandas",
-    "llama-stack-client>=0.2.6",
+    "llama-stack-client>=0.2.7",
     "streamlit-option-menu",
 ]
diff --git a/requirements.txt b/requirements.txt
index 1a755bae0..0857a9886 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,69 +1,206 @@
 # This file was autogenerated by uv via the following command:
 #    uv export --frozen --no-hashes --no-emit-project --output-file=requirements.txt
 annotated-types==0.7.0
+    # via pydantic
 anyio==4.8.0
+    # via
+    #   httpx
+    #   llama-stack-client
+    #   openai
 attrs==25.1.0
+    # via
+    #   jsonschema
+    #   referencing
 blobfile==3.0.0
+    # via llama-stack
 cachetools==5.5.2
+    # via google-auth
 certifi==2025.1.31
+    # via
+    #   httpcore
+    #   httpx
+    #   kubernetes
+    #   requests
 charset-normalizer==3.4.1
+    # via requests
 click==8.1.8
+    # via llama-stack-client
 colorama==0.4.6 ; sys_platform == 'win32'
+    # via
+    #   click
+    #   tqdm
 distro==1.9.0
+    # via
+    #   llama-stack-client
+    #   openai
 durationpy==0.9
+    # via kubernetes
 exceptiongroup==1.2.2 ; python_full_version < '3.11'
+    # via anyio
 filelock==3.17.0
+    # via
+    #   blobfile
+    #   huggingface-hub
 fire==0.7.0
+    # via llama-stack
 fsspec==2024.12.0
+    # via huggingface-hub
 google-auth==2.38.0
+    # via kubernetes
 h11==0.16.0
+    # via
+    #   httpcore
+    #   llama-stack
 httpcore==1.0.9
+    # via httpx
 httpx==0.28.1
+    # via
+    #   llama-stack
+    #   llama-stack-client
+    #   openai
 huggingface-hub==0.29.0
+    # via llama-stack
 idna==3.10
+    # via
+    #   anyio
+    #   httpx
+    #   requests
 jinja2==3.1.6
+    # via llama-stack
 jiter==0.8.2
+    # via openai
 jsonschema==4.23.0
+    # via llama-stack
 jsonschema-specifications==2024.10.1
+    # via jsonschema
 kubernetes==32.0.1
-llama-stack-client==0.2.6
+    # via llama-stack
+llama-stack-client==0.2.7
+    # via llama-stack
 lxml==5.3.1
+    # via blobfile
 markdown-it-py==3.0.0
+    # via rich
 markupsafe==3.0.2
+    # via jinja2
 mdurl==0.1.2
+    # via markdown-it-py
 numpy==2.2.3
+    # via pandas
 oauthlib==3.2.2
+    # via
+    #   kubernetes
+    #   requests-oauthlib
 openai==1.71.0
+    # via llama-stack
 packaging==24.2
+    # via huggingface-hub
 pandas==2.2.3
+    # via llama-stack-client
 pillow==11.1.0
+    # via llama-stack
 prompt-toolkit==3.0.50
+    # via
+    #   llama-stack
+    #   llama-stack-client
 pyaml==25.1.0
+    # via llama-stack-client
 pyasn1==0.6.1
+    # via
+    #   pyasn1-modules
+    #   rsa
 pyasn1-modules==0.4.2
+    # via google-auth
 pycryptodomex==3.21.0
+    # via blobfile
 pydantic==2.10.6
+    # via
+    #   llama-stack
+    #   llama-stack-client
+    #   openai
 pydantic-core==2.27.2
+    # via pydantic
 pygments==2.19.1
+    # via rich
 python-dateutil==2.9.0.post0
+    # via
+    #   kubernetes
+    #   pandas
 python-dotenv==1.0.1
+    # via llama-stack
 pytz==2025.1
+    # via pandas
 pyyaml==6.0.2
+    # via
+    #   huggingface-hub
+    #   kubernetes
+    #   pyaml
 referencing==0.36.2
+    # via
+    #   jsonschema
+    #   jsonschema-specifications
 regex==2024.11.6
+    # via tiktoken
 requests==2.32.3
+    # via
+    #   huggingface-hub
+    #   kubernetes
+    #   llama-stack
+    #   requests-oauthlib
+    #   tiktoken
 requests-oauthlib==2.0.0
+    # via kubernetes
 rich==13.9.4
+    # via
+    #   llama-stack
+    #   llama-stack-client
 rpds-py==0.22.3
+    # via
+    #   jsonschema
+    #   referencing
 rsa==4.9
+    # via google-auth
 setuptools==75.8.0
+    # via llama-stack
 six==1.17.0
+    # via
+    #   kubernetes
+    #   python-dateutil
 sniffio==1.3.1
+    # via
+    #   anyio
+    #   llama-stack-client
+    #   openai
 termcolor==2.5.0
+    # via
+    #   fire
+    #   llama-stack
+    #   llama-stack-client
 tiktoken==0.9.0
+    # via llama-stack
 tqdm==4.67.1
+    # via
+    #   huggingface-hub
+    #   llama-stack-client
+    #   openai
 typing-extensions==4.12.2
+    # via
+    #   anyio
+    #   huggingface-hub
+    #   llama-stack-client
+    #   openai
+    #   pydantic
+    #   pydantic-core
+    #   referencing
+    #   rich
 tzdata==2025.1
+    # via pandas
 urllib3==2.3.0
+    # via
+    #   blobfile
+    #   kubernetes
+    #   requests
 wcwidth==0.2.13
+    # via prompt-toolkit
 websocket-client==1.8.0
+    # via kubernetes
diff --git a/uv.lock b/uv.lock
index 048e6e202..dbf0c891f 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1419,7 +1419,7 @@ wheels = [
 
 [[package]]
 name = "llama-stack"
-version = "0.2.6"
+version = "0.2.7"
 source = { editable = "." }
 dependencies = [
     { name = "blobfile" },
@@ -1533,8 +1533,8 @@ requires-dist = [
     { name = "jinja2", marker = "extra == 'codegen'", specifier = ">=3.1.6" },
     { name = "jsonschema" },
     { name = "kubernetes" },
-    { name = "llama-stack-client", specifier = ">=0.2.6" },
-    { name = "llama-stack-client", marker = "extra == 'ui'", specifier = ">=0.2.6" },
+    { name = "llama-stack-client", specifier = ">=0.2.7" },
+    { name = "llama-stack-client", marker = "extra == 'ui'", specifier = ">=0.2.7" },
     { name = "mcp", marker = "extra == 'test'" },
     { name = "myst-parser", marker = "extra == 'docs'" },
     { name = "nbval", marker = "extra == 'dev'" },
@@ -1591,7 +1591,7 @@ provides-extras = ["dev", "unit", "test", "docs", "codegen", "ui"]
 
 [[package]]
 name = "llama-stack-client"
-version = "0.2.6"
+version = "0.2.7"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "anyio" },
@@ -1608,9 +1608,9 @@ dependencies = [
     { name = "tqdm" },
     { name = "typing-extensions" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/14/1e/14e549b5fb7ac09347686f6f1c28ee2bcd16cf575aab934687f20b5cec12/llama_stack_client-0.2.6.tar.gz", hash = "sha256:a03a2b0bd43bdb0083378f481614bb65592d7f669a821d0b618b1dfc7d1c8325", size = 259270, upload-time = "2025-05-12T18:01:30.537Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/cd/6b/31c07396c5b3010668e4eb38061a96ffacb47ec4b14d8aeb64c13856c485/llama_stack_client-0.2.7.tar.gz", hash = "sha256:11aee11fdd5e0e8caad07c0cce9c4d88640938844372e7e3453a91ea0757fcb3", size = 259273, upload-time = "2025-05-16T20:31:39.221Z" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/e6/af/93895ce23d3c8e676004e7b69deaea726e81acaaf9cf00090baace904c03/llama_stack_client-0.2.6-py3-none-any.whl", hash = "sha256:9f39dea2dba6767b654d5119f99dfc2b89f838470a547bc5d8def5a230decfcd", size = 292726, upload-time = "2025-05-12T18:01:29.14Z" },
+    { url = "https://files.pythonhosted.org/packages/ac/69/6a5f4683afe355500df4376fdcbfb2fc1e6a0c3bcea5ff8f6114773a9acf/llama_stack_client-0.2.7-py3-none-any.whl", hash = "sha256:78b3f2abdb1770c7b1270a9c0ef58402a988401c564d2e6c83588779ac6fc38d", size = 292727, upload-time = "2025-05-16T20:31:37.587Z" },
 ]
 
 [[package]]