diff --git a/llama_stack/ui/package.json b/llama_stack/ui/package.json
index 9d448998c..4ca94a64e 100644
--- a/llama_stack/ui/package.json
+++ b/llama_stack/ui/package.json
@@ -20,7 +20,7 @@
     "@radix-ui/react-tooltip": "^1.2.6",
     "class-variance-authority": "^0.7.1",
     "clsx": "^2.1.1",
-    "llama-stack-client": "^0.2.15",
+    "llama-stack-client": "^0.2.16",
     "lucide-react": "^0.510.0",
     "next": "15.3.3",
     "next-auth": "^4.24.11",
diff --git a/pyproject.toml b/pyproject.toml
index ad4bb7314..a5dbd9e17 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "llama_stack"
-version = "0.2.15"
+version = "0.2.16"
 authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
 description = "Llama Stack"
 readme = "README.md"
@@ -28,7 +28,7 @@ dependencies = [
     "huggingface-hub>=0.34.0,<1.0",
     "jinja2>=3.1.6",
     "jsonschema",
-    "llama-stack-client>=0.2.15",
+    "llama-stack-client>=0.2.16",
     "llama-api-client>=0.1.2",
     "openai>=1.66",
     "prompt-toolkit",
@@ -53,7 +53,7 @@ dependencies = [
 ui = [
     "streamlit",
     "pandas",
-    "llama-stack-client>=0.2.15",
+    "llama-stack-client>=0.2.16",
     "streamlit-option-menu",
 ]
 
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 000000000..25a3f9ba3
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,272 @@
+# This file was autogenerated by uv via the following command:
+#    uv export --frozen --no-hashes --no-emit-project --no-default-groups --output-file=requirements.txt
+aiohappyeyeballs==2.5.0
+    # via aiohttp
+aiohttp==3.12.13
+    # via llama-stack
+aiosignal==1.3.2
+    # via aiohttp
+aiosqlite==0.21.0
+    # via llama-stack
+annotated-types==0.7.0
+    # via pydantic
+anyio==4.8.0
+    # via
+    #   httpx
+    #   llama-api-client
+    #   llama-stack-client
+    #   openai
+    #   starlette
+asyncpg==0.30.0
+    # via llama-stack
+attrs==25.1.0
+    # via
+    #   aiohttp
+    #   jsonschema
+    #   referencing
+certifi==2025.1.31
+    # via
+    #   httpcore
+    #   httpx
+    #   requests
+cffi==1.17.1 ; platform_python_implementation != 'PyPy'
+    # via cryptography
+charset-normalizer==3.4.1
+    # via requests
+click==8.1.8
+    # via
+    #   llama-stack-client
+    #   uvicorn
+colorama==0.4.6 ; sys_platform == 'win32'
+    # via
+    #   click
+    #   tqdm
+cryptography==45.0.5
+    # via python-jose
+deprecated==1.2.18
+    # via
+    #   opentelemetry-api
+    #   opentelemetry-exporter-otlp-proto-http
+    #   opentelemetry-semantic-conventions
+distro==1.9.0
+    # via
+    #   llama-api-client
+    #   llama-stack-client
+    #   openai
+ecdsa==0.19.1
+    # via python-jose
+fastapi==0.115.8
+    # via llama-stack
+filelock==3.17.0
+    # via huggingface-hub
+fire==0.7.0
+    # via
+    #   llama-stack
+    #   llama-stack-client
+frozenlist==1.5.0
+    # via
+    #   aiohttp
+    #   aiosignal
+fsspec==2024.12.0
+    # via huggingface-hub
+googleapis-common-protos==1.67.0
+    # via opentelemetry-exporter-otlp-proto-http
+h11==0.16.0
+    # via
+    #   httpcore
+    #   llama-stack
+    #   uvicorn
+hf-xet==1.1.5 ; platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'
+    # via huggingface-hub
+httpcore==1.0.9
+    # via httpx
+httpx==0.28.1
+    # via
+    #   llama-api-client
+    #   llama-stack
+    #   llama-stack-client
+    #   openai
+huggingface-hub==0.34.1
+    # via llama-stack
+idna==3.10
+    # via
+    #   anyio
+    #   httpx
+    #   requests
+    #   yarl
+importlib-metadata==8.5.0
+    # via opentelemetry-api
+jinja2==3.1.6
+    # via llama-stack
+jiter==0.8.2
+    # via openai
+jsonschema==4.23.0
+    # via llama-stack
+jsonschema-specifications==2024.10.1
+    # via jsonschema
+llama-api-client==0.1.2
+    # via llama-stack
+llama-stack-client==0.2.16
+    # via llama-stack
+markdown-it-py==3.0.0
+    # via rich
+markupsafe==3.0.2
+    # via jinja2
+mdurl==0.1.2
+    # via markdown-it-py
+multidict==6.1.0
+    # via
+    #   aiohttp
+    #   yarl
+numpy==2.2.3
+    # via pandas
+openai==1.71.0
+    # via llama-stack
+opentelemetry-api==1.30.0
+    # via
+    #   opentelemetry-exporter-otlp-proto-http
+    #   opentelemetry-sdk
+    #   opentelemetry-semantic-conventions
+opentelemetry-exporter-otlp-proto-common==1.30.0
+    # via opentelemetry-exporter-otlp-proto-http
+opentelemetry-exporter-otlp-proto-http==1.30.0
+    # via llama-stack
+opentelemetry-proto==1.30.0
+    # via
+    #   opentelemetry-exporter-otlp-proto-common
+    #   opentelemetry-exporter-otlp-proto-http
+opentelemetry-sdk==1.30.0
+    # via
+    #   llama-stack
+    #   opentelemetry-exporter-otlp-proto-http
+opentelemetry-semantic-conventions==0.51b0
+    # via opentelemetry-sdk
+packaging==24.2
+    # via huggingface-hub
+pandas==2.2.3
+    # via llama-stack-client
+pillow==11.1.0
+    # via llama-stack
+prompt-toolkit==3.0.50
+    # via
+    #   llama-stack
+    #   llama-stack-client
+propcache==0.3.0
+    # via
+    #   aiohttp
+    #   yarl
+protobuf==5.29.5
+    # via
+    #   googleapis-common-protos
+    #   opentelemetry-proto
+pyaml==25.1.0
+    # via llama-stack-client
+pyasn1==0.4.8
+    # via
+    #   python-jose
+    #   rsa
+pycparser==2.22 ; platform_python_implementation != 'PyPy'
+    # via cffi
+pydantic==2.11.7
+    # via
+    #   fastapi
+    #   llama-api-client
+    #   llama-stack
+    #   llama-stack-client
+    #   openai
+pydantic-core==2.33.2
+    # via pydantic
+pygments==2.19.1
+    # via rich
+python-dateutil==2.9.0.post0
+    # via pandas
+python-dotenv==1.0.1
+    # via llama-stack
+python-jose==3.4.0
+    # via llama-stack
+python-multipart==0.0.20
+    # via llama-stack
+pytz==2025.1
+    # via pandas
+pyyaml==6.0.2
+    # via
+    #   huggingface-hub
+    #   pyaml
+referencing==0.36.2
+    # via
+    #   jsonschema
+    #   jsonschema-specifications
+regex==2024.11.6
+    # via tiktoken
+requests==2.32.4
+    # via
+    #   huggingface-hub
+    #   llama-stack-client
+    #   opentelemetry-exporter-otlp-proto-http
+    #   tiktoken
+rich==13.9.4
+    # via
+    #   llama-stack
+    #   llama-stack-client
+rpds-py==0.22.3
+    # via
+    #   jsonschema
+    #   referencing
+rsa==4.9
+    # via python-jose
+six==1.17.0
+    # via
+    #   ecdsa
+    #   python-dateutil
+sniffio==1.3.1
+    # via
+    #   anyio
+    #   llama-api-client
+    #   llama-stack-client
+    #   openai
+starlette==0.45.3
+    # via
+    #   fastapi
+    #   llama-stack
+termcolor==2.5.0
+    # via
+    #   fire
+    #   llama-stack
+    #   llama-stack-client
+tiktoken==0.9.0
+    # via llama-stack
+tqdm==4.67.1
+    # via
+    #   huggingface-hub
+    #   llama-stack-client
+    #   openai
+typing-extensions==4.12.2
+    # via
+    #   aiosqlite
+    #   anyio
+    #   fastapi
+    #   huggingface-hub
+    #   llama-api-client
+    #   llama-stack-client
+    #   openai
+    #   opentelemetry-sdk
+    #   pydantic
+    #   pydantic-core
+    #   referencing
+    #   typing-inspection
+typing-inspection==0.4.1
+    # via pydantic
+tzdata==2025.1
+    # via pandas
+urllib3==2.5.0
+    # via requests
+uvicorn==0.34.0
+    # via llama-stack
+wcwidth==0.2.13
+    # via prompt-toolkit
+wrapt==1.17.2
+    # via deprecated
+yarl==1.18.3
+    # via aiohttp
+zipp==3.21.0
+    # via importlib-metadata
diff --git a/uv.lock b/uv.lock
index 8b5f04998..99852b730 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1512,7 +1512,7 @@ wheels = [
 
 [[package]]
 name = "llama-stack"
-version = "0.2.15"
+version = "0.2.16"
 source = { editable = "." }
 dependencies = [
     { name = "aiohttp" },
@@ -1642,8 +1642,8 @@ requires-dist = [
     { name = "jinja2", specifier = ">=3.1.6" },
     { name = "jsonschema" },
     { name = "llama-api-client", specifier = ">=0.1.2" },
-    { name = "llama-stack-client", specifier = ">=0.2.15" },
-    { name = "llama-stack-client", marker = "extra == 'ui'", specifier = ">=0.2.15" },
+    { name = "llama-stack-client", specifier = ">=0.2.16" },
+    { name = "llama-stack-client", marker = "extra == 'ui'", specifier = ">=0.2.16" },
     { name = "openai", specifier = ">=1.66" },
     { name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.30.0" },
     { name = "opentelemetry-sdk", specifier = ">=1.30.0" },
@@ -1744,7 +1744,7 @@ unit = [
 
 [[package]]
 name = "llama-stack-client"
-version = "0.2.15"
+version = "0.2.16"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "anyio" },
@@ -1763,9 +1763,9 @@ dependencies = [
     { name = "tqdm" },
     { name = "typing-extensions" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/d4/48/94fbe3d578fe2a1255397c888bbea3357f5af7c5c1468ac35814168177e9/llama_stack_client-0.2.15.tar.gz", hash = "sha256:745c1d1fbbf627c99cdbf5b4c6f7416fe4316971d5ada1ec3a0b122d6b8cc8a0", size = 257646, upload-time = "2025-07-15T23:25:47.192Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/db/28/74ae2faae9af51205587b33fcf2f99a8af090de7aa4122701f2f70f04233/llama_stack_client-0.2.16.tar.gz", hash = "sha256:24294acc6bf40e79900a62f4fa61009acb9af7028b198b12c0ba8adab25c2049", size = 257642, upload-time = "2025-07-28T23:13:22.793Z" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/81/ae/5a404956117bb1fe81ea56b00cb953f3027270a995ee3f7a8fbb19640d07/llama_stack_client-0.2.15-py3-none-any.whl", hash = "sha256:ab0a0712076bf87ce5c20a266af056ac73446248fed3b5d3fe226f9f8a10ce3d", size = 350329, upload-time = "2025-07-15T23:25:45.812Z" },
+    { url = "https://files.pythonhosted.org/packages/30/ec/1874120a15b22f3a88d4e49700c870cc6540bc8c709a841db79a662d7949/llama_stack_client-0.2.16-py3-none-any.whl", hash = "sha256:5c0d13e6ac40143ce01cae4eec65fb39fe24e11f54b86afbd20f0033c38f83c0", size = 350329, upload-time = "2025-07-28T23:13:21.586Z" },
 ]
 
 [[package]]