mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-16 05:52:37 +00:00
Some checks failed
Integration Auth Tests / test-matrix (oauth2_token) (push) Failing after 6s
Integration Tests / test-matrix (http, 3.11, datasets) (push) Failing after 10s
Integration Tests / test-matrix (http, 3.11, providers) (push) Failing after 8s
Integration Tests / test-matrix (http, 3.11, inspect) (push) Failing after 9s
Integration Tests / test-matrix (http, 3.11, tool_runtime) (push) Failing after 7s
Integration Tests / test-matrix (http, 3.11, inference) (push) Failing after 10s
Integration Tests / test-matrix (http, 3.12, agents) (push) Failing after 8s
Integration Tests / test-matrix (http, 3.12, post_training) (push) Failing after 7s
Integration Tests / test-matrix (http, 3.12, vector_io) (push) Failing after 5s
Integration Tests / test-matrix (http, 3.11, scoring) (push) Failing after 10s
Integration Tests / test-matrix (http, 3.11, agents) (push) Failing after 12s
Integration Tests / test-matrix (http, 3.11, post_training) (push) Failing after 11s
Integration Tests / test-matrix (http, 3.12, inference) (push) Failing after 10s
Integration Tests / test-matrix (http, 3.12, scoring) (push) Failing after 8s
Integration Tests / test-matrix (http, 3.12, datasets) (push) Failing after 10s
Integration Tests / test-matrix (http, 3.12, providers) (push) Failing after 8s
Integration Tests / test-matrix (library, 3.11, agents) (push) Failing after 7s
Integration Tests / test-matrix (library, 3.11, inspect) (push) Failing after 6s
Integration Tests / test-matrix (http, 3.11, vector_io) (push) Failing after 12s
Integration Tests / test-matrix (http, 3.12, inspect) (push) Failing after 11s
Integration Tests / test-matrix (library, 3.11, inference) (push) Failing after 6s
Integration Tests / test-matrix (library, 3.11, datasets) (push) Failing after 11s
Integration Tests / test-matrix (library, 3.11, post_training) (push) Failing after 10s
Integration Tests / test-matrix (library, 3.11, vector_io) (push) Failing after 6s
Integration Tests / test-matrix (library, 3.11, tool_runtime) (push) Failing after 7s
Integration Tests / test-matrix (library, 3.11, providers) (push) Failing after 9s
Integration Tests / test-matrix (library, 3.12, inference) (push) Failing after 7s
Integration Tests / test-matrix (library, 3.12, agents) (push) Failing after 9s
Integration Tests / test-matrix (library, 3.12, inspect) (push) Failing after 8s
Integration Tests / test-matrix (http, 3.12, tool_runtime) (push) Failing after 10s
Integration Tests / test-matrix (library, 3.12, post_training) (push) Failing after 8s
Test Llama Stack Build / generate-matrix (push) Successful in 7s
Test Llama Stack Build / build-custom-container-distribution (push) Failing after 6s
Integration Tests / test-matrix (library, 3.11, scoring) (push) Failing after 10s
Integration Tests / test-matrix (library, 3.12, vector_io) (push) Failing after 9s
Python Package Build Test / build (3.11) (push) Failing after 2s
Integration Tests / test-matrix (library, 3.12, scoring) (push) Failing after 10s
Test Llama Stack Build / build-ubi9-container-distribution (push) Failing after 4s
Integration Tests / test-matrix (library, 3.12, datasets) (push) Failing after 9s
Python Package Build Test / build (3.13) (push) Failing after 2s
Integration Tests / test-matrix (library, 3.12, providers) (push) Failing after 8s
Python Package Build Test / build (3.12) (push) Failing after 4s
Unit Tests / unit-tests (3.12) (push) Failing after 5s
Test External Providers / test-external-providers (venv) (push) Failing after 8s
Unit Tests / unit-tests (3.13) (push) Failing after 6s
Update ReadTheDocs / update-readthedocs (push) Failing after 11s
Unit Tests / unit-tests (3.11) (push) Failing after 13s
Test Llama Stack Build / build (push) Failing after 8s
Integration Tests / test-matrix (library, 3.12, tool_runtime) (push) Failing after 33s
Test Llama Stack Build / build-single-provider (push) Failing after 31s
Pre-commit / pre-commit (push) Successful in 1m12s
# What does this PR do?
CI tests have been failing with
.venv/lib/python3.12/site-packages/peft/auto.py:21: in <module>
from transformers import (
.venv/lib/python3.12/site-packages/transformers/__init__.py:27: in
<module>
from . import dependency_versions_check
.venv/lib/python3.12/site-packages/transformers/dependency_versions_check.py:57:
in <module>
require_version_core(deps[pkg])
.venv/lib/python3.12/site-packages/transformers/utils/versions.py:117:
in require_version_core
return require_version(requirement, hint)
.venv/lib/python3.12/site-packages/transformers/utils/versions.py:111:
in require_version
_compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
.venv/lib/python3.12/site-packages/transformers/utils/versions.py:44: in
_compare_versions
raise ImportError(
E ImportError: huggingface-hub>=0.30.0,<1.0 is required for a normal
functioning of this module, but found huggingface-hub==0.29.0.
E Try: `pip install transformers -U` or `pip install -e '.[dev]'` if
you're working with git main
------------------------------ Captured log setup
------------------------------
INFO llama_stack.providers.remote.inference.ollama.ollama:ollama.py:106
checking connectivity to Ollama at `http://0.0.0.0:11434`.../
=========================== short test summary info
============================
ERROR
tests/integration/providers/test_providers.py::TestProviders::test_providers
- ImportError: huggingface-hub>=0.30.0,<1.0 is required for a normal
functioning of this module, but found huggingface-hub==0.29.0.
Try: `pip install transformers -U` or `pip install -e '.[dev]'` if
you're working with git main
=================== 1 skipped, 4 warnings, 1 error in 9.52s
====================
## Test Plan
CI
207 lines
3.9 KiB
Text
# This file was autogenerated by uv via the following command:
#    uv export --frozen --no-hashes --no-emit-project --no-default-groups --output-file=requirements.txt
aiohappyeyeballs==2.5.0
    # via aiohttp
aiohttp==3.11.13
    # via llama-stack
aiosignal==1.3.2
    # via aiohttp
annotated-types==0.7.0
    # via pydantic
anyio==4.8.0
    # via
    #   httpx
    #   llama-stack-client
    #   openai
    #   starlette
attrs==25.1.0
    # via
    #   aiohttp
    #   jsonschema
    #   referencing
certifi==2025.1.31
    # via
    #   httpcore
    #   httpx
    #   requests
charset-normalizer==3.4.1
    # via requests
click==8.1.8
    # via llama-stack-client
colorama==0.4.6 ; sys_platform == 'win32'
    # via
    #   click
    #   tqdm
distro==1.9.0
    # via
    #   llama-stack-client
    #   openai
ecdsa==0.19.1
    # via python-jose
fastapi==0.115.8
    # via llama-stack
filelock==3.17.0
    # via huggingface-hub
fire==0.7.0
    # via llama-stack
frozenlist==1.5.0
    # via
    #   aiohttp
    #   aiosignal
fsspec==2024.12.0
    # via huggingface-hub
h11==0.16.0
    # via
    #   httpcore
    #   llama-stack
hf-xet==1.1.5 ; platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'
    # via huggingface-hub
httpcore==1.0.9
    # via httpx
httpx==0.28.1
    # via
    #   llama-stack
    #   llama-stack-client
    #   openai
huggingface-hub==0.33.0
    # via llama-stack
idna==3.10
    # via
    #   anyio
    #   httpx
    #   requests
    #   yarl
jinja2==3.1.6
    # via llama-stack
jiter==0.8.2
    # via openai
jsonschema==4.23.0
    # via llama-stack
jsonschema-specifications==2024.10.1
    # via jsonschema
llama-stack-client==0.2.12
    # via llama-stack
markdown-it-py==3.0.0
    # via rich
markupsafe==3.0.2
    # via jinja2
mdurl==0.1.2
    # via markdown-it-py
multidict==6.1.0
    # via
    #   aiohttp
    #   yarl
numpy==2.2.3
    # via pandas
openai==1.71.0
    # via llama-stack
packaging==24.2
    # via huggingface-hub
pandas==2.2.3
    # via llama-stack-client
pillow==11.1.0
    # via llama-stack
prompt-toolkit==3.0.50
    # via
    #   llama-stack
    #   llama-stack-client
propcache==0.3.0
    # via
    #   aiohttp
    #   yarl
pyaml==25.1.0
    # via llama-stack-client
pyasn1==0.4.8
    # via
    #   python-jose
    #   rsa
pydantic==2.10.6
    # via
    #   fastapi
    #   llama-stack
    #   llama-stack-client
    #   openai
pydantic-core==2.27.2
    # via pydantic
pygments==2.19.1
    # via rich
python-dateutil==2.9.0.post0
    # via pandas
python-dotenv==1.0.1
    # via llama-stack
python-jose==3.4.0
    # via llama-stack
python-multipart==0.0.20
    # via llama-stack
pytz==2025.1
    # via pandas
pyyaml==6.0.2
    # via
    #   huggingface-hub
    #   pyaml
referencing==0.36.2
    # via
    #   jsonschema
    #   jsonschema-specifications
regex==2024.11.6
    # via tiktoken
requests==2.32.4
    # via
    #   huggingface-hub
    #   llama-stack
    #   tiktoken
rich==13.9.4
    # via
    #   llama-stack
    #   llama-stack-client
rpds-py==0.22.3
    # via
    #   jsonschema
    #   referencing
rsa==4.9
    # via python-jose
setuptools==80.8.0
    # via llama-stack
six==1.17.0
    # via
    #   ecdsa
    #   python-dateutil
sniffio==1.3.1
    # via
    #   anyio
    #   llama-stack-client
    #   openai
starlette==0.45.3
    # via
    #   fastapi
    #   llama-stack
termcolor==2.5.0
    # via
    #   fire
    #   llama-stack
    #   llama-stack-client
tiktoken==0.9.0
    # via llama-stack
tqdm==4.67.1
    # via
    #   huggingface-hub
    #   llama-stack-client
    #   openai
typing-extensions==4.12.2
    # via
    #   anyio
    #   fastapi
    #   huggingface-hub
    #   llama-stack-client
    #   openai
    #   pydantic
    #   pydantic-core
    #   referencing
tzdata==2025.1
    # via pandas
urllib3==2.3.0
    # via requests
wcwidth==0.2.13
    # via prompt-toolkit
yarl==1.18.3
    # via aiohttp