mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-07-31 08:00:09 +00:00
chore(ci): disable post training tests (#2953)
Post training tests need _much_ better thinking before we can re-enable them to be run on every single PR. Running periodically should be approached only when it is shown that the tests are reliable and as light-weight as can be; otherwise, it is just kicking the can down the road.
This commit is contained in:
parent
072d20a124
commit
81c7d6fa2e
1 changed file with 10 additions and 0 deletions
|
@ -22,6 +22,15 @@ logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(
|
|||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
skip_because_resource_intensive = pytest.mark.skip(
|
||||
reason="""
|
||||
Post training tests are extremely resource intensive. They download large models and partly as a result,
|
||||
are very slow to run. We cannot run them on every single PR update. CI should be considered
|
||||
a scarce resource and properly utilized.
|
||||
"""
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def capture_output(capsys):
|
||||
"""Fixture to capture and display output during test execution."""
|
||||
|
@ -57,6 +66,7 @@ class TestPostTraining:
|
|||
],
|
||||
)
|
||||
@pytest.mark.timeout(360) # 6 minutes timeout
|
||||
@skip_because_resource_intensive
|
||||
def test_supervised_fine_tune(self, llama_stack_client, purpose, source):
|
||||
logger.info("Starting supervised fine-tuning test")
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue