From b41d696e4ff273329455311c6f0aed141ff2c84c Mon Sep 17 00:00:00 2001
From: Nehanth Narendrula
Date: Thu, 31 Jul 2025 14:22:34 -0400
Subject: [PATCH] fix: Post Training Model change in Tests in order to make it less intensive (#2991)

# What does this PR do?

Changed from `ibm-granite/granite-3.3-2b-instruct` to `HuggingFaceTB/SmolLM2-135M-Instruct` so it is not as resource intensive in CI.

Idea came from - https://github.com/meta-llama/llama-stack/pull/2984#issuecomment-3140400830
---
 tests/integration/post_training/test_post_training.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/tests/integration/post_training/test_post_training.py b/tests/integration/post_training/test_post_training.py
index 002da1160..f9c797593 100644
--- a/tests/integration/post_training/test_post_training.py
+++ b/tests/integration/post_training/test_post_training.py
@@ -70,7 +70,6 @@ class TestPostTraining:
         ],
     )
     @pytest.mark.timeout(360)  # 6 minutes timeout
-    @skip_because_resource_intensive
     def test_supervised_fine_tune(self, llama_stack_client, purpose, source):
         logger.info("Starting supervised fine-tuning test")
 
@@ -111,7 +110,7 @@ class TestPostTraining:
         # train with HF trl SFTTrainer as the default
         _ = llama_stack_client.post_training.supervised_fine_tune(
             job_uuid=job_uuid,
-            model="ibm-granite/granite-3.3-2b-instruct",
+            model="HuggingFaceTB/SmolLM2-135M-Instruct",  # smaller model that supports the current sft recipe
             algorithm_config=algorithm_config,
             training_config=training_config,
             hyperparam_search_config={},
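
For context, a minimal sketch of how the affected test invokes the post-training API after this change. The parameter names and the `SmolLM2-135M-Instruct` model ID come from the hunk above; everything else is an assumption: the client construction (default local base URL), the illustrative `job_uuid`, and the `algorithm_config` / `training_config` placeholders, which stand in for objects the test builds earlier but that are not shown in this diff. The remaining keyword arguments are elided, as in the truncated hunk.

```python
# Sketch only: mirrors the call shape visible in the second hunk above.
import uuid

from llama_stack_client import LlamaStackClient

# Assumption: a locally running Llama Stack server; the integration test
# instead receives llama_stack_client as a pytest fixture.
llama_stack_client = LlamaStackClient(base_url="http://localhost:8321")

job_uuid = f"test-sft-{uuid.uuid4()}"  # illustrative job identifier

# Placeholders for the configs the test constructs before this call;
# their exact contents are not visible in this diff.
algorithm_config = ...
training_config = ...

_ = llama_stack_client.post_training.supervised_fine_tune(
    job_uuid=job_uuid,
    model="HuggingFaceTB/SmolLM2-135M-Instruct",  # smaller model that supports the current sft recipe
    algorithm_config=algorithm_config,
    training_config=training_config,
    hyperparam_search_config={},
    # further arguments elided, matching the truncated hunk above
)
```

The design point of the patch itself is simply that a 135M-parameter model keeps the same code path exercised while fitting comfortably in CI resource limits, which is also why the `@skip_because_resource_intensive` marker could be dropped.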