fix: change post-training model in tests to make them less resource intensive (#2991)

# What does this PR do?

Changed from `ibm-granite/granite-3.3-2b-instruct` to `HuggingFaceTB/SmolLM2-135M-Instruct` so the test is not as resource intensive in CI.

The idea came from https://github.com/meta-llama/llama-stack/pull/2984#issuecomment-3140400830
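
For context, here is a minimal sketch (not part of this change) of how one might verify locally that the replacement model is small enough for CI. It assumes the `transformers` and `torch` packages are installed; the printed count is illustrative.

```python
# Quick local sanity check: load the replacement model and count its parameters.
# Assumes `transformers` and `torch` are installed; not part of the test suite itself.
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("HuggingFaceTB/SmolLM2-135M-Instruct")
n_params = sum(p.numel() for p in model.parameters())
print(f"{n_params / 1e6:.0f}M parameters")  # ~135M, versus ~2B for granite-3.3-2b-instruct
```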
Authored by Nehanth Narendrula on 2025-07-31 14:22:34 -04:00, committed by GitHub.
Commit b41d696e4f (parent ffb6306fbd)

@@ -70,7 +70,6 @@ class TestPostTraining:
         ],
     )
     @pytest.mark.timeout(360)  # 6 minutes timeout
-    @skip_because_resource_intensive
     def test_supervised_fine_tune(self, llama_stack_client, purpose, source):
         logger.info("Starting supervised fine-tuning test")
@@ -111,7 +110,7 @@ class TestPostTraining:
         # train with HF trl SFTTrainer as the default
         _ = llama_stack_client.post_training.supervised_fine_tune(
             job_uuid=job_uuid,
-            model="ibm-granite/granite-3.3-2b-instruct",
+            model="HuggingFaceTB/SmolLM2-135M-Instruct",  # smaller model that supports the current sft recipe
             algorithm_config=algorithm_config,
             training_config=training_config,
             hyperparam_search_config={},
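
Put together, the call in the test reads roughly as below after this change. Only the arguments visible in the hunk above are shown; the configuration objects are the fixtures the test builds earlier in the method.

```python
# Sketch of the updated call; arguments not shown in the hunk above are omitted.
_ = llama_stack_client.post_training.supervised_fine_tune(
    job_uuid=job_uuid,
    model="HuggingFaceTB/SmolLM2-135M-Instruct",  # smaller model that supports the current sft recipe
    algorithm_config=algorithm_config,
    training_config=training_config,
    hyperparam_search_config={},
)
```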