Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-08-12 13:00:39 +00:00)
fix: Post Training Model change in Tests in order to make it less intensive (#2991)
# What does this PR do?
Changed from `ibm-granite/granite-3.3-2b-instruct` to `HuggingFaceTB/SmolLM2-135M-Instruct` so the test is not as resource intensive in CI.

Idea came from https://github.com/meta-llama/llama-stack/pull/2984#issuecomment-3140400830
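Not part of this PR: a rough sketch, using the Hugging Face `transformers` library, that compares the two models' parameter counts to illustrate why the swap lightens the CI job (SmolLM2-135M-Instruct is about 135M parameters versus roughly 2B for the granite model). Note that it downloads both sets of weights, so running it is itself not cheap.

```python
# Hedged sketch, not part of this PR: compare parameter counts of the old and
# new models to show why the smaller model is cheaper to fine-tune in CI.
from transformers import AutoModelForCausalLM

for name in ("HuggingFaceTB/SmolLM2-135M-Instruct", "ibm-granite/granite-3.3-2b-instruct"):
    model = AutoModelForCausalLM.from_pretrained(name)
    n_params = sum(p.numel() for p in model.parameters())
    print(f"{name}: {n_params / 1e6:.0f}M parameters")
```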
This commit is contained in:
parent ffb6306fbd
commit b41d696e4f
1 changed file with 1 addition and 2 deletions
@@ -70,7 +70,6 @@ class TestPostTraining:
         ],
     )
     @pytest.mark.timeout(360)  # 6 minutes timeout
-    @skip_because_resource_intensive
     def test_supervised_fine_tune(self, llama_stack_client, purpose, source):
         logger.info("Starting supervised fine-tuning test")
 
@@ -111,7 +110,7 @@ class TestPostTraining:
         # train with HF trl SFTTrainer as the default
         _ = llama_stack_client.post_training.supervised_fine_tune(
             job_uuid=job_uuid,
-            model="ibm-granite/granite-3.3-2b-instruct",
+            model="HuggingFaceTB/SmolLM2-135M-Instruct",  # smaller model that supports the current sft recipe
             algorithm_config=algorithm_config,
             training_config=training_config,
             hyperparam_search_config={},
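For reference, a minimal sketch of invoking the updated call outside the test harness. Only the parameter names and the model identifier are taken from the diff above; the client construction, the placeholder `algorithm_config`/`training_config` dicts, and the job UUID scheme are assumptions about the surrounding test code, and the real API may require additional arguments not shown in this hunk.

```python
# Hedged sketch, not the actual test: start an SFT job against the smaller model.
# Parameter names mirror the diff above; everything else is an assumption.
import uuid

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed local server

# Placeholder configs; the real test builds typed config objects whose schema
# is not part of this diff.
algorithm_config = {"type": "LoRA"}
training_config = {"n_epochs": 1, "max_steps_per_epoch": 1}

_ = client.post_training.supervised_fine_tune(
    job_uuid=f"test-sft-{uuid.uuid4()}",
    model="HuggingFaceTB/SmolLM2-135M-Instruct",  # smaller model, cheaper to fine-tune in CI
    algorithm_config=algorithm_config,
    training_config=training_config,
    hyperparam_search_config={},
)
```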