From 58ffd82853c2ee611bea441d0d96edf34af578e0 Mon Sep 17 00:00:00 2001
From: Nehanth Narendrula
Date: Tue, 29 Jul 2025 14:14:04 -0400
Subject: [PATCH] fix: Update SFTConfig parameter to fix CI and Post Training
 Workflow (#2948)

# What does this PR do?

- Change `max_seq_length` to `max_length` in the `SFTConfig` constructor
- TRL deprecated `max_seq_length` in Feb 2025 and removed it in v0.20.0
- Reference: https://github.com/huggingface/trl/pull/2895

This resolves the SFT training failure in CI tests.
---
 .../post_training/huggingface/recipes/finetune_single_device.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device.py b/llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device.py
index ed9cd7755..2a024eb25 100644
--- a/llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device.py
+++ b/llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device.py
@@ -469,7 +469,7 @@ class HFFinetuningSingleDevice:
             use_cpu=True if device.type == "cpu" and not torch.backends.mps.is_available() else False,
             save_strategy=save_strategy,
             report_to="none",
-            max_seq_length=provider_config.max_seq_length,
+            max_length=provider_config.max_seq_length,
             gradient_accumulation_steps=config.gradient_accumulation_steps,
             gradient_checkpointing=provider_config.gradient_checkpointing,
             learning_rate=lr,
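
For reference, a minimal sketch of the rename from the caller's side, assuming `trl >= 0.20.0` is installed; the values below are illustrative placeholders, not llama-stack defaults:

```python
# Minimal sketch: with trl >= 0.20.0, SFTConfig no longer accepts
# max_seq_length; the same value is passed as max_length instead.
from trl import SFTConfig

sft_config = SFTConfig(
    output_dir="./sft-output",        # placeholder output directory
    max_length=2048,                  # formerly max_seq_length=2048
    gradient_accumulation_steps=1,
    gradient_checkpointing=False,
    learning_rate=2e-5,
    report_to="none",
)
print(sft_config.max_length)
```

On older TRL releases the deprecated `max_seq_length` keyword still worked with a warning; v0.20.0 removed it outright, which is why the CI run failed with an unexpected-keyword error until this one-line rename.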