From 3c510750aab65ffdd343f6145ded7574f9715849 Mon Sep 17 00:00:00 2001
From: Nehanth
Date: Tue, 29 Jul 2025 18:00:50 +0000
Subject: [PATCH] fix: Update SFTConfig parameter

- Change max_seq_length to max_length in the SFTConfig constructor
- TRL deprecated max_seq_length in Feb 2025 and removed it in v0.20.0
- Reference: https://github.com/huggingface/trl/pull/2895

This resolves the SFT training failure in CI tests
---
 .../post_training/huggingface/recipes/finetune_single_device.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device.py b/llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device.py
index ed9cd7755..2a024eb25 100644
--- a/llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device.py
+++ b/llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device.py
@@ -469,7 +469,7 @@ class HFFinetuningSingleDevice:
             use_cpu=True if device.type == "cpu" and not torch.backends.mps.is_available() else False,
             save_strategy=save_strategy,
             report_to="none",
-            max_seq_length=provider_config.max_seq_length,
+            max_length=provider_config.max_seq_length,
             gradient_accumulation_steps=config.gradient_accumulation_steps,
             gradient_checkpointing=provider_config.gradient_checkpointing,
             learning_rate=lr,
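
Note (not part of the patch): a minimal sketch of the updated call site, assuming TRL >= 0.20.0. The values below are placeholder stand-ins for the provider_config/config fields used in the recipe, not the recipe's actual code.

    from trl import SFTConfig

    # Placeholder values standing in for provider_config / training job config fields.
    max_seq_length = 2048
    lr = 2e-5

    training_args = SFTConfig(
        output_dir="./sft-checkpoints",  # hypothetical output path
        report_to="none",
        max_length=max_seq_length,  # renamed from max_seq_length=; old name removed in TRL v0.20.0
        learning_rate=lr,
    )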