From bae197c37e345296bd6e7519eee00dec109fe62f Mon Sep 17 00:00:00 2001
From: Botao Chen
Date: Fri, 20 Dec 2024 16:12:02 -0800
Subject: [PATCH] Fix post training apis broken by torchtune release (#674)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

There was a torchtune release this morning
(https://github.com/pytorch/torchtune/releases/tag/v0.5.0) that removes
`load_dora_magnitudes` from `torchtune.modules.peft` and breaks the post
training APIs.

## Test

Spun up the server and verified that post training works again after the
fix.

[Screenshot 2024-12-20 at 4:08:54 PM]

## Note

We need to think hard about how to avoid this happening again, and have a
fast follow-up on this after the holidays. One option is sketched after the
diff below.
---
 .../torchtune/recipes/lora_finetuning_single_device.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py b/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py
index cc430577f..71b8bf759 100644
--- a/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py
+++ b/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py
@@ -43,7 +43,6 @@ from torchtune.modules.peft import (
     get_adapter_state_dict,
     get_lora_module_names,
     get_merged_lora_ckpt,
-    load_dora_magnitudes,
     set_trainable_params,
     validate_missing_and_unexpected_for_lora,
 )
@@ -281,7 +280,6 @@ class LoraFinetuningSingleDevice:
         for m in model.modules():
             if hasattr(m, "initialize_dora_magnitude"):
                 m.initialize_dora_magnitude()
-        load_dora_magnitudes(model)
         if lora_weights_state_dict:
             lora_missing, lora_unexpected = model.load_state_dict(
                 lora_weights_state_dict, strict=False
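
As one possible direction for the follow-up mentioned in the Note: a minimal
sketch (not part of this patch) of a guarded import that would tolerate both
torchtune releases, assuming `load_dora_magnitudes(model)` keeps its
pre-0.5.0 signature. The `initialize_dora_model` helper name is hypothetical:

```python
# Hypothetical compatibility shim, not part of this patch.
# torchtune < 0.5.0 exports load_dora_magnitudes; v0.5.0 removed it, and
# calling initialize_dora_magnitude() on each module is assumed sufficient.
try:
    from torchtune.modules.peft import load_dora_magnitudes
except ImportError:  # removed in torchtune v0.5.0
    load_dora_magnitudes = None


def initialize_dora_model(model):
    # Same loop the recipe already runs, kept version-agnostic.
    for m in model.modules():
        if hasattr(m, "initialize_dora_magnitude"):
            m.initialize_dora_magnitude()
    # Only older torchtune needs the extra magnitude-loading pass.
    if load_dora_magnitudes is not None:
        load_dora_magnitudes(model)
```

Pinning the torchtune version range in the provider's requirements would be
a simpler alternative, at the cost of delaying upgrades.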