From db5492202f17c9de28a2f5b34532821c8ebd0882 Mon Sep 17 00:00:00 2001
From: Botao Chen
Date: Tue, 14 Jan 2025 18:06:37 -0800
Subject: [PATCH] remove debug CUDA memory prints from LoRA finetuning recipe

---
 .../torchtune/recipes/lora_finetuning_single_device.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py b/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py
index 0e0149ad5..80e206ebb 100644
--- a/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py
+++ b/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py
@@ -587,9 +587,6 @@ class LoraFinetuningSingleDevice:
         gc.collect()
         torch.cuda.empty_cache()
 
-        print("Allocated:", torch.cuda.memory_allocated() / 1e6, "MB")
-        print("Reserved: ", torch.cuda.memory_reserved() / 1e6, "MB")
-
         return (memory_stats, checkpoints)
 
     async def validation(self) -> Tuple[float, float]:
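
Note (not part of the patch): the removed lines printed raw CUDA memory counters to stdout. If that visibility is still wanted without the stdout noise, one option is a debug-level log line. The sketch below is illustrative only; the helper name and logger setup are assumptions, not code from this repository, and only `torch.cuda.memory_allocated()` / `torch.cuda.memory_reserved()` come from the removed lines.

import logging

import torch

log = logging.getLogger(__name__)

def log_cuda_memory() -> None:
    # Report allocated/reserved CUDA memory in MB at debug level instead of print().
    if torch.cuda.is_available():
        log.debug("Allocated: %.1f MB", torch.cuda.memory_allocated() / 1e6)
        log.debug("Reserved:  %.1f MB", torch.cuda.memory_reserved() / 1e6)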