diff --git a/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py b/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py
index 0e0149ad5..80e206ebb 100644
--- a/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py
+++ b/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py
@@ -587,9 +587,6 @@ class LoraFinetuningSingleDevice:
         gc.collect()
         torch.cuda.empty_cache()
 
-        print("Allocated:", torch.cuda.memory_allocated() / 1e6, "MB")
-        print("Reserved: ", torch.cuda.memory_reserved() / 1e6, "MB")
-
         return (memory_stats, checkpoints)
 
     async def validation(self) -> Tuple[float, float]:
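
Note: if the allocator numbers being deleted here are still useful for debugging, a debug-level logger call is a quieter alternative to bare print statements. A minimal sketch follows; the helper name and logger setup are illustrative and not part of this patch:

import logging

import torch

log = logging.getLogger(__name__)

def log_cuda_memory() -> None:
    # Illustrative helper, not part of this patch: report the current CUDA
    # allocator state at debug level so routine training runs stay quiet
    # unless debug logging is explicitly enabled.
    if torch.cuda.is_available():
        log.debug("Allocated: %.1f MB", torch.cuda.memory_allocated() / 1e6)
        log.debug("Reserved:  %.1f MB", torch.cuda.memory_reserved() / 1e6)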