fix: don't attempt to clean gpu memory up when device is cpu
This is a follow-up to https://github.com/meta-llama/llama-stack/pull/1140.

Signed-off-by: Ihar Hrachyshka <ihar.hrachyshka@gmail.com>
parent 736560ceba
commit 4027029420

1 changed file with 3 additions and 2 deletions
@@ -547,10 +547,11 @@ class LoraFinetuningSingleDevice:
             checkpoints.append(checkpoint)

         # clean up the memory after training finishes
-        self._model.to("cpu")
+        if self._device.type != "cpu":
+            self._model.to("cpu")
+            torch.cuda.empty_cache()
         del self._model
         gc.collect()
-        torch.cuda.empty_cache()

         return (memory_stats, checkpoints)

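For context, the sketch below isolates the post-fix cleanup behaviour: the CUDA-specific steps (moving the model off the GPU and releasing the cached allocator blocks) run only when training actually happened on a GPU, while dropping the model reference and forcing garbage collection run on any device. This is a minimal illustration, not llama-stack code; the TrainerSketch class and the tiny nn.Linear model are hypothetical stand-ins, and only the guard inside cleanup() mirrors the diff above.

import gc

import torch
from torch import nn


class TrainerSketch:
    """Hypothetical stand-in for the cleanup path shown in the diff above."""

    def __init__(self, device: torch.device) -> None:
        self._device = device
        self._model = nn.Linear(8, 8).to(device)

    def cleanup(self) -> None:
        # GPU-only steps: there is no CUDA cache to release when training
        # ran on the CPU, so skip the device transfer and empty_cache() call.
        if self._device.type != "cpu":
            self._model.to("cpu")
            torch.cuda.empty_cache()
        # Dropping the attribute and forcing a collection is safe on any device.
        del self._model
        gc.collect()


if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    TrainerSketch(device).cleanup()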