fix: Don't require efficiency_config for torchtune

It was enforced by mistake when 0751a960a5 was merged.

The other asserts made sense because the surrounding code was written to
always expect a non-None value, but that is not the case for efficiency_config.

Signed-off-by: Ihar Hrachyshka <ihar.hrachyshka@gmail.com>
commit 75c54547eb
parent 4597145011
Author: Ihar Hrachyshka
Date:   2025-05-05 15:53:05 -04:00


@@ -39,7 +39,6 @@ from llama_stack.apis.datasets import Datasets
 from llama_stack.apis.post_training import (
     Checkpoint,
     DataConfig,
-    EfficiencyConfig,
     LoraFinetuningConfig,
     OptimizerConfig,
     QATFinetuningConfig,
@@ -90,8 +89,6 @@ class LoraFinetuningSingleDevice:
     ) -> None:
         assert isinstance(training_config.data_config, DataConfig), "DataConfig must be initialized"
 
-        assert isinstance(training_config.efficiency_config, EfficiencyConfig), "EfficiencyConfig must be initialized"
-
         self.job_uuid = job_uuid
         self.training_config = training_config
         if not isinstance(algorithm_config, LoraFinetuningConfig):
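
For context, here is a minimal sketch (not the actual provider code) of how a constructor can treat efficiency_config as optional after this change. The simplified config classes, the field name enable_activation_checkpointing, and the class name LoraFinetuningSingleDeviceSketch are assumptions for illustration only.

```python
# Minimal sketch only: simplified stand-ins for the real llama_stack config
# classes, illustrating why asserting on efficiency_config was too strict.
from dataclasses import dataclass
from typing import Optional


@dataclass
class DataConfig:
    dataset_id: str
    batch_size: int


@dataclass
class EfficiencyConfig:
    # Hypothetical field for illustration.
    enable_activation_checkpointing: bool = False


@dataclass
class TrainingConfig:
    data_config: DataConfig
    # efficiency_config is optional; callers may omit it entirely.
    efficiency_config: Optional[EfficiencyConfig] = None


class LoraFinetuningSingleDeviceSketch:
    """Hypothetical constructor mirroring the shape of the real recipe."""

    def __init__(self, job_uuid: str, training_config: TrainingConfig) -> None:
        # data_config is genuinely required, so its assert stays.
        assert isinstance(training_config.data_config, DataConfig), "DataConfig must be initialized"

        # efficiency_config is only consulted if it was provided; no assert.
        if training_config.efficiency_config is not None:
            self.activation_checkpointing = (
                training_config.efficiency_config.enable_activation_checkpointing
            )
        else:
            self.activation_checkpointing = False

        self.job_uuid = job_uuid
        self.training_config = training_config


if __name__ == "__main__":
    # The recipe now works with or without an EfficiencyConfig.
    cfg = TrainingConfig(data_config=DataConfig(dataset_id="alpaca", batch_size=4))
    recipe = LoraFinetuningSingleDeviceSketch(job_uuid="job-1", training_config=cfg)
    print(recipe.activation_checkpointing)  # False
```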