Use correct shapes in unit tests; remove use of unsupported params

Jash Gulabrai 2025-04-25 15:52:12 -04:00
parent 26c10b5ab5
commit bb142435db
3 changed files with 68 additions and 51 deletions

View file

@@ -245,6 +245,7 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper):
         Supported models:
          - meta/llama-3.1-8b-instruct
+         - meta/llama-3.2-1b-instruct

         Supported algorithm configs:
          - LoRA, SFT
@@ -290,10 +291,6 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper):
         - LoRA config:
             ## NeMo customizer specific LoRA parameters
-            - adapter_dim: int - Adapter dimension
-                Default: 8 (supports powers of 2)
-            - adapter_dropout: float - Adapter dropout
-                Default: None (0.0-1.0)
             - alpha: int - Scaling factor for the LoRA update
                 Default: 16

         Note:
@@ -336,7 +333,7 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper):
             },
             "data_config": {"dataset_id", "batch_size"},
             "optimizer_config": {"lr", "weight_decay"},
-            "lora_config": {"type", "adapter_dim", "adapter_dropout", "alpha"},
+            "lora_config": {"type", "alpha"},
         }

         # Validate all parameters at once
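
For reference, a minimal sketch (built only from the updated tests below) of config objects whose fields line up with these supported-parameter sets. The dataset id is a placeholder; fields such as shuffle, data_format, optimizer_type, and num_warmup_steps are required by the llama_stack Pydantic models but are flagged by this adapter as unsupported and only produce warnings:

from llama_stack.apis.post_training.post_training import (
    DataConfig,
    DatasetFormat,
    LoraFinetuningConfig,
    OptimizerConfig,
    OptimizerType,
)

# lora_config: "type" and "alpha" are the LoRA fields the adapter still forwards.
algorithm_config = LoraFinetuningConfig(
    type="LoRA",
    apply_lora_to_mlp=True,
    apply_lora_to_output=True,
    alpha=16,
    rank=16,
    lora_attn_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
)

# data_config: "dataset_id" and "batch_size" are the supported fields.
data_config = DataConfig(
    dataset_id="sample-dataset",  # placeholder dataset id
    batch_size=16,
    shuffle=False,  # required by the model, ignored (with a warning) by the adapter
    data_format=DatasetFormat.instruct,
)

# optimizer_config: "lr" and "weight_decay" are the supported fields.
optimizer_config = OptimizerConfig(
    optimizer_type=OptimizerType.adam,  # required by the model, unsupported by the adapter
    lr=0.0001,
    weight_decay=0.01,
    num_warmup_steps=100,
)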

View file

@@ -10,14 +10,17 @@ import warnings
 from unittest.mock import patch

 import pytest

-from llama_stack_client.types.algorithm_config_param import LoraFinetuningConfig
-from llama_stack_client.types.post_training_supervised_fine_tune_params import (
-    TrainingConfig,
-    TrainingConfigDataConfig,
-    TrainingConfigEfficiencyConfig,
-    TrainingConfigOptimizerConfig,
-)
+from llama_stack.apis.post_training.post_training import (
+    DataConfig,
+    DatasetFormat,
+    EfficiencyConfig,
+    LoraFinetuningConfig,
+    OptimizerConfig,
+    OptimizerType,
+    TrainingConfig,
+)
+from llama_stack.distribution.library_client import convert_pydantic_to_json_value
 from llama_stack.providers.remote.post_training.nvidia.post_training import (
     NvidiaPostTrainingAdapter,
     NvidiaPostTrainingConfig,
@@ -66,11 +69,8 @@ class TestNvidiaParameters(unittest.TestCase):
     def test_customizer_parameters_passed(self):
         """Test scenario 1: When an optional parameter is passed and value is correctly set."""

-        custom_adapter_dim = 32  # Different from default of 8
         algorithm_config = LoraFinetuningConfig(
             type="LoRA",
-            adapter_dim=custom_adapter_dim,
-            adapter_dropout=0.2,
             apply_lora_to_mlp=True,
             apply_lora_to_output=True,
             alpha=16,
@@ -78,8 +78,15 @@ class TestNvidiaParameters(unittest.TestCase):
             lora_attn_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
         )
-        data_config = TrainingConfigDataConfig(dataset_id="test-dataset", batch_size=16)
-        optimizer_config = TrainingConfigOptimizerConfig(lr=0.0002)
+        data_config = DataConfig(
+            dataset_id="test-dataset", batch_size=16, shuffle=False, data_format=DatasetFormat.instruct
+        )
+        optimizer_config = OptimizerConfig(
+            optimizer_type=OptimizerType.adam,
+            lr=0.0002,
+            weight_decay=0.01,
+            num_warmup_steps=100,
+        )

         training_config = TrainingConfig(
             n_epochs=3,
             data_config=data_config,
@@ -95,7 +102,7 @@ class TestNvidiaParameters(unittest.TestCase):
             model="meta-llama/Llama-3.1-8B-Instruct",
             checkpoint_dir="",
             algorithm_config=algorithm_config,
-            training_config=training_config,
+            training_config=convert_pydantic_to_json_value(training_config),
             logger_config={},
             hyperparam_search_config={},
         )
@@ -114,7 +121,7 @@ class TestNvidiaParameters(unittest.TestCase):
         self._assert_request_params(
             {
                 "hyperparameters": {
-                    "lora": {"adapter_dim": custom_adapter_dim, "adapter_dropout": 0.2, "alpha": 16},
+                    "lora": {"alpha": 16},
                     "epochs": 3,
                     "learning_rate": 0.0002,
                     "batch_size": 16,
@@ -130,8 +137,6 @@ class TestNvidiaParameters(unittest.TestCase):

         algorithm_config = LoraFinetuningConfig(
             type="LoRA",
-            adapter_dim=16,
-            adapter_dropout=0.1,
             apply_lora_to_mlp=True,
             apply_lora_to_output=True,
             alpha=16,
@@ -139,12 +144,16 @@ class TestNvidiaParameters(unittest.TestCase):
             lora_attn_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
         )

-        data_config = TrainingConfigDataConfig(
-            dataset_id=required_dataset_id,  # Required parameter
-            batch_size=8,
+        data_config = DataConfig(
+            dataset_id=required_dataset_id, batch_size=8, shuffle=False, data_format=DatasetFormat.instruct
         )

-        optimizer_config = TrainingConfigOptimizerConfig(lr=0.0001)
+        optimizer_config = OptimizerConfig(
+            optimizer_type=OptimizerType.adam,
+            lr=0.0001,
+            weight_decay=0.01,
+            num_warmup_steps=100,
+        )

         training_config = TrainingConfig(
             n_epochs=1,
@@ -161,7 +170,7 @@ class TestNvidiaParameters(unittest.TestCase):
             model=required_model,  # Required parameter
             checkpoint_dir="",
             algorithm_config=algorithm_config,
-            training_config=training_config,
+            training_config=convert_pydantic_to_json_value(training_config),
             logger_config={},
             hyperparam_search_config={},
         )
@@ -186,24 +195,24 @@ class TestNvidiaParameters(unittest.TestCase):
     def test_unsupported_parameters_warning(self):
         """Test that warnings are raised for unsupported parameters."""
-        data_config = TrainingConfigDataConfig(
+        data_config = DataConfig(
             dataset_id="test-dataset",
             batch_size=8,
             # Unsupported parameters
             shuffle=True,
-            data_format="instruct",
+            data_format=DatasetFormat.instruct,
             validation_dataset_id="val-dataset",
         )

-        optimizer_config = TrainingConfigOptimizerConfig(
+        optimizer_config = OptimizerConfig(
             lr=0.0001,
             weight_decay=0.01,
             # Unsupported parameters
-            optimizer_type="adam",
+            optimizer_type=OptimizerType.adam,
             num_warmup_steps=100,
         )

-        efficiency_config = TrainingConfigEfficiencyConfig(
+        efficiency_config = EfficiencyConfig(
             enable_activation_checkpointing=True  # Unsupported parameter
         )
@@ -230,15 +239,13 @@ class TestNvidiaParameters(unittest.TestCase):
             checkpoint_dir="test-dir",  # Unsupported parameter
             algorithm_config=LoraFinetuningConfig(
                 type="LoRA",
-                adapter_dim=16,
-                adapter_dropout=0.1,
                 apply_lora_to_mlp=True,
                 apply_lora_to_output=True,
                 alpha=16,
                 rank=16,
                 lora_attn_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
             ),
-            training_config=training_config,
+            training_config=convert_pydantic_to_json_value(training_config),
             logger_config={"test": "value"},  # Unsupported parameter
             hyperparam_search_config={"test": "value"},  # Unsupported parameter
         )
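
The unsupported-parameter test above relies on Python's standard warnings machinery. A minimal, self-contained sketch of the capture pattern such a test can use; the emit_unsupported_param_warning helper below is a hypothetical stand-in, not the adapter's actual code or message text:

import warnings

def emit_unsupported_param_warning(param_name: str) -> None:
    # Stand-in for the adapter's behaviour: an unsupported parameter triggers a UserWarning.
    warnings.warn(f"Parameter '{param_name}' is not supported and will be ignored.", UserWarning)

with warnings.catch_warnings(record=True) as captured:
    warnings.simplefilter("always")  # record every warning, even repeated ones
    emit_unsupported_param_warning("shuffle")

# The recorded warnings can then be asserted on, as the test does.
assert any("shuffle" in str(w.message) for w in captured)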

View file

@@ -10,13 +10,17 @@ import warnings
 from unittest.mock import patch

 import pytest

-from llama_stack_client.types.algorithm_config_param import LoraFinetuningConfig, QatFinetuningConfig
-from llama_stack_client.types.post_training_supervised_fine_tune_params import (
-    TrainingConfig,
-    TrainingConfigDataConfig,
-    TrainingConfigOptimizerConfig,
-)
+from llama_stack.apis.post_training.post_training import (
+    DataConfig,
+    DatasetFormat,
+    LoraFinetuningConfig,
+    OptimizerConfig,
+    OptimizerType,
+    QATFinetuningConfig,
+    TrainingConfig,
+)
+from llama_stack.distribution.library_client import convert_pydantic_to_json_value
 from llama_stack.providers.remote.post_training.nvidia.post_training import (
     ListNvidiaPostTrainingJobs,
     NvidiaPostTrainingAdapter,
@@ -105,7 +109,7 @@ class TestNvidiaPostTraining(unittest.TestCase):
                 "batch_size": 16,
                 "epochs": 2,
                 "learning_rate": 0.0001,
-                "lora": {"adapter_dim": 16, "adapter_dropout": 0.1},
+                "lora": {"alpha": 16},
             },
             "output_model": "default/job-1234",
             "status": "created",
@@ -116,8 +120,6 @@ class TestNvidiaPostTraining(unittest.TestCase):

         algorithm_config = LoraFinetuningConfig(
             type="LoRA",
-            adapter_dim=16,
-            adapter_dropout=0.1,
             apply_lora_to_mlp=True,
             apply_lora_to_output=True,
             alpha=16,
@@ -125,10 +127,15 @@ class TestNvidiaPostTraining(unittest.TestCase):
             lora_attn_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
         )

-        data_config = TrainingConfigDataConfig(dataset_id="sample-basic-test", batch_size=16)
+        data_config = DataConfig(
+            dataset_id="sample-basic-test", batch_size=16, shuffle=False, data_format=DatasetFormat.instruct
+        )

-        optimizer_config = TrainingConfigOptimizerConfig(
+        optimizer_config = OptimizerConfig(
+            optimizer_type=OptimizerType.adam,
             lr=0.0001,
+            weight_decay=0.01,
+            num_warmup_steps=100,
         )

         training_config = TrainingConfig(
@@ -145,7 +152,7 @@ class TestNvidiaPostTraining(unittest.TestCase):
             model="meta-llama/Llama-3.1-8B-Instruct",
             checkpoint_dir="",
             algorithm_config=algorithm_config,
-            training_config=training_config,
+            training_config=convert_pydantic_to_json_value(training_config),
             logger_config={},
             hyperparam_search_config={},
         )
@@ -169,16 +176,22 @@ class TestNvidiaPostTraining(unittest.TestCase):
                 "epochs": 2,
                 "batch_size": 16,
                 "learning_rate": 0.0001,
-                "lora": {"alpha": 16, "adapter_dim": 16, "adapter_dropout": 0.1},
+                "weight_decay": 0.01,
+                "lora": {"alpha": 16},
             },
         },
     )

     def test_supervised_fine_tune_with_qat(self):
-        algorithm_config = QatFinetuningConfig(type="QAT", quantizer_name="quantizer_name", group_size=1)
-        data_config = TrainingConfigDataConfig(dataset_id="sample-basic-test", batch_size=16)
-        optimizer_config = TrainingConfigOptimizerConfig(
+        algorithm_config = QATFinetuningConfig(type="QAT", quantizer_name="quantizer_name", group_size=1)
+        data_config = DataConfig(
+            dataset_id="sample-basic-test", batch_size=16, shuffle=False, data_format=DatasetFormat.instruct
+        )
+        optimizer_config = OptimizerConfig(
+            optimizer_type=OptimizerType.adam,
             lr=0.0001,
+            weight_decay=0.01,
+            num_warmup_steps=100,
         )
         training_config = TrainingConfig(
             n_epochs=2,
@@ -193,7 +206,7 @@ class TestNvidiaPostTraining(unittest.TestCase):
             model="meta-llama/Llama-3.1-8B-Instruct",
             checkpoint_dir="",
             algorithm_config=algorithm_config,
-            training_config=training_config,
+            training_config=convert_pydantic_to_json_value(training_config),
             logger_config={},
             hyperparam_search_config={},
         )
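
Since training_config is now passed through convert_pydantic_to_json_value, the adapter receives plain JSON-serializable data rather than a Pydantic model. A small sketch of that conversion, assuming the helper accepts any Pydantic model in the same way it is applied to TrainingConfig in the tests above:

from llama_stack.apis.post_training.post_training import DataConfig, DatasetFormat
from llama_stack.distribution.library_client import convert_pydantic_to_json_value

# Build a config the same way the tests do.
data_config = DataConfig(
    dataset_id="sample-basic-test", batch_size=16, shuffle=False, data_format=DatasetFormat.instruct
)

# The converted value is plain JSON-style data (dicts, lists, primitives), which is the
# shape the updated tests now pass for training_config.
print(convert_pydantic_to_json_value(data_config))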