diff --git a/llama_stack/providers/remote/post_training/nvidia/post_training.py b/llama_stack/providers/remote/post_training/nvidia/post_training.py
index d3de930f7..c74fb2a24 100644
--- a/llama_stack/providers/remote/post_training/nvidia/post_training.py
+++ b/llama_stack/providers/remote/post_training/nvidia/post_training.py
@@ -67,13 +67,18 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper):
         self.timeout = aiohttp.ClientTimeout(total=config.timeout)
         # TODO: filter by available models based on /config endpoint
         ModelRegistryHelper.__init__(self, model_entries=_MODEL_ENTRIES)
-        self.session = aiohttp.ClientSession(headers=self.headers, timeout=self.timeout)
-        self.customizer_url = config.customizer_url
+        self.session = None
+        self.customizer_url = config.customizer_url
 
         if not self.customizer_url:
             warnings.warn("Customizer URL is not set, using default value: http://nemo.test", stacklevel=2)
             self.customizer_url = "http://nemo.test"
 
+    async def _get_session(self) -> aiohttp.ClientSession:
+        if self.session is None or self.session.closed:
+            self.session = aiohttp.ClientSession(headers=self.headers, timeout=self.timeout)
+        return self.session
+
     async def _make_request(
         self,
         method: str,
@@ -94,8 +99,9 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper):
         if json and "Content-Type" not in request_headers:
             request_headers["Content-Type"] = "application/json"
 
+        session = await self._get_session()
         for _ in range(self.config.max_retries):
-            async with self.session.request(method, url, params=params, json=json, **kwargs) as response:
+            async with session.request(method, url, params=params, json=json, **kwargs) as response:
                 if response.status >= 400:
                     error_data = await response.json()
                     raise Exception(f"API request failed: {error_data}")
@@ -122,8 +128,8 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper):
         jobs = []
         for job in response.get("data", []):
             job_id = job.pop("id")
-            job_status = job.pop("status", "unknown").lower()
-            mapped_status = STATUS_MAPPING.get(job_status, "unknown")
+            job_status = job.pop("status", "scheduled").lower()
+            mapped_status = STATUS_MAPPING.get(job_status, "scheduled")
 
             # Convert string timestamps to datetime objects
             created_at = (
@@ -177,7 +183,7 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper):
         )
 
         api_status = response.pop("status").lower()
-        mapped_status = STATUS_MAPPING.get(api_status, "unknown")
+        mapped_status = STATUS_MAPPING.get(api_status, "scheduled")
 
         return NvidiaPostTrainingJobStatusResponse(
             status=JobStatus(mapped_status),
@@ -239,6 +245,7 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper):
 
         Supported models:
         - meta/llama-3.1-8b-instruct
+        - meta/llama-3.2-1b-instruct
 
         Supported algorithm configs:
         - LoRA, SFT
@@ -284,10 +291,6 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper):
 
             - LoRA config:
                 ## NeMo customizer specific LoRA parameters
-                - adapter_dim: int - Adapter dimension
-                    Default: 8 (supports powers of 2)
-                - adapter_dropout: float - Adapter dropout
-                    Default: None (0.0-1.0)
                 - alpha: int - Scaling factor for the LoRA update
                     Default: 16
         Note:
@@ -297,7 +300,7 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper):
             User is informed about unsupported parameters via warnings.
         """
         # Map model to nvidia model name
-        # ToDo: only supports llama-3.1-8b-instruct now, need to update this to support other models
+        # See `_MODEL_ENTRIES` for supported models
        nvidia_model = self.get_provider_model_id(model)
 
         # Check for unsupported method parameters
@@ -330,7 +333,7 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper):
             },
             "data_config": {"dataset_id", "batch_size"},
             "optimizer_config": {"lr", "weight_decay"},
-            "lora_config": {"type", "adapter_dim", "adapter_dropout", "alpha"},
+            "lora_config": {"type", "alpha"},
         }
 
         # Validate all parameters at once
@@ -389,16 +392,10 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper):
 
         # Handle LoRA-specific configuration
         if algorithm_config:
-            if isinstance(algorithm_config, dict) and algorithm_config.get("type") == "LoRA":
+            if algorithm_config.type == "LoRA":
                 warn_unsupported_params(algorithm_config, supported_params["lora_config"], "LoRA config")
                 job_config["hyperparameters"]["lora"] = {
-                    k: v
-                    for k, v in {
-                        "adapter_dim": algorithm_config.get("adapter_dim"),
-                        "alpha": algorithm_config.get("alpha"),
-                        "adapter_dropout": algorithm_config.get("adapter_dropout"),
-                    }.items()
-                    if v is not None
+                    k: v for k, v in {"alpha": algorithm_config.alpha}.items() if v is not None
                 }
             else:
                 raise NotImplementedError(f"Unsupported algorithm config: {algorithm_config}")
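The key runtime fix in this file is deferring `aiohttp.ClientSession` creation until a request is actually made, and re-creating it if it was closed. A minimal sketch of that pattern outside the adapter, assuming only aiohttp's public API (the `LazyHTTPClient` class, its headers, and the endpoint path are hypothetical; only the `_get_session` logic mirrors the diff):

```python
# Hypothetical client illustrating the lazy-session pattern from the diff.
import asyncio

import aiohttp


class LazyHTTPClient:
    def __init__(self, base_url: str, total_timeout: float = 60.0) -> None:
        self.base_url = base_url
        self.headers = {"Accept": "application/json"}
        self.timeout = aiohttp.ClientTimeout(total=total_timeout)
        # Creating the ClientSession here would bind it to whatever event loop
        # exists at construction time, and a session closed later (e.g. between
        # tests) would leave the client holding a dead handle. Defer it instead.
        self.session: aiohttp.ClientSession | None = None

    async def _get_session(self) -> aiohttp.ClientSession:
        # Create on first use; re-create if a previous session was closed.
        if self.session is None or self.session.closed:
            self.session = aiohttp.ClientSession(headers=self.headers, timeout=self.timeout)
        return self.session

    async def get_json(self, path: str) -> dict:
        session = await self._get_session()
        async with session.get(f"{self.base_url}{path}") as response:
            response.raise_for_status()
            return await response.json()

    async def close(self) -> None:
        if self.session is not None and not self.session.closed:
            await self.session.close()


async def main() -> None:
    # "http://nemo.test" is the placeholder default from the diff; the path is
    # illustrative only.
    client = LazyHTTPClient("http://nemo.test")
    try:
        print(await client.get_json("/v1/customization/jobs"))
    finally:
        await client.close()


if __name__ == "__main__":
    asyncio.run(main())
```

Besides avoiding "no running event loop" issues at construction time, this keeps each test able to tear the session down without poisoning the next one.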
""" # Map model to nvidia model name - # ToDo: only supports llama-3.1-8b-instruct now, need to update this to support other models + # See `_MODEL_ENTRIES` for supported models nvidia_model = self.get_provider_model_id(model) # Check for unsupported method parameters @@ -330,7 +333,7 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper): }, "data_config": {"dataset_id", "batch_size"}, "optimizer_config": {"lr", "weight_decay"}, - "lora_config": {"type", "adapter_dim", "adapter_dropout", "alpha"}, + "lora_config": {"type", "alpha"}, } # Validate all parameters at once @@ -389,16 +392,10 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper): # Handle LoRA-specific configuration if algorithm_config: - if isinstance(algorithm_config, dict) and algorithm_config.get("type") == "LoRA": + if algorithm_config.type == "LoRA": warn_unsupported_params(algorithm_config, supported_params["lora_config"], "LoRA config") job_config["hyperparameters"]["lora"] = { - k: v - for k, v in { - "adapter_dim": algorithm_config.get("adapter_dim"), - "alpha": algorithm_config.get("alpha"), - "adapter_dropout": algorithm_config.get("adapter_dropout"), - }.items() - if v is not None + k: v for k, v in {"alpha": algorithm_config.alpha}.items() if v is not None } else: raise NotImplementedError(f"Unsupported algorithm config: {algorithm_config}") diff --git a/tests/unit/providers/nvidia/test_parameters.py b/tests/unit/providers/nvidia/test_parameters.py index cb1b92fba..ea12122a0 100644 --- a/tests/unit/providers/nvidia/test_parameters.py +++ b/tests/unit/providers/nvidia/test_parameters.py @@ -10,14 +10,17 @@ import warnings from unittest.mock import patch import pytest -from llama_stack_client.types.algorithm_config_param import LoraFinetuningConfig -from llama_stack_client.types.post_training_supervised_fine_tune_params import ( - TrainingConfig, - TrainingConfigDataConfig, - TrainingConfigEfficiencyConfig, - TrainingConfigOptimizerConfig, -) +from llama_stack.apis.post_training.post_training import ( + DataConfig, + DatasetFormat, + EfficiencyConfig, + LoraFinetuningConfig, + OptimizerConfig, + OptimizerType, + TrainingConfig, +) +from llama_stack.distribution.library_client import convert_pydantic_to_json_value from llama_stack.providers.remote.post_training.nvidia.post_training import ( NvidiaPostTrainingAdapter, NvidiaPostTrainingConfig, @@ -66,11 +69,8 @@ class TestNvidiaParameters(unittest.TestCase): def test_customizer_parameters_passed(self): """Test scenario 1: When an optional parameter is passed and value is correctly set.""" - custom_adapter_dim = 32 # Different from default of 8 algorithm_config = LoraFinetuningConfig( type="LoRA", - adapter_dim=custom_adapter_dim, - adapter_dropout=0.2, apply_lora_to_mlp=True, apply_lora_to_output=True, alpha=16, @@ -78,8 +78,15 @@ class TestNvidiaParameters(unittest.TestCase): lora_attn_modules=["q_proj", "k_proj", "v_proj", "o_proj"], ) - data_config = TrainingConfigDataConfig(dataset_id="test-dataset", batch_size=16) - optimizer_config = TrainingConfigOptimizerConfig(lr=0.0002) + data_config = DataConfig( + dataset_id="test-dataset", batch_size=16, shuffle=False, data_format=DatasetFormat.instruct + ) + optimizer_config = OptimizerConfig( + optimizer_type=OptimizerType.adam, + lr=0.0002, + weight_decay=0.01, + num_warmup_steps=100, + ) training_config = TrainingConfig( n_epochs=3, data_config=data_config, @@ -95,7 +102,7 @@ class TestNvidiaParameters(unittest.TestCase): model="meta-llama/Llama-3.1-8B-Instruct", checkpoint_dir="", 
diff --git a/tests/unit/providers/nvidia/test_supervised_fine_tuning.py b/tests/unit/providers/nvidia/test_supervised_fine_tuning.py
index 09f67e4e6..319011be3 100644
--- a/tests/unit/providers/nvidia/test_supervised_fine_tuning.py
+++ b/tests/unit/providers/nvidia/test_supervised_fine_tuning.py
@@ -10,14 +10,18 @@ import warnings
 from unittest.mock import patch
 
 import pytest
-from llama_stack_client.types.algorithm_config_param import LoraFinetuningConfig, QatFinetuningConfig
-from llama_stack_client.types.post_training_supervised_fine_tune_params import (
-    TrainingConfig,
-    TrainingConfigDataConfig,
-    TrainingConfigOptimizerConfig,
-)
 
 from llama_stack.apis.models import Model, ModelType
+from llama_stack.apis.post_training.post_training import (
+    DataConfig,
+    DatasetFormat,
+    LoraFinetuningConfig,
+    OptimizerConfig,
+    OptimizerType,
+    QATFinetuningConfig,
+    TrainingConfig,
+)
+from llama_stack.distribution.library_client import convert_pydantic_to_json_value
 from llama_stack.providers.remote.inference.nvidia.nvidia import NVIDIAConfig, NVIDIAInferenceAdapter
 from llama_stack.providers.remote.post_training.nvidia.post_training import (
     ListNvidiaPostTrainingJobs,
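The `convert_pydantic_to_json_value` import is the other half of the migration: the tests now build server-side pydantic models and serialize them before calling the adapter. A sketch of what that conversion amounts to, assuming it behaves like pydantic v2's JSON-mode dump (the `Demo*` models below are stand-ins for illustration, not the real llama_stack API types):

```python
# Sketch of the pydantic-to-JSON step; assumes convert_pydantic_to_json_value
# is equivalent in spirit to pydantic v2's model_dump(mode="json").
from pydantic import BaseModel


class DemoOptimizer(BaseModel):
    lr: float
    weight_decay: float


class DemoTraining(BaseModel):
    n_epochs: int
    optimizer_config: DemoOptimizer


def to_json_value(model: BaseModel) -> dict:
    # Nested models become plain dicts; enums would collapse to their values.
    return model.model_dump(mode="json")


cfg = DemoTraining(n_epochs=2, optimizer_config=DemoOptimizer(lr=0.0001, weight_decay=0.01))
assert to_json_value(cfg) == {
    "n_epochs": 2,
    "optimizer_config": {"lr": 0.0001, "weight_decay": 0.01},
}
```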
@@ -121,7 +125,7 @@ class TestNvidiaPostTraining(unittest.TestCase):
                 "batch_size": 16,
                 "epochs": 2,
                 "learning_rate": 0.0001,
-                "lora": {"adapter_dim": 16, "adapter_dropout": 0.1},
+                "lora": {"alpha": 16},
             },
             "output_model": "default/job-1234",
             "status": "created",
@@ -132,8 +136,6 @@
 
         algorithm_config = LoraFinetuningConfig(
             type="LoRA",
-            adapter_dim=16,
-            adapter_dropout=0.1,
             apply_lora_to_mlp=True,
             apply_lora_to_output=True,
             alpha=16,
@@ -141,10 +143,15 @@
             lora_attn_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
         )
 
-        data_config = TrainingConfigDataConfig(dataset_id="sample-basic-test", batch_size=16)
+        data_config = DataConfig(
+            dataset_id="sample-basic-test", batch_size=16, shuffle=False, data_format=DatasetFormat.instruct
+        )
 
-        optimizer_config = TrainingConfigOptimizerConfig(
+        optimizer_config = OptimizerConfig(
+            optimizer_type=OptimizerType.adam,
             lr=0.0001,
+            weight_decay=0.01,
+            num_warmup_steps=100,
         )
 
         training_config = TrainingConfig(
@@ -161,7 +168,7 @@ class TestNvidiaPostTraining(unittest.TestCase):
             model="meta-llama/Llama-3.1-8B-Instruct",
             checkpoint_dir="",
             algorithm_config=algorithm_config,
-            training_config=training_config,
+            training_config=convert_pydantic_to_json_value(training_config),
             logger_config={},
             hyperparam_search_config={},
         )
@@ -185,16 +192,22 @@ class TestNvidiaPostTraining(unittest.TestCase):
                 "epochs": 2,
                 "batch_size": 16,
                 "learning_rate": 0.0001,
-                "lora": {"alpha": 16, "adapter_dim": 16, "adapter_dropout": 0.1},
+                "weight_decay": 0.01,
+                "lora": {"alpha": 16},
                 },
             },
         )
 
     def test_supervised_fine_tune_with_qat(self):
-        algorithm_config = QatFinetuningConfig(type="QAT", quantizer_name="quantizer_name", group_size=1)
-        data_config = TrainingConfigDataConfig(dataset_id="sample-basic-test", batch_size=16)
-        optimizer_config = TrainingConfigOptimizerConfig(
+        algorithm_config = QATFinetuningConfig(type="QAT", quantizer_name="quantizer_name", group_size=1)
+        data_config = DataConfig(
+            dataset_id="sample-basic-test", batch_size=16, shuffle=False, data_format=DatasetFormat.instruct
+        )
+        optimizer_config = OptimizerConfig(
+            optimizer_type=OptimizerType.adam,
             lr=0.0001,
+            weight_decay=0.01,
+            num_warmup_steps=100,
         )
         training_config = TrainingConfig(
             n_epochs=2,
@@ -209,7 +222,7 @@ class TestNvidiaPostTraining(unittest.TestCase):
             model="meta-llama/Llama-3.1-8B-Instruct",
             checkpoint_dir="",
             algorithm_config=algorithm_config,
-            training_config=training_config,
+            training_config=convert_pydantic_to_json_value(training_config),
             logger_config={},
             hyperparam_search_config={},
        )
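All of the request assertions above pin the LoRA payload down to `{"alpha": 16}`. A self-contained recap of the assembly logic the adapter now uses (the dataclass is a stand-in for `LoraFinetuningConfig`, reduced to the fields the adapter actually reads):

```python
# Stand-in recap of the adapter's new LoRA hyperparameter assembly: only alpha
# survives, and only when it is set; anything non-LoRA is rejected.
from dataclasses import dataclass


@dataclass
class LoraConfigStandIn:
    type: str = "LoRA"
    alpha: int | None = None


def build_lora_hyperparameters(algorithm_config: LoraConfigStandIn) -> dict:
    if algorithm_config.type != "LoRA":
        # Matches the adapter's behavior for QAT and other configs.
        raise NotImplementedError(f"Unsupported algorithm config: {algorithm_config}")
    # Same comprehension shape as the diff: drop None values so the request
    # payload stays minimal.
    return {k: v for k, v in {"alpha": algorithm_config.alpha}.items() if v is not None}


assert build_lora_hyperparameters(LoraConfigStandIn(alpha=16)) == {"alpha": 16}
assert build_lora_hyperparameters(LoraConfigStandIn()) == {}
```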