fix: Pass model param as configuration name to NeMo Customizer

Jash Gulabrai 2025-05-20 09:43:51 -04:00
parent ed7b4731aa
commit 1d94f3617a
2 changed files with 5 additions and 8 deletions
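
The test expectations below show what the fix does: the adapter now forwards the `model` argument verbatim as the NeMo Customizer configuration name (the `config` field of the job request), instead of rewriting the model id as it did before. A minimal sketch of that payload construction, using a hypothetical build_customization_job_payload helper (the actual adapter change is in the second changed file, not shown in this excerpt):

    # Sketch only: the fix passes `model` straight through as the `config` name
    # in the body of POST /v1/customization/jobs.
    from typing import Any

    def build_customization_job_payload(
        model: str,
        dataset_name: str,
        dataset_namespace: str,
        hyperparameters: dict[str, Any],
    ) -> dict[str, Any]:
        return {
            "config": model,  # e.g. "meta/llama-3.2-1b-instruct@v1.0.0+L40", used as-is
            "dataset": {"name": dataset_name, "namespace": dataset_namespace},
            "hyperparameters": hyperparameters,
        }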


@@ -165,7 +165,7 @@ class TestNvidiaPostTraining(unittest.TestCase):
         training_job = self.run_async(
             self.adapter.supervised_fine_tune(
                 job_uuid="1234",
-                model="meta-llama/Llama-3.1-8B-Instruct",
+                model="meta/llama-3.2-1b-instruct@v1.0.0+L40",
                 checkpoint_dir="",
                 algorithm_config=algorithm_config,
                 training_config=convert_pydantic_to_json_value(training_config),
@@ -184,7 +184,7 @@ class TestNvidiaPostTraining(unittest.TestCase):
             "POST",
             "/v1/customization/jobs",
             expected_json={
-                "config": "meta/llama-3.1-8b-instruct",
+                "config": "meta/llama-3.2-1b-instruct@v1.0.0+L40",
                 "dataset": {"name": "sample-basic-test", "namespace": "default"},
                 "hyperparameters": {
                     "training_type": "sft",
@@ -219,7 +219,7 @@ class TestNvidiaPostTraining(unittest.TestCase):
         self.run_async(
             self.adapter.supervised_fine_tune(
                 job_uuid="1234",
-                model="meta-llama/Llama-3.1-8B-Instruct",
+                model="meta/llama-3.2-1b-instruct@v1.0.0+L40",
                 checkpoint_dir="",
                 algorithm_config=algorithm_config,
                 training_config=convert_pydantic_to_json_value(training_config),