From 2a74f0db39de7d25bd4407a2535ef67593ad47f3 Mon Sep 17 00:00:00 2001
From: Ben Browning
Date: Fri, 11 Apr 2025 13:17:57 -0400
Subject: [PATCH] fix: remove extra sft args in NvidiaPostTrainingAdapter
 (#1939)

# What does this PR do?

The supervised_fine_tune method in NvidiaPostTrainingAdapter had some extra
args that aren't part of the post_training protocol, and these extra args
were causing FastAPI to throw an error when attempting to stand up an
endpoint that used this provider.

(Closes #1938)

## Test Plan

Before this change, bringing up a stack with the `nvidia` template failed.
Afterwards, it passes. I'm testing this like:

```
INFERENCE_MODEL="meta/llama-3.1-8b-instruct" \
  llama stack build --template nvidia --image-type venv --run
```

I also ensured the nvidia/test_supervised_fine_tuning.py tests still pass via:

```
python -m pytest \
  tests/unit/providers/nvidia/test_supervised_fine_tuning.py
```

Signed-off-by: Ben Browning
---
 .../providers/remote/post_training/nvidia/post_training.py | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/llama_stack/providers/remote/post_training/nvidia/post_training.py b/llama_stack/providers/remote/post_training/nvidia/post_training.py
index bacfdba0b..e14fcf0cc 100644
--- a/llama_stack/providers/remote/post_training/nvidia/post_training.py
+++ b/llama_stack/providers/remote/post_training/nvidia/post_training.py
@@ -206,10 +206,6 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper):
         model: str,
         checkpoint_dir: Optional[str],
         algorithm_config: Optional[AlgorithmConfig] = None,
-        extra_json: Optional[Dict[str, Any]] = None,
-        params: Optional[Dict[str, Any]] = None,
-        headers: Optional[Dict[str, Any]] = None,
-        **kwargs,
     ) -> NvidiaPostTrainingJob:
         """
         Fine-tunes a model on a dataset.