diff --git a/pyproject.toml b/pyproject.toml
index 9eef66672..c1b6e101b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -132,6 +132,7 @@ select = [
     "N", # Naming
     "W", # Warnings
     "DTZ", # datetime rules
+    "I", # isort (imports order)
 ]
 ignore = [
     # The following ignores are desired by the project maintainers.
diff --git a/tests/unit/providers/nvidia/conftest.py b/tests/unit/providers/nvidia/conftest.py
index 1c4a1d145..c43e1cd77 100644
--- a/tests/unit/providers/nvidia/conftest.py
+++ b/tests/unit/providers/nvidia/conftest.py
@@ -5,9 +5,10 @@
 # the root directory of this source tree.
 
 import asyncio
-import pytest
 from unittest.mock import AsyncMock, MagicMock, patch
 
+import pytest
+
 mock_session = MagicMock()
 mock_session.closed = False
 mock_session.close = AsyncMock()
diff --git a/tests/unit/providers/nvidia/test_parameters.py b/tests/unit/providers/nvidia/test_parameters.py
index bf579fbfb..cb1b92fba 100644
--- a/tests/unit/providers/nvidia/test_parameters.py
+++ b/tests/unit/providers/nvidia/test_parameters.py
@@ -8,15 +8,16 @@ import os
 import unittest
 import warnings
 from unittest.mock import patch
-import pytest
 
+import pytest
 from llama_stack_client.types.algorithm_config_param import LoraFinetuningConfig
 from llama_stack_client.types.post_training_supervised_fine_tune_params import (
     TrainingConfig,
     TrainingConfigDataConfig,
-    TrainingConfigOptimizerConfig,
     TrainingConfigEfficiencyConfig,
+    TrainingConfigOptimizerConfig,
 )
+
 from llama_stack.providers.remote.post_training.nvidia.post_training import (
     NvidiaPostTrainingAdapter,
     NvidiaPostTrainingConfig,
diff --git a/tests/unit/providers/nvidia/test_supervised_fine_tuning.py b/tests/unit/providers/nvidia/test_supervised_fine_tuning.py
index 8ef48e05b..7ce89144b 100644
--- a/tests/unit/providers/nvidia/test_supervised_fine_tuning.py
+++ b/tests/unit/providers/nvidia/test_supervised_fine_tuning.py
@@ -6,10 +6,10 @@
 
 import os
 import unittest
-from unittest.mock import patch
 import warnings
-import pytest
+from unittest.mock import patch
 
+import pytest
 from llama_stack_client.types.algorithm_config_param import LoraFinetuningConfig, QatFinetuningConfig
 from llama_stack_client.types.post_training_supervised_fine_tune_params import (
     TrainingConfig,
@@ -18,11 +18,11 @@ from llama_stack_client.types.post_training_supervised_fine_tune_params import (
 )
 
 from llama_stack.providers.remote.post_training.nvidia.post_training import (
+    ListNvidiaPostTrainingJobs,
     NvidiaPostTrainingAdapter,
     NvidiaPostTrainingConfig,
-    NvidiaPostTrainingJobStatusResponse,
-    ListNvidiaPostTrainingJobs,
     NvidiaPostTrainingJob,
+    NvidiaPostTrainingJobStatusResponse,
 )
 
 