chore: re-enable isort enforcement (#1802)

# What does this PR do?

Re-enable isort enforcement by restoring ruff's `I` (import ordering) rules and fixing the import ordering issues that accumulated while the check was off.

It was disabled in 1a73f8305b, probably by mistake.
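
For background, the `I` rules enforce isort-style import ordering: standard-library, third-party, and first-party imports sit in separate groups, each sorted and separated by a blank line. Below is a minimal sketch of the layout the rule expects, assembled from imports that appear in the files touched by this commit (illustrative, not an excerpt from any one file):

```python
# Standard-library imports form the first group.
import asyncio
from unittest.mock import AsyncMock, MagicMock, patch

# Third-party packages form the second group.
import pytest

# First-party (project) imports come last.
from llama_stack.providers.remote.post_training.nvidia.post_training import (
    NvidiaPostTrainingAdapter,
    NvidiaPostTrainingConfig,
)
```

Running something like `ruff check --select I --fix .` locally should produce this ordering automatically; the hunks below reflect that kind of cleanup.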

Signed-off-by: Ihar Hrachyshka <ihar.hrachyshka@gmail.com>
Ihar Hrachyshka authored on 2025-03-26 18:22:17 -04:00; committed by GitHub
commit 193e531216 (parent 742020b94a)
4 changed files with 10 additions and 7 deletions


@@ -132,6 +132,7 @@ select = [
     "N", # Naming
     "W", # Warnings
     "DTZ", # datetime rules
+    "I", # isort (imports order)
 ]
 ignore = [
     # The following ignores are desired by the project maintainers.


@@ -5,9 +5,10 @@
 # the root directory of this source tree.
 
 import asyncio
-import pytest
 from unittest.mock import AsyncMock, MagicMock, patch
 
+import pytest
+
 mock_session = MagicMock()
 mock_session.closed = False
 mock_session.close = AsyncMock()


@@ -8,15 +8,16 @@ import os
 import unittest
 import warnings
 from unittest.mock import patch
-import pytest
+
+import pytest
 from llama_stack_client.types.algorithm_config_param import LoraFinetuningConfig
 from llama_stack_client.types.post_training_supervised_fine_tune_params import (
     TrainingConfig,
     TrainingConfigDataConfig,
-    TrainingConfigOptimizerConfig,
     TrainingConfigEfficiencyConfig,
+    TrainingConfigOptimizerConfig,
 )
 
 from llama_stack.providers.remote.post_training.nvidia.post_training import (
     NvidiaPostTrainingAdapter,
     NvidiaPostTrainingConfig,


@@ -6,10 +6,10 @@
 
 import os
 import unittest
-from unittest.mock import patch
 import warnings
-import pytest
+from unittest.mock import patch
 
+import pytest
 from llama_stack_client.types.algorithm_config_param import LoraFinetuningConfig, QatFinetuningConfig
 from llama_stack_client.types.post_training_supervised_fine_tune_params import (
     TrainingConfig,
@@ -18,11 +18,11 @@ from llama_stack_client.types.post_training_supervised_fine_tune_params import (
 )
 from llama_stack.providers.remote.post_training.nvidia.post_training import (
+    ListNvidiaPostTrainingJobs,
     NvidiaPostTrainingAdapter,
     NvidiaPostTrainingConfig,
-    NvidiaPostTrainingJobStatusResponse,
-    ListNvidiaPostTrainingJobs,
     NvidiaPostTrainingJob,
+    NvidiaPostTrainingJobStatusResponse,
 )