mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-06-28 02:53:30 +00:00
chore: re-enable isort enforcement (#1802)
# What does this PR do?
Re-enable isort enforcement.
It was disabled in 1a73f8305b, probably by mistake.
Signed-off-by: Ihar Hrachyshka <ihar.hrachyshka@gmail.com>
This commit is contained in:
parent
742020b94a
commit
193e531216
4 changed files with 10 additions and 7 deletions
|
@ -132,6 +132,7 @@ select = [
|
||||||
"N", # Naming
|
"N", # Naming
|
||||||
"W", # Warnings
|
"W", # Warnings
|
||||||
"DTZ", # datetime rules
|
"DTZ", # datetime rules
|
||||||
|
"I", # isort (imports order)
|
||||||
]
|
]
|
||||||
ignore = [
|
ignore = [
|
||||||
# The following ignores are desired by the project maintainers.
|
# The following ignores are desired by the project maintainers.
|
||||||
|
|
|
@ -5,9 +5,10 @@
|
||||||
# the root directory of this source tree.
|
# the root directory of this source tree.
|
||||||
|
|
||||||
import asyncio
|
import asyncio
|
||||||
import pytest
|
|
||||||
from unittest.mock import AsyncMock, MagicMock, patch
|
from unittest.mock import AsyncMock, MagicMock, patch
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
mock_session = MagicMock()
|
mock_session = MagicMock()
|
||||||
mock_session.closed = False
|
mock_session.closed = False
|
||||||
mock_session.close = AsyncMock()
|
mock_session.close = AsyncMock()
|
||||||
|
|
|
@ -8,15 +8,16 @@ import os
|
||||||
import unittest
|
import unittest
|
||||||
import warnings
|
import warnings
|
||||||
from unittest.mock import patch
|
from unittest.mock import patch
|
||||||
import pytest
|
|
||||||
|
|
||||||
|
import pytest
|
||||||
from llama_stack_client.types.algorithm_config_param import LoraFinetuningConfig
|
from llama_stack_client.types.algorithm_config_param import LoraFinetuningConfig
|
||||||
from llama_stack_client.types.post_training_supervised_fine_tune_params import (
|
from llama_stack_client.types.post_training_supervised_fine_tune_params import (
|
||||||
TrainingConfig,
|
TrainingConfig,
|
||||||
TrainingConfigDataConfig,
|
TrainingConfigDataConfig,
|
||||||
TrainingConfigOptimizerConfig,
|
|
||||||
TrainingConfigEfficiencyConfig,
|
TrainingConfigEfficiencyConfig,
|
||||||
|
TrainingConfigOptimizerConfig,
|
||||||
)
|
)
|
||||||
|
|
||||||
from llama_stack.providers.remote.post_training.nvidia.post_training import (
|
from llama_stack.providers.remote.post_training.nvidia.post_training import (
|
||||||
NvidiaPostTrainingAdapter,
|
NvidiaPostTrainingAdapter,
|
||||||
NvidiaPostTrainingConfig,
|
NvidiaPostTrainingConfig,
|
||||||
|
|
|
@ -6,10 +6,10 @@
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import unittest
|
import unittest
|
||||||
from unittest.mock import patch
|
|
||||||
import warnings
|
import warnings
|
||||||
import pytest
|
from unittest.mock import patch
|
||||||
|
|
||||||
|
import pytest
|
||||||
from llama_stack_client.types.algorithm_config_param import LoraFinetuningConfig, QatFinetuningConfig
|
from llama_stack_client.types.algorithm_config_param import LoraFinetuningConfig, QatFinetuningConfig
|
||||||
from llama_stack_client.types.post_training_supervised_fine_tune_params import (
|
from llama_stack_client.types.post_training_supervised_fine_tune_params import (
|
||||||
TrainingConfig,
|
TrainingConfig,
|
||||||
|
@ -18,11 +18,11 @@ from llama_stack_client.types.post_training_supervised_fine_tune_params import (
|
||||||
)
|
)
|
||||||
|
|
||||||
from llama_stack.providers.remote.post_training.nvidia.post_training import (
|
from llama_stack.providers.remote.post_training.nvidia.post_training import (
|
||||||
|
ListNvidiaPostTrainingJobs,
|
||||||
NvidiaPostTrainingAdapter,
|
NvidiaPostTrainingAdapter,
|
||||||
NvidiaPostTrainingConfig,
|
NvidiaPostTrainingConfig,
|
||||||
NvidiaPostTrainingJobStatusResponse,
|
|
||||||
ListNvidiaPostTrainingJobs,
|
|
||||||
NvidiaPostTrainingJob,
|
NvidiaPostTrainingJob,
|
||||||
|
NvidiaPostTrainingJobStatusResponse,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue