From 4ae8c63a2b4d349afd1ec4219ff9b6edae5beb6f Mon Sep 17 00:00:00 2001
From: Xi Yan
Date: Sat, 28 Sep 2024 16:04:41 -0700
Subject: [PATCH] pre-commit lint

---
 llama_stack/apis/inference/event_logger.py                 | 3 ++-
 llama_stack/cli/model/describe.py                          | 4 ++--
 llama_stack/cli/stack/run.py                               | 1 +
 llama_stack/distribution/configure.py                      | 7 ++++---
 llama_stack/distribution/server/server.py                  | 4 ++--
 llama_stack/distribution/utils/config_dirs.py              | 4 +++-
 llama_stack/distribution/utils/dynamic.py                  | 1 -
 .../providers/adapters/inference/together/together.py      | 2 +-
 .../impls/meta_reference/inference/quantization/loader.py  | 7 ++++---
 9 files changed, 19 insertions(+), 14 deletions(-)

diff --git a/llama_stack/apis/inference/event_logger.py b/llama_stack/apis/inference/event_logger.py
index c64ffb6bd..d97ece6d4 100644
--- a/llama_stack/apis/inference/event_logger.py
+++ b/llama_stack/apis/inference/event_logger.py
@@ -4,11 +4,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
+from termcolor import cprint
+
 from llama_stack.apis.inference import (
     ChatCompletionResponseEventType,
     ChatCompletionResponseStreamChunk,
 )
-from termcolor import cprint
 
 
 class LogEvent:
diff --git a/llama_stack/cli/model/describe.py b/llama_stack/cli/model/describe.py
index 6b5325a03..c86487ae6 100644
--- a/llama_stack/cli/model/describe.py
+++ b/llama_stack/cli/model/describe.py
@@ -9,12 +9,12 @@ import json
 
 from llama_models.sku_list import resolve_model
 
+from termcolor import colored
+
 from llama_stack.cli.subcommand import Subcommand
 from llama_stack.cli.table import print_table
 from llama_stack.distribution.utils.serialize import EnumEncoder
 
-from termcolor import colored
-
 
 class ModelDescribe(Subcommand):
     """Show details about a model"""
diff --git a/llama_stack/cli/stack/run.py b/llama_stack/cli/stack/run.py
index 4e2009ee2..1c528baed 100644
--- a/llama_stack/cli/stack/run.py
+++ b/llama_stack/cli/stack/run.py
@@ -46,6 +46,7 @@ class StackRun(Subcommand):
 
         import pkg_resources
         import yaml
+
         from llama_stack.distribution.build import ImageType
         from llama_stack.distribution.utils.config_dirs import BUILDS_BASE_DIR
 
diff --git a/llama_stack/distribution/configure.py b/llama_stack/distribution/configure.py
index 35130c027..879738c00 100644
--- a/llama_stack/distribution/configure.py
+++ b/llama_stack/distribution/configure.py
@@ -9,6 +9,10 @@ from typing import Any
 from pydantic import BaseModel
 
 from llama_stack.distribution.datatypes import *  # noqa: F403
+from prompt_toolkit import prompt
+from prompt_toolkit.validation import Validator
+from termcolor import cprint
+
 from llama_stack.apis.memory.memory import MemoryBankType
 from llama_stack.distribution.distribution import (
     api_providers,
@@ -21,9 +25,6 @@ from llama_stack.distribution.utils.prompt_for_config import prompt_for_config
 from llama_stack.providers.impls.meta_reference.safety.config import (
     MetaReferenceShieldType,
 )
-from prompt_toolkit import prompt
-from prompt_toolkit.validation import Validator
-from termcolor import cprint
 
 
 def make_routing_entry_type(config_class: Any):
diff --git a/llama_stack/distribution/server/server.py b/llama_stack/distribution/server/server.py
index fb86e4ae3..a32c470d5 100644
--- a/llama_stack/distribution/server/server.py
+++ b/llama_stack/distribution/server/server.py
@@ -435,13 +435,13 @@ def main(yaml_config: str, port: int = 5000, disable_ipv6: bool = False):
         apis_to_serve = set(config.apis_to_serve)
     else:
         apis_to_serve = set(impls.keys())
-    
+
     for api_str in apis_to_serve:
         api = Api(api_str)
         endpoints = all_endpoints[api]
         impl = impls[api]
-    
+
         provider_spec = specs[api]
         if (
             isinstance(provider_spec, RemoteProviderSpec)
diff --git a/llama_stack/distribution/utils/config_dirs.py b/llama_stack/distribution/utils/config_dirs.py
index eca59493f..7a58e91f4 100644
--- a/llama_stack/distribution/utils/config_dirs.py
+++ b/llama_stack/distribution/utils/config_dirs.py
@@ -8,7 +8,9 @@ import os
 
 from pathlib import Path
 
-LLAMA_STACK_CONFIG_DIR = Path(os.getenv("LLAMA_STACK_CONFIG_DIR", os.path.expanduser("~/.llama/")))
+LLAMA_STACK_CONFIG_DIR = Path(
+    os.getenv("LLAMA_STACK_CONFIG_DIR", os.path.expanduser("~/.llama/"))
+)
 
 DISTRIBS_BASE_DIR = LLAMA_STACK_CONFIG_DIR / "distributions"
diff --git a/llama_stack/distribution/utils/dynamic.py b/llama_stack/distribution/utils/dynamic.py
index e15ab63d6..7c2ac2e6a 100644
--- a/llama_stack/distribution/utils/dynamic.py
+++ b/llama_stack/distribution/utils/dynamic.py
@@ -8,7 +8,6 @@ import importlib
 from typing import Any, Dict
 
 from llama_stack.distribution.datatypes import *  # noqa: F403
-from termcolor import cprint
 
 
 def instantiate_class_type(fully_qualified_name):
diff --git a/llama_stack/providers/adapters/inference/together/together.py b/llama_stack/providers/adapters/inference/together/together.py
index a56b18d7d..0737868ac 100644
--- a/llama_stack/providers/adapters/inference/together/together.py
+++ b/llama_stack/providers/adapters/inference/together/together.py
@@ -15,10 +15,10 @@ from llama_models.sku_list import resolve_model
 
 from together import Together
 
 from llama_stack.apis.inference import *  # noqa: F403
+from llama_stack.distribution.request_headers import get_request_provider_data
 from llama_stack.providers.utils.inference.augment_messages import (
     augment_messages_for_tools,
 )
-from llama_stack.distribution.request_headers import get_request_provider_data
 
 from .config import TogetherImplConfig
diff --git a/llama_stack/providers/impls/meta_reference/inference/quantization/loader.py b/llama_stack/providers/impls/meta_reference/inference/quantization/loader.py
index 9d28c9853..9c5182ead 100644
--- a/llama_stack/providers/impls/meta_reference/inference/quantization/loader.py
+++ b/llama_stack/providers/impls/meta_reference/inference/quantization/loader.py
@@ -14,6 +14,10 @@ import torch
 
 from fairscale.nn.model_parallel.mappings import reduce_from_model_parallel_region
 
 from llama_models.llama3.api.model import Transformer, TransformerBlock
+
+from termcolor import cprint
+from torch import Tensor
+
 from llama_stack.apis.inference import QuantizationType
 from llama_stack.apis.inference.config import (
@@ -21,9 +25,6 @@ from llama_stack.apis.inference.config import (
     MetaReferenceImplConfig,
 )
 
-from termcolor import cprint
-from torch import Tensor
-
 
 def is_fbgemm_available() -> bool:
     try:
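
Note: nearly every hunk above is the same mechanical change. Third-party imports (termcolor, prompt_toolkit, torch, together) are regrouped ahead of the first-party llama_stack imports, with relative imports last, which is the isort-style section ordering a pre-commit hook typically enforces. The hook configuration itself is not part of this patch; the sketch below only illustrates the grouping rule being applied, assuming Python 3.10+ and treating FIRST_PARTY as a stand-in for the project's real setting rather than anything read from the repo.

# Minimal sketch of the isort-style section rule this lint pass applies:
# stdlib first, then third-party, then first-party. Illustrative only;
# FIRST_PARTY is an assumption, not the project's actual configuration.
import sys

FIRST_PARTY = {"llama_stack"}  # assumed first-party root for this repo


def classify(module: str) -> str:
    """Return the import section an absolute module path sorts into."""
    root = module.split(".")[0]
    if root in FIRST_PARTY:
        return "first-party"
    if root in sys.stdlib_module_names:  # available on Python 3.10+
        return "stdlib"
    return "third-party"


# e.g. the event_logger.py hunk: "termcolor" classifies as third-party,
# so it moves above the first-party "llama_stack.apis.inference" block.
for mod in ("json", "termcolor", "llama_stack.apis.inference"):
    print(f"{mod:35s} -> {classify(mod)}")

Running the sketch prints the section each module sorts into, matching where the hunks above moved each import.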