pre-commit lint

Xi Yan 2024-09-28 16:04:41 -07:00
parent ced5fb6388
commit 4ae8c63a2b
9 changed files with 19 additions and 14 deletions

View file

@@ -4,11 +4,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
+from termcolor import cprint
+
 from llama_stack.apis.inference import (
     ChatCompletionResponseEventType,
     ChatCompletionResponseStreamChunk,
 )
-from termcolor import cprint
 
 
 class LogEvent:
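
Note: most hunks in this commit are the same mechanical change — the import sorter run by pre-commit (an isort-style hook, by the look of it) groups imports as standard library, then third-party packages, then first-party llama_stack modules, separated by blank lines and sorted within each group. A minimal sketch of the resulting layout; the particular imports are illustrative, not taken from any one file here:

    import json  # 1. standard library

    from termcolor import cprint  # 2. third-party package

    from llama_stack.cli.subcommand import Subcommand  # 3. first-party (llama_stack)

    # Quick check that the example actually runs:
    cprint(json.dumps({"linted": True}), "green")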

View file

@@ -9,12 +9,12 @@ import json
 from llama_models.sku_list import resolve_model
+from termcolor import colored
+
 from llama_stack.cli.subcommand import Subcommand
 from llama_stack.cli.table import print_table
 from llama_stack.distribution.utils.serialize import EnumEncoder
-from termcolor import colored
 
 
 class ModelDescribe(Subcommand):
     """Show details about a model"""

View file

@@ -46,6 +46,7 @@ class StackRun(Subcommand):
         import pkg_resources
         import yaml
+
         from llama_stack.distribution.build import ImageType
         from llama_stack.distribution.utils.config_dirs import BUILDS_BASE_DIR

View file

@@ -9,6 +9,10 @@ from typing import Any
 from pydantic import BaseModel
 
 from llama_stack.distribution.datatypes import *  # noqa: F403
+from prompt_toolkit import prompt
+from prompt_toolkit.validation import Validator
+from termcolor import cprint
+
 from llama_stack.apis.memory.memory import MemoryBankType
 from llama_stack.distribution.distribution import (
     api_providers,
@@ -21,9 +25,6 @@ from llama_stack.distribution.utils.prompt_for_config import prompt_for_config
 from llama_stack.providers.impls.meta_reference.safety.config import (
     MetaReferenceShieldType,
 )
-from prompt_toolkit import prompt
-from prompt_toolkit.validation import Validator
-from termcolor import cprint
 
 
 def make_routing_entry_type(config_class: Any):

View file

@@ -435,13 +435,13 @@ def main(yaml_config: str, port: int = 5000, disable_ipv6: bool = False):
         apis_to_serve = set(config.apis_to_serve)
     else:
         apis_to_serve = set(impls.keys())
 
     for api_str in apis_to_serve:
         api = Api(api_str)
         endpoints = all_endpoints[api]
         impl = impls[api]
 
         provider_spec = specs[api]
         if (
             isinstance(provider_spec, RemoteProviderSpec)
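
For context, the loop in this hunk builds the server's routing state: each requested API name is resolved to an Api enum member, then that API's endpoint list and provider implementation are looked up. A stripped-down, self-contained sketch of the lookup pattern — the dicts below are stand-ins, not the real server state:

    from enum import Enum

    class Api(Enum):
        inference = "inference"
        memory = "memory"

    # Stand-in tables; the real server populates these from the stack config.
    all_endpoints = {Api.inference: ["/inference/chat_completion"], Api.memory: ["/memory/query"]}
    impls = {Api.inference: "<inference impl>", Api.memory: "<memory impl>"}

    for api_str in ["inference", "memory"]:
        api = Api(api_str)              # resolve the string to an enum member
        endpoints = all_endpoints[api]  # routes this API exposes
        impl = impls[api]               # implementation that serves them
        print(api.value, endpoints, impl)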

View file

@@ -8,7 +8,9 @@ import os
 from pathlib import Path
 
-LLAMA_STACK_CONFIG_DIR = Path(os.getenv("LLAMA_STACK_CONFIG_DIR", os.path.expanduser("~/.llama/")))
+LLAMA_STACK_CONFIG_DIR = Path(
+    os.getenv("LLAMA_STACK_CONFIG_DIR", os.path.expanduser("~/.llama/"))
+)
 
 DISTRIBS_BASE_DIR = LLAMA_STACK_CONFIG_DIR / "distributions"
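
The only change in this file is line-length formatting: the formatter (black, presumably, given the style) splits the over-long Path(...) call so the argument sits on its own indented line. The same transformation applied to a hypothetical constant:

    import os
    from pathlib import Path

    # Before formatting (over the default 88-column limit):
    # EXAMPLE_DIR = Path(os.getenv("EXAMPLE_DIR", os.path.expanduser("~/.example/data/")))

    # After formatting:
    EXAMPLE_DIR = Path(
        os.getenv("EXAMPLE_DIR", os.path.expanduser("~/.example/data/"))
    )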

View file

@@ -8,7 +8,6 @@ import importlib
 from typing import Any, Dict
 
 from llama_stack.distribution.datatypes import *  # noqa: F403
-from termcolor import cprint
 
 
 def instantiate_class_type(fully_qualified_name):

View file

@@ -15,10 +15,10 @@ from llama_models.sku_list import resolve_model
 from together import Together
 
 from llama_stack.apis.inference import *  # noqa: F403
+from llama_stack.distribution.request_headers import get_request_provider_data
 from llama_stack.providers.utils.inference.augment_messages import (
     augment_messages_for_tools,
 )
-from llama_stack.distribution.request_headers import get_request_provider_data
 
 from .config import TogetherImplConfig

View file

@@ -14,6 +14,10 @@ import torch
 from fairscale.nn.model_parallel.mappings import reduce_from_model_parallel_region
 from llama_models.llama3.api.model import Transformer, TransformerBlock
+from termcolor import cprint
+from torch import Tensor
+
 from llama_stack.apis.inference import QuantizationType
 from llama_stack.apis.inference.config import (
@@ -21,9 +25,6 @@ from llama_stack.apis.inference.config import (
     MetaReferenceImplConfig,
 )
-from termcolor import cprint
-from torch import Tensor
 
 
 def is_fbgemm_available() -> bool:
     try:
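
The diff cuts off inside is_fbgemm_available, so its body isn't shown here. Helpers like this are commonly a try/except around an import; a generic sketch of that pattern, not the file's actual body:

    def _module_available(name: str) -> bool:
        # Probe for an optional dependency without failing at import time.
        try:
            __import__(name)
            return True
        except ImportError:
            return False

    print(_module_available("fbgemm_gpu"))  # False unless fbgemm-gpu is installed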