forked from phoenix-oss/llama-stack-mirror
fix: resolve type hint issues and import dependencies (#1176)
# What does this PR do? - Fixed type hinting and missing imports across multiple modules. - Improved compatibility by using `TYPE_CHECKING` for conditional imports. - Updated `pyproject.toml` to enforce stricter linting. Signed-off-by: Sébastien Han <seb@redhat.com>
This commit is contained in:
parent
1a044ef894
commit
c223b1862b
7 changed files with 14 additions and 5 deletions
|
@@ -231,7 +231,7 @@ def worker_process_entrypoint(
|
|||
while True:
|
||||
try:
|
||||
task = req_gen.send(result)
|
||||
if isinstance(task, str) and task == _END_SENTINEL:
|
||||
if isinstance(task, str) and task == EndSentinel():
|
||||
break
|
||||
|
||||
assert isinstance(task, TaskRequest)
|
||||
|
|
|
@@ -12,6 +12,7 @@ import os
|
|||
from typing import Any, Dict, List, Optional
|
||||
|
||||
import torch
|
||||
from fairscale.nn.model_parallel.initialize import get_model_parallel_rank
|
||||
from fairscale.nn.model_parallel.layers import ColumnParallelLinear, RowParallelLinear
|
||||
from fairscale.nn.model_parallel.mappings import reduce_from_model_parallel_region
|
||||
from llama_models.llama3.api.args import ModelArgs
|
||||
|
|
|
@@ -73,7 +73,10 @@ def show():
|
|||
image_data.append({"image_base64": image_base64})
|
||||
buf.close()
|
||||
|
||||
req_con, resp_con = _open_connections()
|
||||
# The _open_connections method is dynamically made available to
|
||||
# the interpreter by bundling code from "code_env_prefix.py" -- by literally prefixing it -- and
|
||||
# then "eval"ing it within a sandboxed interpreter.
|
||||
req_con, resp_con = _open_connections() # noqa: F821
|
||||
|
||||
_json_dump = _json.dumps(
|
||||
{
|
||||
|
|
|
@@ -24,6 +24,7 @@ from llama_stack.apis.inference import (
|
|||
SamplingParams,
|
||||
TextTruncation,
|
||||
ToolChoice,
|
||||
ToolConfig,
|
||||
ToolDefinition,
|
||||
ToolPromptFormat,
|
||||
)
|
||||
|
|
|
@@ -3,6 +3,8 @@
|
|||
#
|
||||
# This source code is licensed under the terms described in the LICENSE file in
|
||||
# the root directory of this source tree.
|
||||
from typing import List
|
||||
|
||||
import pytest
|
||||
|
||||
from llama_stack.apis.common.job_types import JobStatus
|
||||
|
|
|
@@ -5,7 +5,10 @@
|
|||
# the root directory of this source tree.
|
||||
|
||||
import logging
|
||||
from typing import List, Optional
|
||||
from typing import TYPE_CHECKING, List, Optional
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from sentence_transformers import SentenceTransformer
|
||||
|
||||
from llama_stack.apis.inference import (
|
||||
EmbeddingsResponse,
|
||||
|
@@ -40,7 +43,7 @@ class SentenceTransformerEmbeddingMixin:
|
|||
)
|
||||
return EmbeddingsResponse(embeddings=embeddings)
|
||||
|
||||
def _load_sentence_transformer_model(self, model: str) -> "SentenceTransformer":
|
||||
def _load_sentence_transformer_model(self, model: str) -> SentenceTransformer:
|
||||
global EMBEDDING_MODELS
|
||||
|
||||
loaded_model = EMBEDDING_MODELS.get(model)
|
||||
|
|
|
@@ -129,7 +129,6 @@ ignore = [
|
|||
"E721",
|
||||
"E741",
|
||||
"F405",
|
||||
"F821",
|
||||
"F841",
|
||||
"C408", # ignored because we like the dict keyword argument syntax
|
||||
"E302",
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue