build: fix mypy build issues
commit 84a540f2d6 (parent 8e8c4e214e)
5 changed files with 18 additions and 18 deletions

@@ -16,11 +16,11 @@ repos:
         name: Check if files match
         entry: python3 ci_cd/check_files_match.py
         language: system
-# - repo: local
-#   hooks:
-#     - id: mypy
-#       name: mypy
-#       entry: python3 -m mypy --ignore-missing-imports
-#       language: system
-#       types: [python]
-#       files: ^litellm/
+  - repo: local
+    hooks:
+      - id: mypy
+        name: mypy
+        entry: python3 -m mypy --ignore-missing-imports
+        language: system
+        types: [python]
+        files: ^litellm/
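
The hunk above switches the mypy hook from a commented-out block to an active local hook: on each commit, pre-commit runs the entry command "python3 -m mypy --ignore-missing-imports" and appends the staged Python files that match ^litellm/ (language: system means it runs in the developer's own environment, with no managed virtualenv). Below is a rough, hand-run equivalent of that invocation, a minimal sketch assuming mypy is installed locally; it passes the whole litellm/ package instead of the staged files, purely for illustration.

# Roughly what the re-enabled hook runs, invoked by hand rather than by pre-commit.
# Passing the litellm/ directory is illustrative; pre-commit would pass staged files.
import subprocess
import sys

result = subprocess.run(
    [sys.executable, "-m", "mypy", "--ignore-missing-imports", "litellm/"],
    capture_output=True,
    text=True,
)
print(result.stdout, end="")
sys.exit(result.returncode)  # a non-zero exit is what blocks the commit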
@ -118,7 +118,7 @@ def completion(
|
|||
logger_fn=None,
|
||||
):
|
||||
try:
|
||||
import google.generativeai as genai
|
||||
import google.generativeai as genai # type: ignore
|
||||
except:
|
||||
raise Exception(
|
||||
"Importing google.generativeai failed, please run 'pip install -q google-generativeai"
|
||||
|

@@ -308,7 +308,7 @@ async def async_completion(
     messages,
     encoding,
 ):
-    import google.generativeai as genai
+    import google.generativeai as genai  # type: ignore

     response = await _model.generate_content_async(
         contents=prompt,
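
The two hunks above (and the matching ones in the files below) make a single kind of change: a trailing "# type: ignore" on each third-party import that mypy flags, typically because the package ships without type stubs or a py.typed marker. The comment suppresses the error on that one line while the rest of the module stays type-checked. A minimal sketch of the pattern follows; the helper name is illustrative and not part of the repository.

# Illustrative helper (not litellm's code): lazily import an optional SDK and
# keep mypy quiet about its missing type stubs on the import line only.
def _load_genai():
    try:
        import google.generativeai as genai  # type: ignore
    except ImportError:
        raise Exception(
            "Importing google.generativeai failed, please run 'pip install -q google-generativeai'"
        )
    return genai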

@@ -98,7 +98,7 @@ def completion(
     logger_fn=None,
 ):
     try:
-        import google.generativeai as palm
+        import google.generativeai as palm  # type: ignore
     except:
         raise Exception(
             "Importing google.generativeai failed, please run 'pip install -q google-generativeai"

@@ -923,7 +923,7 @@ def gemini_text_image_pt(messages: list):
     }
     """
     try:
-        import google.generativeai as genai
+        import google.generativeai as genai  # type: ignore
     except:
         raise Exception(
             "Importing google.generativeai failed, please run 'pip install -q google-generativeai"

@@ -289,11 +289,11 @@ def completion(
             Part,
             GenerationConfig,
         )
-        from google.cloud import aiplatform
+        from google.cloud import aiplatform  # type: ignore
         from google.protobuf import json_format  # type: ignore
         from google.protobuf.struct_pb2 import Value  # type: ignore
-        from google.cloud.aiplatform_v1beta1.types import content as gapic_content_types
-        import google.auth
+        from google.cloud.aiplatform_v1beta1.types import content as gapic_content_types  # type: ignore
+        import google.auth  # type: ignore

         ## Load credentials with the correct quota project ref: https://github.com/googleapis/python-aiplatform/issues/2557#issuecomment-1709284744
         print_verbose(

@@ -783,7 +783,7 @@ async def async_completion(
         """
         Vertex AI Model Garden
         """
-        from google.cloud import aiplatform
+        from google.cloud import aiplatform  # type: ignore

         ## LOGGING
         logging_obj.pre_call(

@@ -969,7 +969,7 @@ async def async_streaming(
         )
         response = llm_model.predict_streaming_async(prompt, **optional_params)
     elif mode == "custom":
-        from google.cloud import aiplatform
+        from google.cloud import aiplatform  # type: ignore

         stream = optional_params.pop("stream", None)


@@ -1059,7 +1059,7 @@ def embedding(
         )

         from vertexai.language_models import TextEmbeddingModel
-        import google.auth
+        import google.auth  # type: ignore

         ## Load credentials with the correct quota project ref: https://github.com/googleapis/python-aiplatform/issues/2557#issuecomment-1709284744
         try: