Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 19:24:27 +00:00)
Code Quality Improvement - use vertex_ai/ as folder name for vertexAI (#7166)

* fix rename vertex ai
* run ci/cd again

parent 26918487d6
commit e09d3761d8

47 changed files with 58 additions and 58 deletions
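The diff below is mechanical: every import that previously pointed at litellm/llms/vertex_ai_and_google_ai_studio/ now points at litellm/llms/vertex_ai/, and the exported names are unchanged. A minimal before/after sketch of what this means for calling code (the final print is illustrative and not part of this commit):

# Before this commit (old folder name):
#   from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
#       VertexGeminiConfig,
#   )

# After this commit (new folder name); the class itself is untouched:
from litellm.llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
    VertexGeminiConfig,
)

# Illustrative check only: the module path now reflects the renamed folder.
print(VertexGeminiConfig.__module__)
# expected: litellm.llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini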
@@ -1085,7 +1085,7 @@ from .llms.deprecated_providers.palm import (
 from .llms.nlp_cloud.chat.handler import NLPCloudConfig
 from .llms.aleph_alpha import AlephAlphaConfig
 from .llms.petals import PetalsConfig
-from .llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
+from .llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
     VertexGeminiConfig,
     GoogleAIStudioGeminiConfig,
     VertexAIConfig,
@@ -1093,19 +1093,19 @@ from .llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gem
 )
 
 
-from .llms.vertex_ai_and_google_ai_studio.vertex_embeddings.transformation import (
+from .llms.vertex_ai.vertex_embeddings.transformation import (
     VertexAITextEmbeddingConfig,
 )
 
 vertexAITextEmbeddingConfig = VertexAITextEmbeddingConfig()
 
-from .llms.vertex_ai_and_google_ai_studio.vertex_ai_partner_models.anthropic.transformation import (
+from .llms.vertex_ai.vertex_ai_partner_models.anthropic.transformation import (
     VertexAIAnthropicConfig,
 )
-from .llms.vertex_ai_and_google_ai_studio.vertex_ai_partner_models.llama3.transformation import (
+from .llms.vertex_ai.vertex_ai_partner_models.llama3.transformation import (
     VertexAILlama3Config,
 )
-from .llms.vertex_ai_and_google_ai_studio.vertex_ai_partner_models.ai21.transformation import (
+from .llms.vertex_ai.vertex_ai_partner_models.ai21.transformation import (
     VertexAIAi21Config,
 )
 
@@ -22,7 +22,7 @@ import litellm
 from litellm import client
 from litellm.llms.azure.azure import AzureBatchesAPI
 from litellm.llms.openai.openai import OpenAIBatchesAPI
-from litellm.llms.vertex_ai_and_google_ai_studio.batches.handler import (
+from litellm.llms.vertex_ai.batches.handler import (
     VertexAIBatchPrediction,
 )
 from litellm.secret_managers.main import get_secret, get_secret_str
@@ -46,7 +46,7 @@ from litellm.llms.openai.cost_calculation import (
 from litellm.llms.openai.cost_calculation import cost_per_token as openai_cost_per_token
 from litellm.llms.openai.cost_calculation import cost_router as openai_cost_router
 from litellm.llms.together_ai.cost_calculator import get_model_params_and_category
-from litellm.llms.vertex_ai_and_google_ai_studio.image_generation.cost_calculator import (
+from litellm.llms.vertex_ai.image_generation.cost_calculator import (
     cost_calculator as vertex_ai_image_cost_calculator,
 )
 from litellm.types.llms.openai import HttpxBinaryResponseContent
@@ -17,7 +17,7 @@ import litellm
 from litellm import client, get_secret_str
 from litellm.llms.azure.files.handler import AzureOpenAIFilesAPI
 from litellm.llms.openai.openai import FileDeleted, FileObject, OpenAIFilesAPI
-from litellm.llms.vertex_ai_and_google_ai_studio.files.handler import (
+from litellm.llms.vertex_ai.files.handler import (
     VertexAIFilesHandler,
 )
 from litellm.types.llms.openai import (
@@ -20,7 +20,7 @@ import litellm
 from litellm._logging import verbose_logger
 from litellm.llms.azure.fine_tuning.handler import AzureOpenAIFineTuningAPI
 from litellm.llms.openai.fine_tuning.handler import OpenAIFineTuningAPI, FineTuningJob, FineTuningJobCreate
-from litellm.llms.vertex_ai_and_google_ai_studio.fine_tuning.handler import VertexFineTuningAPI
+from litellm.llms.vertex_ai.fine_tuning.handler import VertexFineTuningAPI
 from litellm.secret_managers.main import get_secret_str
 from litellm.types.llms.openai import Hyperparameters
 from litellm.types.router import *
@@ -24,7 +24,7 @@ from litellm.types.utils import (
 )
 
 if TYPE_CHECKING:
-    from litellm.llms.vertex_ai_and_google_ai_studio.vertex_llm_base import VertexBase
+    from litellm.llms.vertex_ai.vertex_llm_base import VertexBase
 else:
     VertexBase = Any
 
@@ -22,7 +22,7 @@ from litellm.types.utils import (
 )
 
 if TYPE_CHECKING:
-    from litellm.llms.vertex_ai_and_google_ai_studio.vertex_llm_base import VertexBase
+    from litellm.llms.vertex_ai.vertex_llm_base import VertexBase
 else:
     VertexBase = Any
 IAM_AUTH_KEY = "IAM_AUTH"
@@ -190,7 +190,7 @@ class GCSBucketBase(CustomBatchLogger):
         This function is used to get the Vertex instance for the GCS Bucket Logger.
         It checks if the Vertex instance is already created and cached, if not it creates a new instance and caches it.
         """
-        from litellm.llms.vertex_ai_and_google_ai_studio.vertex_llm_base import (
+        from litellm.llms.vertex_ai.vertex_llm_base import (
             VertexBase,
         )
 
@@ -10,7 +10,7 @@ from litellm.llms.custom_httpx.http_handler import (
     _get_httpx_client,
     get_async_httpx_client,
 )
-from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
+from litellm.llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
     VertexAIError,
     VertexLLM,
 )
@@ -1,7 +1,7 @@
 import uuid
 from typing import Any, Dict, Literal
 
-from litellm.llms.vertex_ai_and_google_ai_studio.common_utils import (
+from litellm.llms.vertex_ai.common_utils import (
     _convert_vertex_datetime_to_openai_datetime,
 )
 from litellm.types.llms.openai import Batch, BatchJobStatus, CreateBatchRequest
@@ -15,10 +15,10 @@ from litellm.llms.custom_httpx.http_handler import (
     _get_httpx_client,
     get_async_httpx_client,
 )
-from litellm.llms.vertex_ai_and_google_ai_studio.common_utils import (
+from litellm.llms.vertex_ai.common_utils import (
     _convert_vertex_datetime_to_openai_datetime,
 )
-from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
+from litellm.llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
     VertexAIError,
     VertexLLM,
 )
@@ -2,13 +2,13 @@ import json
 import uuid
 from typing import Any, Dict, List, Optional, Tuple, Union
 
-from litellm.llms.vertex_ai_and_google_ai_studio.common_utils import (
+from litellm.llms.vertex_ai.common_utils import (
     _convert_vertex_datetime_to_openai_datetime,
 )
-from litellm.llms.vertex_ai_and_google_ai_studio.gemini.transformation import (
+from litellm.llms.vertex_ai.gemini.transformation import (
     _transform_request_body,
 )
-from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
+from litellm.llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
     VertexGeminiConfig,
 )
 from litellm.types.llms.openai import (
@@ -12,7 +12,7 @@ from litellm.llms.custom_httpx.http_handler import (
     HTTPHandler,
     get_async_httpx_client,
 )
-from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
+from litellm.llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
     VertexLLM,
 )
 from litellm.types.llms.openai import FineTuningJobCreate
@@ -10,7 +10,7 @@ from litellm.llms.custom_httpx.http_handler import (
     HTTPHandler,
     get_async_httpx_client,
 )
-from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
+from litellm.llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
     VertexLLM,
 )
 
@@ -10,7 +10,7 @@ from litellm.llms.custom_httpx.http_handler import (
     HTTPHandler,
     get_async_httpx_client,
 )
-from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
+from litellm.llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
     VertexAIError,
     VertexLLM,
 )
@@ -14,7 +14,7 @@ from litellm.llms.custom_httpx.http_handler import (
     get_async_httpx_client,
 )
 from litellm.llms.openai.openai import HttpxBinaryResponseContent
-from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
+from litellm.llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
     VertexLLM,
 )
 
@@ -94,7 +94,7 @@ class VertexAIPartnerModels(VertexBase):
             from litellm.llms.openai.openai import OpenAIChatCompletion
             from litellm.llms.openai_like.chat.handler import OpenAILikeChatHandler
             from litellm.llms.text_completion_codestral import CodestralTextCompletion
-            from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
+            from litellm.llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
                 VertexLLM,
             )
         except Exception:
@@ -15,10 +15,10 @@ from litellm.llms.custom_httpx.http_handler import (
     _get_httpx_client,
     get_async_httpx_client,
 )
-from litellm.llms.vertex_ai_and_google_ai_studio.vertex_ai_non_gemini import (
+from litellm.llms.vertex_ai.vertex_ai_non_gemini import (
     VertexAIError,
 )
-from litellm.llms.vertex_ai_and_google_ai_studio.vertex_llm_base import VertexBase
+from litellm.llms.vertex_ai.vertex_llm_base import VertexBase
 from litellm.types.llms.vertex_ai import *
 from litellm.utils import Usage
 
@@ -79,7 +79,7 @@ class VertexAIModelGardenModels(VertexBase):
             from litellm.llms.openai.openai import OpenAIChatCompletion
             from litellm.llms.openai_like.chat.handler import OpenAILikeChatHandler
             from litellm.llms.text_completion_codestral import CodestralTextCompletion
-            from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
+            from litellm.llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
                 VertexLLM,
             )
         except Exception:
@@ -125,29 +125,29 @@ from .llms.replicate.chat.handler import completion as replicate_chat_completion
 from .llms.text_completion_codestral import CodestralTextCompletion
 from .llms.together_ai.completion.handler import TogetherAITextCompletion
 from .llms.triton import TritonChatCompletion
-from .llms.vertex_ai_and_google_ai_studio import vertex_ai_non_gemini
-from .llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
+from .llms.vertex_ai import vertex_ai_non_gemini
+from .llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
     VertexLLM,
 )
-from .llms.vertex_ai_and_google_ai_studio.gemini_embeddings.batch_embed_content_handler import (
+from .llms.vertex_ai.gemini_embeddings.batch_embed_content_handler import (
     GoogleBatchEmbeddings,
 )
-from .llms.vertex_ai_and_google_ai_studio.image_generation.image_generation_handler import (
+from .llms.vertex_ai.image_generation.image_generation_handler import (
     VertexImageGeneration,
 )
-from .llms.vertex_ai_and_google_ai_studio.multimodal_embeddings.embedding_handler import (
+from .llms.vertex_ai.multimodal_embeddings.embedding_handler import (
     VertexMultimodalEmbedding,
 )
-from .llms.vertex_ai_and_google_ai_studio.text_to_speech.text_to_speech_handler import (
+from .llms.vertex_ai.text_to_speech.text_to_speech_handler import (
     VertexTextToSpeechAPI,
 )
-from .llms.vertex_ai_and_google_ai_studio.vertex_ai_partner_models.main import (
+from .llms.vertex_ai.vertex_ai_partner_models.main import (
     VertexAIPartnerModels,
 )
-from .llms.vertex_ai_and_google_ai_studio.vertex_embeddings.embedding_handler import (
+from .llms.vertex_ai.vertex_embeddings.embedding_handler import (
     VertexEmbedding,
 )
-from .llms.vertex_ai_and_google_ai_studio.vertex_model_garden.main import (
+from .llms.vertex_ai.vertex_model_garden.main import (
     VertexAIModelGardenModels,
 )
 from .llms.vllm.completion import handler as vllm_handler
@@ -11,7 +11,7 @@ from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLogging
 from litellm.litellm_core_utils.litellm_logging import (
     get_standard_logging_object_payload,
 )
-from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
+from litellm.llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
     ModelResponseIterator as VertexModelResponseIterator,
 )
 from litellm.proxy._types import PassThroughEndpointLoggingTypedDict
@@ -71,7 +71,7 @@ class VertexPassthroughLoggingHandler:
             }
 
         elif "predict" in url_route:
-            from litellm.llms.vertex_ai_and_google_ai_studio.image_generation.image_generation_handler import (
+            from litellm.llms.vertex_ai.image_generation.image_generation_handler import (
                 VertexImageGeneration,
             )
             from litellm.types.utils import PassthroughCallTypes
@@ -23,7 +23,7 @@ from litellm._logging import verbose_proxy_logger
 from litellm.integrations.custom_logger import CustomLogger
 from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
 from litellm.llms.custom_httpx.http_handler import get_async_httpx_client
-from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
+from litellm.llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
     ModelResponseIterator,
 )
 from litellm.proxy._types import (
@@ -13,7 +13,7 @@ from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLogging
 from litellm.llms.anthropic.chat.handler import (
     ModelResponseIterator as AnthropicIterator,
 )
-from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
+from litellm.llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
     ModelResponseIterator as VertexAIIterator,
 )
 from litellm.proxy._types import PassThroughEndpointLoggingResultValues
@@ -12,7 +12,7 @@ from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLogging
 from litellm.litellm_core_utils.litellm_logging import (
     get_standard_logging_object_payload,
 )
-from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
+from litellm.llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
     VertexLLM,
 )
 from litellm.proxy._types import PassThroughEndpointLoggingResultValues
@@ -3,7 +3,7 @@ from typing import TYPE_CHECKING, Any, Dict, Optional, TypedDict
 from litellm.types.utils import StandardLoggingPayload
 
 if TYPE_CHECKING:
-    from litellm.llms.vertex_ai_and_google_ai_studio.vertex_llm_base import VertexBase
+    from litellm.llms.vertex_ai.vertex_llm_base import VertexBase
 else:
     VertexBase = Any
 
@@ -4086,7 +4086,7 @@ def get_api_base(
         _optional_params.vertex_location is not None
         and _optional_params.vertex_project is not None
     ):
-        from litellm.llms.vertex_ai_and_google_ai_studio.vertex_ai_partner_models.main import (
+        from litellm.llms.vertex_ai.vertex_ai_partner_models.main import (
             VertexPartnerProvider,
             create_vertex_url,
         )
@@ -135,10 +135,10 @@ async def test_anthropic_api_max_completion_tokens(model: str):
 
 
 def test_all_model_configs():
-    from litellm.llms.vertex_ai_and_google_ai_studio.vertex_ai_partner_models.ai21.transformation import (
+    from litellm.llms.vertex_ai.vertex_ai_partner_models.ai21.transformation import (
         VertexAIAi21Config,
     )
-    from litellm.llms.vertex_ai_and_google_ai_studio.vertex_ai_partner_models.llama3.transformation import (
+    from litellm.llms.vertex_ai.vertex_ai_partner_models.llama3.transformation import (
         VertexAILlama3Config,
     )
 
@@ -325,7 +325,7 @@ def test_all_model_configs():
         optional_params={},
     ) == {"max_tokens": 10}
 
-    from litellm.llms.vertex_ai_and_google_ai_studio.vertex_ai_partner_models.anthropic.transformation import (
+    from litellm.llms.vertex_ai.vertex_ai_partner_models.anthropic.transformation import (
         VertexAIAnthropicConfig,
     )
 
@@ -343,7 +343,7 @@ def test_all_model_configs():
         drop_params=False,
     ) == {"max_tokens": 10}
 
-    from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
+    from litellm.llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
         VertexAIConfig,
         GoogleAIStudioGeminiConfig,
         VertexGeminiConfig,
@@ -25,7 +25,7 @@ from litellm.litellm_core_utils.prompt_templates.factory import (
 from litellm.litellm_core_utils.prompt_templates.common_utils import (
     get_completion_messages,
 )
-from litellm.llms.vertex_ai_and_google_ai_studio.gemini.transformation import (
+from litellm.llms.vertex_ai.gemini.transformation import (
     _gemini_convert_messages_with_history,
 )
 from unittest.mock import AsyncMock, MagicMock, patch
@@ -95,7 +95,7 @@ def test_completion_pydantic_obj_2():
 
 
 def test_build_vertex_schema():
-    from litellm.llms.vertex_ai_and_google_ai_studio.common_utils import (
+    from litellm.llms.vertex_ai.common_utils import (
         _build_vertex_schema,
     )
     import json
@@ -1121,7 +1121,7 @@ def test_logprobs():
 
 def test_process_gemini_image():
     """Test the _process_gemini_image function for different image sources"""
-    from litellm.llms.vertex_ai_and_google_ai_studio.gemini.transformation import (
+    from litellm.llms.vertex_ai.gemini.transformation import (
         _process_gemini_image,
     )
     from litellm.types.llms.vertex_ai import PartType, FileDataType, BlobType
@@ -1171,7 +1171,7 @@ def test_process_gemini_image():
 
 def test_get_image_mime_type_from_url():
     """Test the _get_image_mime_type_from_url function for different image URLs"""
-    from litellm.llms.vertex_ai_and_google_ai_studio.gemini.transformation import (
+    from litellm.llms.vertex_ai.gemini.transformation import (
         _get_image_mime_type_from_url,
     )
 
@@ -1226,7 +1226,7 @@ def test_vertex_embedding_url(model, expected_url):
 
     When a fine-tuned embedding model is used, the URL is different from the standard one.
     """
-    from litellm.llms.vertex_ai_and_google_ai_studio.common_utils import _get_vertex_url
+    from litellm.llms.vertex_ai.common_utils import _get_vertex_url
 
     url, endpoint = _get_vertex_url(
         mode="embedding",
@@ -32,10 +32,10 @@ from litellm import (
     completion_cost,
     embedding,
 )
-from litellm.llms.vertex_ai_and_google_ai_studio.gemini.transformation import (
+from litellm.llms.vertex_ai.gemini.transformation import (
     _gemini_convert_messages_with_history,
 )
-from litellm.llms.vertex_ai_and_google_ai_studio.vertex_llm_base import VertexBase
+from litellm.llms.vertex_ai.vertex_llm_base import VertexBase
 
 
 litellm.num_retries = 3
@@ -2338,7 +2338,7 @@ def test_prompt_factory_nested():
 
 
 def test_get_token_url():
-    from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
+    from litellm.llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
         VertexLLM,
     )
 
@@ -2927,7 +2927,7 @@ def test_gemini_function_call_parameter_in_messages():
 
 def test_gemini_function_call_parameter_in_messages_2():
     litellm.set_verbose = True
-    from litellm.llms.vertex_ai_and_google_ai_studio.gemini.transformation import (
+    from litellm.llms.vertex_ai.gemini.transformation import (
         _gemini_convert_messages_with_history,
     )
 
@@ -24,7 +24,7 @@ from litellm import RateLimitError, Timeout, completion, completion_cost, embedd
 from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
 from litellm.litellm_core_utils.prompt_templates.factory import anthropic_messages_pt
 
-# litellm.num_retries=3
+# litellm.num_retries = 3
 
 litellm.cache = None
 litellm.success_callback = []
@@ -20,7 +20,7 @@ from test_gcs_bucket import load_vertex_ai_credentials
 
 from litellm import create_fine_tuning_job
 from litellm._logging import verbose_logger
-from litellm.llms.vertex_ai_and_google_ai_studio.fine_tuning.handler import (
+from litellm.llms.vertex_ai.fine_tuning.handler import (
     FineTuningJobCreate,
     VertexFineTuningAPI,
 )
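Downstream projects that import from the old folder need the same one-line substitution. A hedged sketch of a helper that finds (and optionally rewrites) the old module prefix in a source tree; the function name, the "src/" path, and the dry_run flag are assumptions for illustration and are not part of this commit:

from pathlib import Path

# Covers both "from litellm.llms.vertex_ai_and_google_ai_studio..." and
# the relative "from .llms.vertex_ai_and_google_ai_studio..." spellings.
OLD_PREFIX = "llms.vertex_ai_and_google_ai_studio"
NEW_PREFIX = "llms.vertex_ai"


def rewrite_imports(root: str, dry_run: bool = True) -> int:
    """Return how many .py files under root still use the old prefix; rewrite them unless dry_run."""
    changed = 0
    for path in Path(root).rglob("*.py"):
        text = path.read_text(encoding="utf-8")
        if OLD_PREFIX in text:
            changed += 1
            if not dry_run:
                path.write_text(text.replace(OLD_PREFIX, NEW_PREFIX), encoding="utf-8")
    return changed


# Example (assumed project layout): count affected files without modifying anything.
print(rewrite_imports("src/", dry_run=True))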