Litellm remove circular imports (#7232)

* fix(utils.py): initial commit to remove circular imports - moves `LlmProviders` to utils.py

* fix(router.py): fix 'litellm.EmbeddingResponse' import from router.py


* refactor: fix litellm.ModelResponse import on pass through endpoints

* refactor(litellm_logging.py): fix circular import for custom callbacks literal

* fix(factory.py): fix circular imports inside prompt factory

* fix(cost_calculator.py): fix circular import for 'litellm.Usage'

* fix(proxy_server.py): fix potential circular import with `litellm.Router`

* fix(proxy/utils.py): fix potential circular import in `litellm.Router`

* fix: remove circular imports in 'auth_checks' and 'guardrails/'

* fix(prompt_injection_detection.py): fix router import

* fix(vertex_passthrough_logging_handler.py): fix potential circular imports in vertex pass through

* fix(anthropic_pass_through_logging_handler.py): fix potential circular imports

* fix(slack_alerting.py-+-ollama_chat.py): fix ModelResponse import

* fix(base.py): fix potential circular import

* fix(handler.py): fix potential circular ref in codestral + cohere handlers

* fix(azure.py): fix potential circular imports

* fix(gpt_transformation.py): fix ModelResponse import

* fix(litellm_logging.py): add logging base class - simplify typing

makes it easy for other files to type-check the logging object without introducing circular imports (see the sketch below)
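
The pattern here, as a minimal sketch with hypothetical names (the commit adds the actual base class in `litellm_logging.py`): keep a slim base class in a module with no heavy dependencies, have the real logging class subclass it, and let downstream code annotate against the base, so importing the type never pulls in the rest of the library.

```python
# Minimal sketch of the base-class pattern; all names here are
# illustrative, not litellm's actual API.

class LoggingBase:
    """Slim typing surface with no heavy imports; safe for any
    module to import for type annotations."""

    def pre_call(self, input: list, api_key: str) -> None:
        raise NotImplementedError

    def post_call(self, original_response: str) -> None:
        raise NotImplementedError


class Logging(LoggingBase):
    """The full implementation; in real code this would live in a
    module that imports much of the library."""

    def pre_call(self, input: list, api_key: str) -> None:
        print(f"pre_call with {len(input)} inputs")

    def post_call(self, original_response: str) -> None:
        print(f"post_call: {original_response[:80]}")


def log_success(logging_obj: LoggingBase, response: str) -> None:
    # Handlers annotate against the base class, so type checking
    # works without importing the heavyweight logging module.
    logging_obj.post_call(original_response=response)
```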

* fix(azure_ai/embed): fix potential circular import on handler.py

* fix(databricks/): fix potential circular imports in databricks/

* fix(vertex_ai/): fix potential circular imports on vertex ai embeddings

* fix(vertex_ai/image_gen): fix import

* fix(watsonx-+-bedrock): cleanup imports

* refactor(anthropic-pass-through-+-petals): cleanup imports

* refactor(huggingface/): cleanup imports

* fix(ollama-+-clarifai): cleanup circular imports

* fix(openai_like/): fix import

* fix(openai_like/): fix embedding handler

cleanup imports

* refactor(openai.py): cleanup imports

* fix(sagemaker/transformation.py): fix import

* ci(config.yml): add circular import test to ci/cd
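
A CI guard against circular imports can be as simple as importing every submodule of the package and failing on any error, which is how import cycles typically surface. The sketch below is an illustrative pytest-style test under that assumption, not the PR's actual `config.yml` job.

```python
# Hypothetical circular-import smoke test; illustrative only.
import importlib
import pkgutil

import litellm


def test_no_circular_imports() -> None:
    failures = []
    # Walk every submodule under the package and try to import it;
    # a circular import surfaces as an ImportError (or AttributeError
    # on a partially initialized module) here.
    for module_info in pkgutil.walk_packages(litellm.__path__, prefix="litellm."):
        try:
            importlib.import_module(module_info.name)
        except Exception as exc:  # collect all failures before asserting
            failures.append((module_info.name, repr(exc)))
    assert not failures, f"Modules failed to import: {failures}"
```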
Krish Dholakia, 2024-12-14 16:28:34 -08:00 (committed by GitHub)
parent 0dbf71291e
commit 516c2a6a70
48 changed files with 489 additions and 256 deletions


@@ -28,24 +28,31 @@ import litellm
 from litellm import LlmProviders
 from litellm._logging import verbose_logger
 from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
+from litellm.litellm_core_utils.prompt_templates.factory import (
+    custom_prompt,
+    prompt_factory,
+)
 from litellm.llms.base_llm.transformation import BaseConfig, BaseLLMException
 from litellm.llms.custom_httpx.http_handler import _DEFAULT_TTL_FOR_HTTPX_CLIENTS
 from litellm.secret_managers.main import get_secret_str
-from litellm.types.utils import ProviderField
+from litellm.types.utils import (
+    EmbeddingResponse,
+    ImageResponse,
+    ModelResponse,
+    ProviderField,
+    TextCompletionResponse,
+    Usage,
+)
 from litellm.utils import (
     Choices,
     CustomStreamWrapper,
     Message,
-    ModelResponse,
     ProviderConfigManager,
-    TextCompletionResponse,
-    Usage,
     convert_to_model_response_object,
 )
 from ...types.llms.openai import *
 from ..base import BaseLLM
-from litellm.litellm_core_utils.prompt_templates.factory import custom_prompt, prompt_factory
 from .chat.gpt_transformation import OpenAIGPTConfig
 from .common_utils import OpenAIError, drop_params_from_unprocessable_entity_error
@@ -882,7 +889,7 @@ class OpenAIChatCompletion(BaseLLM):
         self,
         input: list,
         data: dict,
-        model_response: litellm.utils.EmbeddingResponse,
+        model_response: EmbeddingResponse,
         timeout: float,
         logging_obj: LiteLLMLoggingObj,
         api_key: Optional[str] = None,
@@ -911,9 +918,7 @@ class OpenAIChatCompletion(BaseLLM):
             additional_args={"complete_input_dict": data},
             original_response=stringified_response,
         )
-        returned_response: (
-            litellm.EmbeddingResponse
-        ) = convert_to_model_response_object(
+        returned_response: EmbeddingResponse = convert_to_model_response_object(
             response_object=stringified_response,
             model_response_object=model_response,
             response_type="embedding",
@@ -953,14 +958,14 @@ class OpenAIChatCompletion(BaseLLM):
         input: list,
         timeout: float,
         logging_obj,
-        model_response: litellm.utils.EmbeddingResponse,
+        model_response: EmbeddingResponse,
         optional_params: dict,
         api_key: Optional[str] = None,
         api_base: Optional[str] = None,
         client=None,
         aembedding=None,
         max_retries: Optional[int] = None,
-    ) -> litellm.EmbeddingResponse:
+    ) -> EmbeddingResponse:
         super().embedding()
         try:
             model = model
@@ -1011,7 +1016,7 @@ class OpenAIChatCompletion(BaseLLM):
                 additional_args={"complete_input_dict": data},
                 original_response=sync_embedding_response,
             )
-            response: litellm.EmbeddingResponse = convert_to_model_response_object(
+            response: EmbeddingResponse = convert_to_model_response_object(
                 response_object=sync_embedding_response.model_dump(),
                 model_response_object=model_response,
                 _response_headers=headers,
@@ -1068,7 +1073,7 @@ class OpenAIChatCompletion(BaseLLM):
         except Exception as e:
             ## LOGGING
             logging_obj.post_call(
-                input=input,
+                input=prompt,
                 api_key=api_key,
                 original_response=str(e),
             )
@@ -1083,10 +1088,10 @@ class OpenAIChatCompletion(BaseLLM):
         logging_obj: Any,
         api_key: Optional[str] = None,
         api_base: Optional[str] = None,
-        model_response: Optional[litellm.utils.ImageResponse] = None,
+        model_response: Optional[ImageResponse] = None,
         client=None,
         aimg_generation=None,
-    ) -> litellm.ImageResponse:
+    ) -> ImageResponse:
         data = {}
         try:
             model = model