From 25887c1846de9dc91d711ab7c6e5cede6cba70ee Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Thu, 5 Sep 2024 10:09:44 -0700
Subject: [PATCH] fix import error

---
 litellm/__init__.py                               |  4 ++--
 litellm/files/main.py                             |  2 +-
 .../litellm_core_utils/audio_utils/utils.py       | 23 +++++++++++++++++++
 litellm/litellm_core_utils/core_helpers.py        | 19 ---------------
 .../llms/AzureOpenAI/audio_transcriptions.py      |  2 +-
 litellm/llms/OpenAI/audio_transcriptions.py       |  2 +-
 .../text_to_speech/text_to_speech_handler.py      |  2 +-
 .../client_initalization_utils.py                 |  6 +++--
 litellm/utils.py                                  | 12 ++++++----
 9 files changed, 40 insertions(+), 32 deletions(-)
 create mode 100644 litellm/litellm_core_utils/audio_utils/utils.py

diff --git a/litellm/__init__.py b/litellm/__init__.py
index 8b28ab80c..ce753b110 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -923,7 +923,7 @@ from .llms.bedrock.embed.amazon_titan_v2_transformation import (
     AmazonTitanV2Config,
 )
 from .llms.bedrock.embed.cohere_transformation import BedrockCohereEmbeddingConfig
-from .llms.openai import (
+from .llms.OpenAI.openai import (
     OpenAIConfig,
     OpenAITextCompletionConfig,
     MistralConfig,
@@ -938,7 +938,7 @@ from .llms.AI21.chat import AI21ChatConfig
 from .llms.fireworks_ai import FireworksAIConfig
 from .llms.volcengine import VolcEngineConfig
 from .llms.text_completion_codestral import MistralTextCompletionConfig
-from .llms.azure import (
+from .llms.AzureOpenAI.azure import (
     AzureOpenAIConfig,
     AzureOpenAIError,
     AzureOpenAIAssistantsAPIConfig,
diff --git a/litellm/files/main.py b/litellm/files/main.py
index 1ed1c1e61..84fb50652 100644
--- a/litellm/files/main.py
+++ b/litellm/files/main.py
@@ -16,7 +16,7 @@ import httpx
 import litellm
 from litellm import client, get_secret
 from litellm.llms.files_apis.azure import AzureOpenAIFilesAPI
-from litellm.llms.openai import FileDeleted, FileObject, OpenAIFilesAPI
+from litellm.llms.OpenAI.openai import FileDeleted, FileObject, OpenAIFilesAPI
 from litellm.types.llms.openai import (
     Batch,
     CreateFileRequest,
diff --git a/litellm/litellm_core_utils/audio_utils/utils.py b/litellm/litellm_core_utils/audio_utils/utils.py
new file mode 100644
index 000000000..ab19dac9c
--- /dev/null
+++ b/litellm/litellm_core_utils/audio_utils/utils.py
@@ -0,0 +1,23 @@
+"""
+Utils used for litellm.transcription() and litellm.atranscription()
+"""
+
+from litellm.types.utils import FileTypes
+
+
+def get_audio_file_name(file_obj: FileTypes) -> str:
+    """
+    Safely get the name of a file-like object or return its string representation.
+
+    Args:
+        file_obj (Any): A file-like object or any other object.
+
+    Returns:
+        str: The name of the file if available, otherwise a string representation of the object.
+    """
+    if hasattr(file_obj, "name"):
+        return getattr(file_obj, "name")
+    elif hasattr(file_obj, "__str__"):
+        return str(file_obj)
+    else:
+        return repr(file_obj)
diff --git a/litellm/litellm_core_utils/core_helpers.py b/litellm/litellm_core_utils/core_helpers.py
index 9f5075c22..269844ce8 100644
--- a/litellm/litellm_core_utils/core_helpers.py
+++ b/litellm/litellm_core_utils/core_helpers.py
@@ -4,7 +4,6 @@ import os
 from typing import List, Literal, Optional, Tuple
 
 from litellm._logging import verbose_logger
-from litellm.types.utils import FileTypes
 
 
 def map_finish_reason(
@@ -87,21 +86,3 @@ def _get_parent_otel_span_from_kwargs(kwargs: Optional[dict] = None):
             return kwargs["litellm_parent_otel_span"]
     except:
         return None
-
-
-def get_audio_file_name(file_obj: FileTypes) -> str:
-    """
-    Safely get the name of a file-like object or return its string representation.
-
-    Args:
-        file_obj (Any): A file-like object or any other object.
-
-    Returns:
-        str: The name of the file if available, otherwise a string representation of the object.
-    """
-    if hasattr(file_obj, "name"):
-        return getattr(file_obj, "name")
-    elif hasattr(file_obj, "__str__"):
-        return str(file_obj)
-    else:
-        return repr(file_obj)
diff --git a/litellm/llms/AzureOpenAI/audio_transcriptions.py b/litellm/llms/AzureOpenAI/audio_transcriptions.py
index db373797a..cecdfdc21 100644
--- a/litellm/llms/AzureOpenAI/audio_transcriptions.py
+++ b/litellm/llms/AzureOpenAI/audio_transcriptions.py
@@ -6,7 +6,7 @@ from openai import AsyncAzureOpenAI, AzureOpenAI
 from pydantic import BaseModel
 
 import litellm
-from litellm.litellm_core_utils.core_helpers import get_audio_file_name
+from litellm.litellm_core_utils.audio_utils.utils import get_audio_file_name
 from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
 from litellm.types.utils import FileTypes
 from litellm.utils import TranscriptionResponse, convert_to_model_response_object
diff --git a/litellm/llms/OpenAI/audio_transcriptions.py b/litellm/llms/OpenAI/audio_transcriptions.py
index 587ee471e..cfa0b0b1a 100644
--- a/litellm/llms/OpenAI/audio_transcriptions.py
+++ b/litellm/llms/OpenAI/audio_transcriptions.py
@@ -5,7 +5,7 @@ from openai import AsyncOpenAI, OpenAI
 from pydantic import BaseModel
 
 import litellm
-from litellm.litellm_core_utils.core_helpers import get_audio_file_name
+from litellm.litellm_core_utils.audio_utils.utils import get_audio_file_name
 from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
 from litellm.types.utils import FileTypes
 from litellm.utils import TranscriptionResponse, convert_to_model_response_object
diff --git a/litellm/llms/vertex_ai_and_google_ai_studio/text_to_speech/text_to_speech_handler.py b/litellm/llms/vertex_ai_and_google_ai_studio/text_to_speech/text_to_speech_handler.py
index 99ebfae1e..bc2424ecc 100644
--- a/litellm/llms/vertex_ai_and_google_ai_studio/text_to_speech/text_to_speech_handler.py
+++ b/litellm/llms/vertex_ai_and_google_ai_studio/text_to_speech/text_to_speech_handler.py
@@ -12,7 +12,7 @@ from litellm.llms.custom_httpx.http_handler import (
     _get_async_httpx_client,
     _get_httpx_client,
 )
-from litellm.llms.openai import HttpxBinaryResponseContent
+from litellm.llms.OpenAI.openai import HttpxBinaryResponseContent
 from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
     VertexLLM,
 )
diff --git a/litellm/router_utils/client_initalization_utils.py b/litellm/router_utils/client_initalization_utils.py
index 9d68891c4..4f750336e 100644
--- a/litellm/router_utils/client_initalization_utils.py
+++ b/litellm/router_utils/client_initalization_utils.py
@@ -8,7 +8,7 @@ import openai
 
 import litellm
 from litellm._logging import verbose_router_logger
-from litellm.llms.azure import get_azure_ad_token_from_oidc
+from litellm.llms.AzureOpenAI.azure import get_azure_ad_token_from_oidc
 from litellm.secret_managers.get_azure_ad_token_provider import (
     get_azure_ad_token_provider,
 )
@@ -337,7 +337,9 @@ def set_client(litellm_router_instance: LitellmRouter, model: dict):
                     azure_client_params["azure_ad_token_provider"] = (
                         azure_ad_token_provider
                     )
-                from litellm.llms.azure import select_azure_base_url_or_endpoint
+                from litellm.llms.AzureOpenAI.azure import (
+                    select_azure_base_url_or_endpoint,
+                )
 
                 # this decides if we should set azure_endpoint or base_url on Azure OpenAI Client
                 # required to support GPT-4 vision enhancements, since base_url needs to be set on Azure OpenAI Client
diff --git a/litellm/utils.py b/litellm/utils.py
index 7587563d5..48bf7b237 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -55,12 +55,10 @@ from tokenizers import Tokenizer
 import litellm
 import litellm._service_logger  # for storing API inputs, outputs, and metadata
 import litellm.litellm_core_utils
+import litellm.litellm_core_utils.audio_utils.utils
 import litellm.litellm_core_utils.json_validation_rule
 from litellm.caching import DualCache
-from litellm.litellm_core_utils.core_helpers import (
-    get_audio_file_name,
-    map_finish_reason,
-)
+from litellm.litellm_core_utils.core_helpers import map_finish_reason
 from litellm.litellm_core_utils.exception_mapping_utils import get_error_message
 from litellm.litellm_core_utils.get_llm_provider_logic import (
     _is_non_openai_azure_model,
@@ -567,7 +565,11 @@ def function_setup(
             or call_type == CallTypes.transcription.value
         ):
            _file_obj: FileTypes = args[1] if len(args) > 1 else kwargs["file"]
-            file_checksum = get_audio_file_name(file_obj=_file_obj)
+            file_checksum = (
+                litellm.litellm_core_utils.audio_utils.utils.get_audio_file_name(
+                    file_obj=_file_obj
+                )
+            )
             if "metadata" in kwargs:
                 kwargs["metadata"]["file_checksum"] = file_checksum
             else:
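
For reviewers, a minimal usage sketch (not part of the patch) of the relocated helper under its new import path, litellm.litellm_core_utils.audio_utils.utils. The temp file and byte buffer below are illustrative stand-ins for audio inputs, not anything the patch itself uses:

```python
import tempfile
from io import BytesIO

# New location of the helper after this patch (previously in core_helpers).
from litellm.litellm_core_utils.audio_utils.utils import get_audio_file_name

# A real file object exposes .name, so the on-disk path is returned.
with tempfile.NamedTemporaryFile(suffix=".wav") as audio_file:
    print(get_audio_file_name(audio_file))  # e.g. /tmp/tmpabc123.wav

# An in-memory buffer has no .name attribute, so the str() fallback is used.
print(get_audio_file_name(BytesIO(b"fake audio bytes")))
```

Moving the helper into its own audio_utils module (and referencing it via the fully qualified module path in litellm/utils.py) keeps core_helpers.py free of the litellm.types.utils import that was causing the circular-import error this patch fixes.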