fix import error

This commit is contained in:
Ishaan Jaff 2024-09-05 10:09:44 -07:00
parent ed627bc5d2
commit 25887c1846
9 changed files with 40 additions and 32 deletions

View file

@@ -923,7 +923,7 @@ from .llms.bedrock.embed.amazon_titan_v2_transformation import (
AmazonTitanV2Config,
)
from .llms.bedrock.embed.cohere_transformation import BedrockCohereEmbeddingConfig
from .llms.openai import (
from .llms.OpenAI.openai import (
OpenAIConfig,
OpenAITextCompletionConfig,
MistralConfig,
@@ -938,7 +938,7 @@ from .llms.AI21.chat import AI21ChatConfig
from .llms.fireworks_ai import FireworksAIConfig
from .llms.volcengine import VolcEngineConfig
from .llms.text_completion_codestral import MistralTextCompletionConfig
from .llms.azure import (
from .llms.AzureOpenAI.azure import (
AzureOpenAIConfig,
AzureOpenAIError,
AzureOpenAIAssistantsAPIConfig,

View file

@@ -16,7 +16,7 @@ import httpx
import litellm
from litellm import client, get_secret
from litellm.llms.files_apis.azure import AzureOpenAIFilesAPI
from litellm.llms.openai import FileDeleted, FileObject, OpenAIFilesAPI
from litellm.llms.OpenAI.openai import FileDeleted, FileObject, OpenAIFilesAPI
from litellm.types.llms.openai import (
Batch,
CreateFileRequest,

View file

@@ -0,0 +1,23 @@
"""
Utils used for litellm.transcription() and litellm.atranscription()
"""
from litellm.types.utils import FileTypes
def get_audio_file_name(file_obj: FileTypes) -> str:
"""
Safely get the name of a file-like object or return its string representation.
Args:
file_obj (Any): A file-like object or any other object.
Returns:
str: The name of the file if available, otherwise a string representation of the object.
"""
if hasattr(file_obj, "name"):
return getattr(file_obj, "name")
elif hasattr(file_obj, "__str__"):
return str(file_obj)
else:
return repr(file_obj)

View file

@@ -4,7 +4,6 @@ import os
from typing import List, Literal, Optional, Tuple
from litellm._logging import verbose_logger
from litellm.types.utils import FileTypes
def map_finish_reason(
@@ -87,21 +86,3 @@ def _get_parent_otel_span_from_kwargs(kwargs: Optional[dict] = None):
return kwargs["litellm_parent_otel_span"]
except:
return None
def get_audio_file_name(file_obj: FileTypes) -> str:
"""
Safely get the name of a file-like object or return its string representation.
Args:
file_obj (Any): A file-like object or any other object.
Returns:
str: The name of the file if available, otherwise a string representation of the object.
"""
if hasattr(file_obj, "name"):
return getattr(file_obj, "name")
elif hasattr(file_obj, "__str__"):
return str(file_obj)
else:
return repr(file_obj)

View file

@@ -6,7 +6,7 @@ from openai import AsyncAzureOpenAI, AzureOpenAI
from pydantic import BaseModel
import litellm
from litellm.litellm_core_utils.core_helpers import get_audio_file_name
from litellm.litellm_core_utils.audio_utils.utils import get_audio_file_name
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
from litellm.types.utils import FileTypes
from litellm.utils import TranscriptionResponse, convert_to_model_response_object

View file

@@ -5,7 +5,7 @@ from openai import AsyncOpenAI, OpenAI
from pydantic import BaseModel
import litellm
from litellm.litellm_core_utils.core_helpers import get_audio_file_name
from litellm.litellm_core_utils.audio_utils.utils import get_audio_file_name
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
from litellm.types.utils import FileTypes
from litellm.utils import TranscriptionResponse, convert_to_model_response_object

View file

@@ -12,7 +12,7 @@ from litellm.llms.custom_httpx.http_handler import (
_get_async_httpx_client,
_get_httpx_client,
)
from litellm.llms.openai import HttpxBinaryResponseContent
from litellm.llms.OpenAI.openai import HttpxBinaryResponseContent
from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
VertexLLM,
)

View file

@@ -8,7 +8,7 @@ import openai
import litellm
from litellm._logging import verbose_router_logger
from litellm.llms.azure import get_azure_ad_token_from_oidc
from litellm.llms.AzureOpenAI.azure import get_azure_ad_token_from_oidc
from litellm.secret_managers.get_azure_ad_token_provider import (
get_azure_ad_token_provider,
)
@@ -337,7 +337,9 @@ def set_client(litellm_router_instance: LitellmRouter, model: dict):
azure_client_params["azure_ad_token_provider"] = (
azure_ad_token_provider
)
from litellm.llms.azure import select_azure_base_url_or_endpoint
from litellm.llms.AzureOpenAI.azure import (
select_azure_base_url_or_endpoint,
)
# this decides if we should set azure_endpoint or base_url on Azure OpenAI Client
# required to support GPT-4 vision enhancements, since base_url needs to be set on Azure OpenAI Client

View file

@@ -55,12 +55,10 @@ from tokenizers import Tokenizer
import litellm
import litellm._service_logger # for storing API inputs, outputs, and metadata
import litellm.litellm_core_utils
import litellm.litellm_core_utils.audio_utils.utils
import litellm.litellm_core_utils.json_validation_rule
from litellm.caching import DualCache
from litellm.litellm_core_utils.core_helpers import (
get_audio_file_name,
map_finish_reason,
)
from litellm.litellm_core_utils.core_helpers import map_finish_reason
from litellm.litellm_core_utils.exception_mapping_utils import get_error_message
from litellm.litellm_core_utils.get_llm_provider_logic import (
_is_non_openai_azure_model,
@@ -567,7 +565,11 @@ def function_setup(
or call_type == CallTypes.transcription.value
):
_file_obj: FileTypes = args[1] if len(args) > 1 else kwargs["file"]
file_checksum = get_audio_file_name(file_obj=_file_obj)
file_checksum = (
litellm.litellm_core_utils.audio_utils.utils.get_audio_file_name(
file_obj=_file_obj
)
)
if "metadata" in kwargs:
kwargs["metadata"]["file_checksum"] = file_checksum
else: