diff --git a/litellm/__init__.py b/litellm/__init__.py
index 2a0d1f4e71..6be6f73c17 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -1134,26 +1134,26 @@ from .llms.bedrock.embed.amazon_titan_v2_transformation import (
 )
 from .llms.cohere.chat.transformation import CohereChatConfig
 from .llms.bedrock.embed.cohere_transformation import BedrockCohereEmbeddingConfig
-from .llms.OpenAI.openai import (
+from .llms.openai.openai import (
     OpenAIConfig,
     MistralEmbeddingConfig,
     DeepInfraConfig,
 )
-from litellm.llms.OpenAI.completion.transformation import OpenAITextCompletionConfig
+from litellm.llms.openai.completion.transformation import OpenAITextCompletionConfig
 from .llms.groq.chat.transformation import GroqChatConfig
 from .llms.azure_ai.chat.transformation import AzureAIStudioConfig
 from .llms.mistral.mistral_chat_transformation import MistralConfig
-from .llms.OpenAI.chat.o1_transformation import (
+from .llms.openai.chat.o1_transformation import (
     OpenAIO1Config,
 )
 openAIO1Config = OpenAIO1Config()
-from .llms.OpenAI.chat.gpt_transformation import (
+from .llms.openai.chat.gpt_transformation import (
     OpenAIGPTConfig,
 )
 openAIGPTConfig = OpenAIGPTConfig()
-from .llms.OpenAI.chat.gpt_audio_transformation import (
+from .llms.openai.chat.gpt_audio_transformation import (
     OpenAIGPTAudioConfig,
 )
diff --git a/litellm/assistants/main.py b/litellm/assistants/main.py
index 4c71554fa2..76222ca787 100644
--- a/litellm/assistants/main.py
+++ b/litellm/assistants/main.py
@@ -22,7 +22,7 @@ from litellm.utils import (
 )
 from ..llms.azure.assistants import AzureAssistantsAPI
-from ..llms.OpenAI.openai import OpenAIAssistantsAPI
+from ..llms.openai.openai import OpenAIAssistantsAPI
 from ..types.llms.openai import *
 from ..types.router import *
 from .utils import get_optional_params_add_message
diff --git a/litellm/batches/main.py b/litellm/batches/main.py
index af44924801..23673cd6d3 100644
--- a/litellm/batches/main.py
+++ b/litellm/batches/main.py
@@ -21,7 +21,7 @@ import httpx
 import litellm
 from litellm import client
 from litellm.llms.azure.azure import AzureBatchesAPI
-from litellm.llms.OpenAI.openai import OpenAIBatchesAPI
+from litellm.llms.openai.openai import OpenAIBatchesAPI
 from litellm.llms.vertex_ai_and_google_ai_studio.batches.handler import (
     VertexAIBatchPrediction,
 )
diff --git a/litellm/cost_calculator.py b/litellm/cost_calculator.py
index b9c67870e2..943fcf38b7 100644
--- a/litellm/cost_calculator.py
+++ b/litellm/cost_calculator.py
@@ -40,11 +40,11 @@ from litellm.llms.databricks.cost_calculator import (
 from litellm.llms.fireworks_ai.cost_calculator import (
     cost_per_token as fireworks_ai_cost_per_token,
 )
-from litellm.llms.OpenAI.cost_calculation import (
+from litellm.llms.openai.cost_calculation import (
     cost_per_second as openai_cost_per_second,
 )
-from litellm.llms.OpenAI.cost_calculation import cost_per_token as openai_cost_per_token
-from litellm.llms.OpenAI.cost_calculation import cost_router as openai_cost_router
+from litellm.llms.openai.cost_calculation import cost_per_token as openai_cost_per_token
+from litellm.llms.openai.cost_calculation import cost_router as openai_cost_router
 from litellm.llms.together_ai.cost_calculator import get_model_params_and_category
 from litellm.llms.vertex_ai_and_google_ai_studio.image_generation.cost_calculator import (
     cost_calculator as vertex_ai_image_cost_calculator,
diff --git a/litellm/files/main.py b/litellm/files/main.py
index 651d09c274..666ad1f4aa 100644
--- a/litellm/files/main.py
+++ b/litellm/files/main.py
@@ -16,7 +16,7 @@ import httpx
 import litellm
 from litellm import client, get_secret_str
 from litellm.llms.files_apis.azure import AzureOpenAIFilesAPI
-from litellm.llms.OpenAI.openai import FileDeleted, FileObject, OpenAIFilesAPI
+from litellm.llms.openai.openai import FileDeleted, FileObject, OpenAIFilesAPI
 from litellm.llms.vertex_ai_and_google_ai_studio.files.handler import (
     VertexAIFilesHandler,
 )
diff --git a/litellm/fine_tuning/main.py b/litellm/fine_tuning/main.py
index e7ce539e16..a4bdcadadd 100644
--- a/litellm/fine_tuning/main.py
+++ b/litellm/fine_tuning/main.py
@@ -19,11 +19,7 @@ import httpx
 import litellm
 from litellm._logging import verbose_logger
 from litellm.llms.fine_tuning_apis.azure import AzureOpenAIFineTuningAPI
-from litellm.llms.fine_tuning_apis.openai import (
-    FineTuningJob,
-    FineTuningJobCreate,
-    OpenAIFineTuningAPI,
-)
+from litellm.llms.openai.fine_tuning.handler import OpenAIFineTuningAPI, FineTuningJob, FineTuningJobCreate
 from litellm.llms.fine_tuning_apis.vertex_ai import VertexFineTuningAPI
 from litellm.secret_managers.main import get_secret_str
 from litellm.types.llms.openai import Hyperparameters
diff --git a/litellm/llms/azure/assistants.py b/litellm/llms/azure/assistants.py
index a4dc9f0bae..55f1e8a196 100644
--- a/litellm/llms/azure/assistants.py
+++ b/litellm/llms/azure/assistants.py
@@ -476,7 +476,7 @@ class AzureAssistantsAPI(BaseLLM):
         """
         Here's an example:
         ```
-        from litellm.llms.OpenAI.openai import OpenAIAssistantsAPI, MessageData
+        from litellm.llms.openai.openai import OpenAIAssistantsAPI, MessageData
         # create thread
         message: MessageData = {"role": "user", "content": "Hey, how's it going?"}
diff --git a/litellm/llms/azure/chat/o1_transformation.py b/litellm/llms/azure/chat/o1_transformation.py
index e1677f6811..2ba8841d0a 100644
--- a/litellm/llms/azure/chat/o1_transformation.py
+++ b/litellm/llms/azure/chat/o1_transformation.py
@@ -18,7 +18,7 @@ from typing import Any, List, Optional, Union
 import litellm
 from litellm.types.llms.openai import AllMessageValues, ChatCompletionUserMessage
-from ...OpenAI.chat.o1_transformation import OpenAIO1Config
+from ...openai.chat.o1_transformation import OpenAIO1Config
 class AzureOpenAIO1Config(OpenAIO1Config):
diff --git a/litellm/llms/azure_ai/chat/handler.py b/litellm/llms/azure_ai/chat/handler.py
index ce270d8f66..711d31b2da 100644
--- a/litellm/llms/azure_ai/chat/handler.py
+++ b/litellm/llms/azure_ai/chat/handler.py
@@ -3,7 +3,7 @@ from typing import Any, Callable, List, Optional, Union
 from httpx._config import Timeout
 from litellm.llms.bedrock.chat.invoke_handler import MockResponseIterator
-from litellm.llms.OpenAI.openai import OpenAIChatCompletion
+from litellm.llms.openai.openai import OpenAIChatCompletion
 from litellm.types.utils import ModelResponse
 from litellm.utils import CustomStreamWrapper
diff --git a/litellm/llms/azure_ai/chat/transformation.py b/litellm/llms/azure_ai/chat/transformation.py
index d8924fbb94..e32e8f1685 100644
--- a/litellm/llms/azure_ai/chat/transformation.py
+++ b/litellm/llms/azure_ai/chat/transformation.py
@@ -2,7 +2,7 @@ from typing import List, Optional, Tuple
 import litellm
 from litellm._logging import verbose_logger
-from litellm.llms.OpenAI.openai import OpenAIConfig
+from litellm.llms.openai.openai import OpenAIConfig
 from litellm.llms.prompt_templates.common_utils import (
     _audio_or_image_in_message_content,
     convert_content_list_to_str,
diff --git a/litellm/llms/azure_ai/embed/handler.py b/litellm/llms/azure_ai/embed/handler.py
index 933be7eb81..5b86f0b255 100644
--- a/litellm/llms/azure_ai/embed/handler.py
+++ b/litellm/llms/azure_ai/embed/handler.py
@@ -16,7 +16,7 @@ from litellm.llms.custom_httpx.http_handler import (
     _get_httpx_client,
     get_async_httpx_client,
 )
-from litellm.llms.OpenAI.openai import OpenAIChatCompletion
+from litellm.llms.openai.openai import OpenAIChatCompletion
 from litellm.types.llms.azure_ai import ImageEmbeddingRequest
 from litellm.types.utils import Embedding, EmbeddingResponse
 from litellm.utils import convert_to_model_response_object, is_base64_encoded
diff --git a/litellm/llms/azure_text.py b/litellm/llms/azure_text.py
index 9f52f214d7..f72accfb60 100644
--- a/litellm/llms/azure_text.py
+++ b/litellm/llms/azure_text.py
@@ -20,8 +20,8 @@ from litellm.utils import (
 )
 from .base import BaseLLM
-from .OpenAI.completion.handler import OpenAITextCompletion
-from .OpenAI.completion.transformation import OpenAITextCompletionConfig
+from .openai.completion.handler import OpenAITextCompletion
+from .openai.completion.transformation import OpenAITextCompletionConfig
 from .prompt_templates.factory import custom_prompt, prompt_factory
 openai_text_completion_config = OpenAITextCompletionConfig()
diff --git a/litellm/llms/databricks/chat/transformation.py b/litellm/llms/databricks/chat/transformation.py
index 8a1b468a82..05470f14c8 100644
--- a/litellm/llms/databricks/chat/transformation.py
+++ b/litellm/llms/databricks/chat/transformation.py
@@ -10,7 +10,7 @@ from pydantic import BaseModel
 from litellm.types.llms.openai import AllMessageValues
 from litellm.types.utils import ProviderField
-from ...OpenAI.chat.gpt_transformation import OpenAIGPTConfig
+from ...openai.chat.gpt_transformation import OpenAIGPTConfig
 from ...prompt_templates.common_utils import (
     handle_messages_with_content_list_to_str_conversion,
     strip_name_from_messages,
diff --git a/litellm/llms/deepseek/chat/transformation.py b/litellm/llms/deepseek/chat/transformation.py
index 5785bdd509..650bae0773 100644
--- a/litellm/llms/deepseek/chat/transformation.py
+++ b/litellm/llms/deepseek/chat/transformation.py
@@ -12,7 +12,7 @@ from litellm.secret_managers.main import get_secret_str
 from litellm.types.llms.openai import AllMessageValues, ChatCompletionAssistantMessage
 from ....utils import _remove_additional_properties, _remove_strict_from_schema
-from ...OpenAI.chat.gpt_transformation import OpenAIGPTConfig
+from ...openai.chat.gpt_transformation import OpenAIGPTConfig
 from ...prompt_templates.common_utils import (
     handle_messages_with_content_list_to_str_conversion,
 )
diff --git a/litellm/llms/fireworks_ai/chat/transformation.py b/litellm/llms/fireworks_ai/chat/transformation.py
index d1a49b605f..2d22a564d8 100644
--- a/litellm/llms/fireworks_ai/chat/transformation.py
+++ b/litellm/llms/fireworks_ai/chat/transformation.py
@@ -3,7 +3,7 @@ from typing import Literal, Optional, Tuple, Union
 from litellm.secret_managers.main import get_secret_str
-from ...OpenAI.chat.gpt_transformation import OpenAIGPTConfig
+from ...openai.chat.gpt_transformation import OpenAIGPTConfig
 from ..embed.fireworks_ai_transformation import FireworksAIEmbeddingConfig
diff --git a/litellm/llms/groq/chat/transformation.py b/litellm/llms/groq/chat/transformation.py
index a728d6e96d..267d527618 100644
--- a/litellm/llms/groq/chat/transformation.py
+++ b/litellm/llms/groq/chat/transformation.py
@@ -17,7 +17,7 @@ from litellm.types.llms.openai import (
     ChatCompletionToolParamFunctionChunk,
 )
-from ...OpenAI.chat.gpt_transformation import OpenAIGPTConfig
+from ...openai.chat.gpt_transformation import OpenAIGPTConfig
 class GroqChatConfig(OpenAIGPTConfig):
diff --git a/litellm/llms/hosted_vllm/chat/transformation.py b/litellm/llms/hosted_vllm/chat/transformation.py
index a1e2cc839e..a48a845a25 100644
--- a/litellm/llms/hosted_vllm/chat/transformation.py
+++ b/litellm/llms/hosted_vllm/chat/transformation.py
@@ -12,7 +12,7 @@ from litellm.secret_managers.main import get_secret_str
 from litellm.types.llms.openai import AllMessageValues, ChatCompletionAssistantMessage
 from ....utils import _remove_additional_properties, _remove_strict_from_schema
-from ...OpenAI.chat.gpt_transformation import OpenAIGPTConfig
+from ...openai.chat.gpt_transformation import OpenAIGPTConfig
 class HostedVLLMChatConfig(OpenAIGPTConfig):
diff --git a/litellm/llms/lm_studio/chat/transformation.py b/litellm/llms/lm_studio/chat/transformation.py
index 7d305f8ca4..62dd4dbd7b 100644
--- a/litellm/llms/lm_studio/chat/transformation.py
+++ b/litellm/llms/lm_studio/chat/transformation.py
@@ -12,7 +12,7 @@ from litellm.secret_managers.main import get_secret_str
 from litellm.types.llms.openai import AllMessageValues, ChatCompletionAssistantMessage
 from ....utils import _remove_additional_properties, _remove_strict_from_schema
-from ...OpenAI.chat.gpt_transformation import OpenAIGPTConfig
+from ...openai.chat.gpt_transformation import OpenAIGPTConfig
 class LMStudioChatConfig(OpenAIGPTConfig):
diff --git a/litellm/llms/OpenAI/chat/gpt_audio_transformation.py b/litellm/llms/openai/chat/gpt_audio_transformation.py
similarity index 100%
rename from litellm/llms/OpenAI/chat/gpt_audio_transformation.py
rename to litellm/llms/openai/chat/gpt_audio_transformation.py
diff --git a/litellm/llms/OpenAI/chat/gpt_transformation.py b/litellm/llms/openai/chat/gpt_transformation.py
similarity index 100%
rename from litellm/llms/OpenAI/chat/gpt_transformation.py
rename to litellm/llms/openai/chat/gpt_transformation.py
diff --git a/litellm/llms/OpenAI/chat/o1_handler.py b/litellm/llms/openai/chat/o1_handler.py
similarity index 96%
rename from litellm/llms/OpenAI/chat/o1_handler.py
rename to litellm/llms/openai/chat/o1_handler.py
index 5ff53a896d..e8515ac226 100644
--- a/litellm/llms/OpenAI/chat/o1_handler.py
+++ b/litellm/llms/openai/chat/o1_handler.py
@@ -10,7 +10,7 @@ from typing import Any, Callable, List, Optional, Union
 from httpx._config import Timeout
 from litellm.llms.bedrock.chat.invoke_handler import MockResponseIterator
-from litellm.llms.OpenAI.openai import OpenAIChatCompletion
+from litellm.llms.openai.openai import OpenAIChatCompletion
 from litellm.types.utils import ModelResponse
 from litellm.utils import CustomStreamWrapper
diff --git a/litellm/llms/OpenAI/chat/o1_transformation.py b/litellm/llms/openai/chat/o1_transformation.py
similarity index 100%
rename from litellm/llms/OpenAI/chat/o1_transformation.py
rename to litellm/llms/openai/chat/o1_transformation.py
diff --git a/litellm/llms/OpenAI/common_utils.py b/litellm/llms/openai/common_utils.py
similarity index 100%
rename from litellm/llms/OpenAI/common_utils.py
rename to litellm/llms/openai/common_utils.py
diff --git a/litellm/llms/OpenAI/completion/handler.py b/litellm/llms/openai/completion/handler.py
similarity index 100%
rename from litellm/llms/OpenAI/completion/handler.py
rename to litellm/llms/openai/completion/handler.py
diff --git a/litellm/llms/OpenAI/completion/transformation.py b/litellm/llms/openai/completion/transformation.py
similarity index 100%
rename from litellm/llms/OpenAI/completion/transformation.py
rename to litellm/llms/openai/completion/transformation.py
diff --git a/litellm/llms/OpenAI/completion/utils.py b/litellm/llms/openai/completion/utils.py
similarity index 100%
rename from litellm/llms/OpenAI/completion/utils.py
rename to litellm/llms/openai/completion/utils.py
diff --git a/litellm/llms/OpenAI/cost_calculation.py b/litellm/llms/openai/cost_calculation.py
similarity index 100%
rename from litellm/llms/OpenAI/cost_calculation.py
rename to litellm/llms/openai/cost_calculation.py
diff --git a/litellm/llms/fine_tuning_apis/openai.py b/litellm/llms/openai/fine_tuning/handler.py
similarity index 98%
rename from litellm/llms/fine_tuning_apis/openai.py
rename to litellm/llms/openai/fine_tuning/handler.py
index 7ce8c35367..00099608c1 100644
--- a/litellm/llms/fine_tuning_apis/openai.py
+++ b/litellm/llms/openai/fine_tuning/handler.py
@@ -6,11 +6,10 @@ from openai.pagination import AsyncCursorPage
 from openai.types.fine_tuning import FineTuningJob
 from litellm._logging import verbose_logger
-from litellm.llms.base import BaseLLM
 from litellm.types.llms.openai import FineTuningJobCreate
-class OpenAIFineTuningAPI(BaseLLM):
+class OpenAIFineTuningAPI:
     """
     OpenAI methods to support for batches
     """
diff --git a/litellm/llms/OpenAI/openai.py b/litellm/llms/openai/openai.py
similarity index 100%
rename from litellm/llms/OpenAI/openai.py
rename to litellm/llms/openai/openai.py
diff --git a/litellm/llms/OpenAI/realtime/handler.py b/litellm/llms/openai/realtime/handler.py
similarity index 100%
rename from litellm/llms/OpenAI/realtime/handler.py
rename to litellm/llms/openai/realtime/handler.py
diff --git a/litellm/llms/OpenAI/audio_transcriptions.py b/litellm/llms/openai/transcriptions/handler.py
similarity index 99%
rename from litellm/llms/OpenAI/audio_transcriptions.py
rename to litellm/llms/openai/transcriptions/handler.py
index d4523754c6..9e779c683d 100644
--- a/litellm/llms/OpenAI/audio_transcriptions.py
+++ b/litellm/llms/openai/transcriptions/handler.py
@@ -9,8 +9,7 @@ from litellm.litellm_core_utils.audio_utils.utils import get_audio_file_name
 from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
 from litellm.types.utils import FileTypes
 from litellm.utils import TranscriptionResponse, convert_to_model_response_object
-
-from .openai import OpenAIChatCompletion
+from ..openai import OpenAIChatCompletion
 class OpenAIAudioTranscription(OpenAIChatCompletion):
diff --git a/litellm/llms/openai_like/chat/transformation.py b/litellm/llms/openai_like/chat/transformation.py
index 2be71596aa..d60c70a378 100644
--- a/litellm/llms/openai_like/chat/transformation.py
+++ b/litellm/llms/openai_like/chat/transformation.py
@@ -14,7 +14,7 @@ from litellm.types.llms.openai import AllMessageValues, ChatCompletionAssistantM
 from litellm.types.utils import ModelResponse
 from ....utils import _remove_additional_properties, _remove_strict_from_schema
-from ...OpenAI.chat.gpt_transformation import OpenAIGPTConfig
+from ...openai.chat.gpt_transformation import OpenAIGPTConfig
 class OpenAILikeChatConfig(OpenAIGPTConfig):
diff --git a/litellm/llms/perplexity/chat/transformation.py b/litellm/llms/perplexity/chat/transformation.py
index 6d17de7669..6b709e7fc3 100644
--- a/litellm/llms/perplexity/chat/transformation.py
+++ b/litellm/llms/perplexity/chat/transformation.py
@@ -12,7 +12,7 @@ from litellm.secret_managers.main import get_secret_str
 from litellm.types.llms.openai import AllMessageValues, ChatCompletionAssistantMessage
 from ....utils import _remove_additional_properties, _remove_strict_from_schema
-from ...OpenAI.chat.gpt_transformation import OpenAIGPTConfig
+from ...openai.chat.gpt_transformation import OpenAIGPTConfig
 class PerplexityChatConfig(OpenAIGPTConfig):
diff --git a/litellm/llms/together_ai/chat.py b/litellm/llms/together_ai/chat.py
index 54b7e48680..51933196ed 100644
--- a/litellm/llms/together_ai/chat.py
+++ b/litellm/llms/together_ai/chat.py
@@ -10,7 +10,7 @@ from typing import Optional
 from litellm import get_model_info, verbose_logger
-from ..OpenAI.chat.gpt_transformation import OpenAIGPTConfig
+from ..openai.chat.gpt_transformation import OpenAIGPTConfig
 class TogetherAIConfig(OpenAIGPTConfig):
diff --git a/litellm/llms/together_ai/completion/handler.py b/litellm/llms/together_ai/completion/handler.py
index fac8794473..2574cb871a 100644
--- a/litellm/llms/together_ai/completion/handler.py
+++ b/litellm/llms/together_ai/completion/handler.py
@@ -12,7 +12,7 @@ from litellm.litellm_core_utils.litellm_logging import Logging
 from litellm.types.llms.openai import AllMessageValues, OpenAITextCompletionUserMessage
 from litellm.utils import ModelResponse
-from ...OpenAI.completion.handler import OpenAITextCompletion
+from ...openai.completion.handler import OpenAITextCompletion
 from .transformation import TogetherAITextCompletionConfig
 together_ai_text_completion_global_config = TogetherAITextCompletionConfig()
diff --git a/litellm/llms/together_ai/completion/transformation.py b/litellm/llms/together_ai/completion/transformation.py
index 6ec855de8b..ec3523932b 100644
--- a/litellm/llms/together_ai/completion/transformation.py
+++ b/litellm/llms/together_ai/completion/transformation.py
@@ -8,14 +8,14 @@ Docs: https://docs.together.ai/reference/completions-1
 from typing import List, Union, cast
-from litellm.llms.OpenAI.completion.utils import is_tokens_or_list_of_tokens
+from litellm.llms.openai.completion.utils import is_tokens_or_list_of_tokens
 from litellm.types.llms.openai import (
     AllMessageValues,
     AllPromptValues,
     OpenAITextCompletionUserMessage,
 )
-from ...OpenAI.completion.transformation import OpenAITextCompletionConfig
+from ...openai.completion.transformation import OpenAITextCompletionConfig
 class TogetherAITextCompletionConfig(OpenAITextCompletionConfig):
diff --git a/litellm/llms/vertex_ai_and_google_ai_studio/context_caching/vertex_ai_context_caching.py b/litellm/llms/vertex_ai_and_google_ai_studio/context_caching/vertex_ai_context_caching.py
index b9be8a3bd5..991d29d340 100644
--- a/litellm/llms/vertex_ai_and_google_ai_studio/context_caching/vertex_ai_context_caching.py
+++ b/litellm/llms/vertex_ai_and_google_ai_studio/context_caching/vertex_ai_context_caching.py
@@ -11,7 +11,7 @@ from litellm.llms.custom_httpx.http_handler import (
     HTTPHandler,
     get_async_httpx_client,
 )
-from litellm.llms.OpenAI.openai import AllMessageValues
+from litellm.llms.openai.openai import AllMessageValues
 from litellm.types.llms.vertex_ai import (
     CachedContentListAllResponseBody,
     RequestBody,
diff --git a/litellm/llms/vertex_ai_and_google_ai_studio/text_to_speech/text_to_speech_handler.py b/litellm/llms/vertex_ai_and_google_ai_studio/text_to_speech/text_to_speech_handler.py
index 170c2765dd..0cfe5cbe25 100644
--- a/litellm/llms/vertex_ai_and_google_ai_studio/text_to_speech/text_to_speech_handler.py
+++ b/litellm/llms/vertex_ai_and_google_ai_studio/text_to_speech/text_to_speech_handler.py
@@ -13,7 +13,7 @@ from litellm.llms.custom_httpx.http_handler import (
     _get_httpx_client,
     get_async_httpx_client,
 )
-from litellm.llms.OpenAI.openai import HttpxBinaryResponseContent
+from litellm.llms.openai.openai import HttpxBinaryResponseContent
 from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
     VertexLLM,
 )
diff --git a/litellm/llms/vertex_ai_and_google_ai_studio/vertex_ai_partner_models/main.py b/litellm/llms/vertex_ai_and_google_ai_studio/vertex_ai_partner_models/main.py
index fa2f9f7ff1..541f7937d8 100644
--- a/litellm/llms/vertex_ai_and_google_ai_studio/vertex_ai_partner_models/main.py
+++ b/litellm/llms/vertex_ai_and_google_ai_studio/vertex_ai_partner_models/main.py
@@ -91,7 +91,7 @@ class VertexAIPartnerModels(VertexBase):
             from google.cloud import aiplatform
             from litellm.llms.anthropic.chat import AnthropicChatCompletion
-            from litellm.llms.OpenAI.openai import OpenAIChatCompletion
+            from litellm.llms.openai.openai import OpenAIChatCompletion
             from litellm.llms.openai_like.chat.handler import OpenAILikeChatHandler
             from litellm.llms.text_completion_codestral import CodestralTextCompletion
             from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
diff --git a/litellm/llms/vertex_ai_and_google_ai_studio/vertex_model_garden/main.py b/litellm/llms/vertex_ai_and_google_ai_studio/vertex_model_garden/main.py
index 9dfc3efcc9..99aabea5ff 100644
--- a/litellm/llms/vertex_ai_and_google_ai_studio/vertex_model_garden/main.py
+++ b/litellm/llms/vertex_ai_and_google_ai_studio/vertex_model_garden/main.py
@@ -76,7 +76,7 @@ class VertexAIModelGardenModels(VertexBase):
             from google.cloud import aiplatform
             from litellm.llms.anthropic.chat import AnthropicChatCompletion
-            from litellm.llms.OpenAI.openai import OpenAIChatCompletion
+            from litellm.llms.openai.openai import OpenAIChatCompletion
             from litellm.llms.openai_like.chat.handler import OpenAILikeChatHandler
             from litellm.llms.text_completion_codestral import CodestralTextCompletion
             from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
diff --git a/litellm/llms/watsonx/chat/transformation.py b/litellm/llms/watsonx/chat/transformation.py
index 13fd516037..6e9dbe733c 100644
--- a/litellm/llms/watsonx/chat/transformation.py
+++ b/litellm/llms/watsonx/chat/transformation.py
@@ -14,7 +14,7 @@ from litellm.secret_managers.main import get_secret_str
 from litellm.types.llms.openai import AllMessageValues, ChatCompletionAssistantMessage
 from ....utils import _remove_additional_properties, _remove_strict_from_schema
-from ...OpenAI.chat.gpt_transformation import OpenAIGPTConfig
+from ...openai.chat.gpt_transformation import OpenAIGPTConfig
 class IBMWatsonXChatConfig(OpenAIGPTConfig):
diff --git a/litellm/llms/xai/chat/transformation.py b/litellm/llms/xai/chat/transformation.py
index ac3c423651..64dd52bd11 100644
--- a/litellm/llms/xai/chat/transformation.py
+++ b/litellm/llms/xai/chat/transformation.py
@@ -3,7 +3,7 @@ from typing import Literal, Optional, Tuple, Union
 from litellm.secret_managers.main import get_secret_str
-from ...OpenAI.chat.gpt_transformation import OpenAIGPTConfig
+from ...openai.chat.gpt_transformation import OpenAIGPTConfig
 XAI_API_BASE = "https://api.x.ai/v1"
diff --git a/litellm/main.py b/litellm/main.py
index e61fb2385e..66f91df331 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -115,10 +115,10 @@ from .llms.databricks.chat.handler import DatabricksChatCompletion
 from .llms.databricks.embed.handler import DatabricksEmbeddingHandler
 from .llms.groq.chat.handler import GroqChatCompletion
 from .llms.huggingface_restapi import Huggingface
-from .llms.OpenAI.audio_transcriptions import OpenAIAudioTranscription
-from .llms.OpenAI.chat.o1_handler import OpenAIO1ChatCompletion
-from .llms.OpenAI.completion.handler import OpenAITextCompletion
-from .llms.OpenAI.openai import OpenAIChatCompletion
+from .llms.openai.transcriptions.handler import OpenAIAudioTranscription
+from .llms.openai.chat.o1_handler import OpenAIO1ChatCompletion
+from .llms.openai.completion.handler import OpenAITextCompletion
+from .llms.openai.openai import OpenAIChatCompletion
 from .llms.openai_like.embedding.handler import OpenAILikeEmbeddingHandler
 from .llms.predibase import PredibaseChatCompletion
 from .llms.prompt_templates.common_utils import get_completion_messages
diff --git a/litellm/realtime_api/main.py b/litellm/realtime_api/main.py
index b17662c62c..3d17c4819a 100644
--- a/litellm/realtime_api/main.py
+++ b/litellm/realtime_api/main.py
@@ -10,7 +10,7 @@ from litellm.types.router import GenericLiteLLMParams
 from ..litellm_core_utils.litellm_logging import Logging as LiteLLMLogging
 from ..llms.azure.realtime.handler import AzureOpenAIRealtime
-from ..llms.OpenAI.realtime.handler import OpenAIRealtime
+from ..llms.openai.realtime.handler import OpenAIRealtime
 from ..utils import client as wrapper_client
 azure_realtime = AzureOpenAIRealtime()
diff --git a/tests/local_testing/test_assistants.py b/tests/local_testing/test_assistants.py
index 266bf65f4d..cf6d88b23a 100644
--- a/tests/local_testing/test_assistants.py
+++ b/tests/local_testing/test_assistants.py
@@ -20,15 +20,15 @@ from typing_extensions import override
 import litellm
 from litellm import create_thread, get_thread
-from litellm.llms.OpenAI.openai import (
+from litellm.llms.openai.openai import (
     AssistantEventHandler,
     AsyncAssistantEventHandler,
     AsyncCursorPage,
     MessageData,
     OpenAIAssistantsAPI,
 )
-from litellm.llms.OpenAI.openai import OpenAIMessage as Message
-from litellm.llms.OpenAI.openai import SyncCursorPage, Thread
+from litellm.llms.openai.openai import OpenAIMessage as Message
+from litellm.llms.openai.openai import SyncCursorPage, Thread
 """
 V0 Scope:
diff --git a/tests/local_testing/test_audio_speech.py b/tests/local_testing/test_audio_speech.py
index ac37b1b0ce..9c96a3727f 100644
--- a/tests/local_testing/test_audio_speech.py
+++ b/tests/local_testing/test_audio_speech.py
@@ -60,7 +60,7 @@ async def test_audio_speech_litellm(sync_mode, model, api_base, api_key):
             optional_params={},
         )
-        from litellm.llms.OpenAI.openai import HttpxBinaryResponseContent
+        from litellm.llms.openai.openai import HttpxBinaryResponseContent
         assert isinstance(response, HttpxBinaryResponseContent)
     else:
@@ -78,7 +78,7 @@ async def test_audio_speech_litellm(sync_mode, model, api_base, api_key):
             optional_params={},
         )
-        from litellm.llms.OpenAI.openai import HttpxBinaryResponseContent
+        from litellm.llms.openai.openai import HttpxBinaryResponseContent
         assert isinstance(response, HttpxBinaryResponseContent)
@@ -109,7 +109,7 @@ async def test_audio_speech_litellm_vertex(sync_mode):
     from types import SimpleNamespace
-    from litellm.llms.OpenAI.openai import HttpxBinaryResponseContent
+    from litellm.llms.openai.openai import HttpxBinaryResponseContent
     response.stream_to_file(speech_file_path)
diff --git a/tests/local_testing/test_config.py b/tests/local_testing/test_config.py
index 3d73ad000a..353fcd28eb 100644
--- a/tests/local_testing/test_config.py
+++ b/tests/local_testing/test_config.py
@@ -294,7 +294,7 @@ def test_provider_config_manager():
     from litellm import LITELLM_CHAT_PROVIDERS, LlmProviders
     from litellm.utils import ProviderConfigManager
     from litellm.llms.base_llm.transformation import BaseConfig
-    from litellm.llms.OpenAI.chat.gpt_transformation import OpenAIGPTConfig
+    from litellm.llms.openai.chat.gpt_transformation import OpenAIGPTConfig
     for provider in LITELLM_CHAT_PROVIDERS:
         if provider == LlmProviders.TRITON or provider == LlmProviders.PREDIBASE:
diff --git a/tests/router_unit_tests/test_router_endpoints.py b/tests/router_unit_tests/test_router_endpoints.py
index 19949ddba3..0d13b9c610 100644
--- a/tests/router_unit_tests/test_router_endpoints.py
+++ b/tests/router_unit_tests/test_router_endpoints.py
@@ -178,7 +178,7 @@ async def test_audio_speech_router(mode):
         optional_params={},
     )
-    from litellm.llms.OpenAI.openai import HttpxBinaryResponseContent
+    from litellm.llms.openai.openai import HttpxBinaryResponseContent
     assert isinstance(response, HttpxBinaryResponseContent)