fix use consistent naming (#7092)

Ishaan Jaff 2024-12-07 22:01:00 -08:00 committed by GitHub
parent 7d4a1cb4e2
commit 36e99ebce7
20 changed files with 20 additions and 20 deletions
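
This commit renames the litellm.llms.AzureOpenAI package to litellm.llms.azure, so the Azure directory matches the lowercase naming of its sibling provider packages (azure_ai, azure_text, deepseek, hosted_vllm, ...). Every hunk below is the same mechanical path substitution. A minimal sketch of what the rename means for an import, assuming a litellm checkout that includes this commit:

# After this commit, the Azure handler lives under the lowercase package name.
from litellm.llms.azure.azure import AzureChatCompletion

azure_chat_completion = AzureChatCompletion()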


@@ -1167,18 +1167,18 @@ from .llms.jina_ai.embedding.transformation import JinaAIEmbeddingConfig
 from .llms.xai.chat.xai_transformation import XAIChatConfig
 from .llms.volcengine import VolcEngineConfig
 from .llms.text_completion_codestral import MistralTextCompletionConfig
-from .llms.AzureOpenAI.azure import (
+from .llms.azure.azure import (
     AzureOpenAIError,
     AzureOpenAIAssistantsAPIConfig,
 )
-from .llms.AzureOpenAI.chat.gpt_transformation import AzureOpenAIConfig
+from .llms.azure.chat.gpt_transformation import AzureOpenAIConfig
 from .llms.hosted_vllm.chat.transformation import HostedVLLMChatConfig
 from .llms.deepseek.chat.transformation import DeepSeekChatConfig
 from .llms.lm_studio.chat.transformation import LMStudioChatConfig
 from .llms.lm_studio.embed.transformation import LmStudioEmbeddingConfig
 from .llms.perplexity.chat.transformation import PerplexityChatConfig
-from .llms.AzureOpenAI.chat.o1_transformation import AzureOpenAIO1Config
+from .llms.azure.chat.o1_transformation import AzureOpenAIO1Config
 from .llms.watsonx.completion.handler import IBMWatsonXAIConfig
 from .llms.watsonx.chat.transformation import IBMWatsonXChatConfig
 from .main import *  # type: ignore


@@ -12,7 +12,7 @@ from openai.types.beta.assistant import Assistant
 from openai.types.beta.assistant_deleted import AssistantDeleted
 import litellm
-from litellm.llms.AzureOpenAI import assistants
+from litellm.llms.azure import assistants
 from litellm.types.router import GenericLiteLLMParams
 from litellm.utils import (
     exception_type,
@@ -21,7 +21,7 @@ from litellm.utils import (
     supports_httpx_timeout,
 )
-from ..llms.AzureOpenAI.assistants import AzureAssistantsAPI
+from ..llms.azure.assistants import AzureAssistantsAPI
 from ..llms.OpenAI.openai import OpenAIAssistantsAPI
 from ..types.llms.openai import *
 from ..types.router import *


@@ -20,7 +20,7 @@ import httpx
 import litellm
 from litellm import client
-from litellm.llms.AzureOpenAI.azure import AzureBatchesAPI
+from litellm.llms.azure.azure import AzureBatchesAPI
 from litellm.llms.OpenAI.openai import OpenAIBatchesAPI
 from litellm.llms.vertex_ai_and_google_ai_studio.batches.handler import (
     VertexAIBatchPrediction,


@@ -22,12 +22,12 @@ from litellm.litellm_core_utils.llm_cost_calc.utils import _generic_cost_per_cha
 from litellm.llms.anthropic.cost_calculation import (
     cost_per_token as anthropic_cost_per_token,
 )
+from litellm.llms.azure.cost_calculation import (
+    cost_per_token as azure_openai_cost_per_token,
+)
 from litellm.llms.azure_ai.cost_calculator import (
     cost_per_query as azure_ai_rerank_cost_per_query,
 )
-from litellm.llms.AzureOpenAI.cost_calculation import (
-    cost_per_token as azure_openai_cost_per_token,
-)
 from litellm.llms.bedrock.image.cost_calculator import (
     cost_calculator as bedrock_image_cost_calculator,
 )


@@ -102,12 +102,12 @@ from .llms import (
 from .llms.ai21 import completion as ai21
 from .llms.anthropic.chat import AnthropicChatCompletion
 from .llms.anthropic.completion import AnthropicTextCompletion
+from .llms.azure.audio_transcriptions import AzureAudioTranscription
+from .llms.azure.azure import AzureChatCompletion, _check_dynamic_azure_params
+from .llms.azure.chat.o1_handler import AzureOpenAIO1ChatCompletion
 from .llms.azure_ai.chat import AzureAIChatCompletion
 from .llms.azure_ai.embed import AzureAIEmbedding
 from .llms.azure_text import AzureTextCompletion
-from .llms.AzureOpenAI.audio_transcriptions import AzureAudioTranscription
-from .llms.AzureOpenAI.azure import AzureChatCompletion, _check_dynamic_azure_params
-from .llms.AzureOpenAI.chat.o1_handler import AzureOpenAIO1ChatCompletion
 from .llms.bedrock.chat import BedrockConverseLLM, BedrockLLM
 from .llms.bedrock.embed.embedding import BedrockEmbedding
 from .llms.bedrock.image.image_handler import BedrockImageGeneration


@@ -9,7 +9,7 @@ from litellm.secret_managers.main import get_secret_str
 from litellm.types.router import GenericLiteLLMParams
 from ..litellm_core_utils.litellm_logging import Logging as LiteLLMLogging
-from ..llms.AzureOpenAI.realtime.handler import AzureOpenAIRealtime
+from ..llms.azure.realtime.handler import AzureOpenAIRealtime
 from ..llms.OpenAI.realtime.handler import OpenAIRealtime
 from ..utils import client as wrapper_client


@@ -54,7 +54,7 @@ from litellm.caching.caching import DualCache, InMemoryCache, RedisCache
 from litellm.integrations.custom_logger import CustomLogger
 from litellm.litellm_core_utils.core_helpers import _get_parent_otel_span_from_kwargs
 from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLogging
-from litellm.llms.AzureOpenAI.azure import get_azure_ad_token_from_oidc
+from litellm.llms.azure.azure import get_azure_ad_token_from_oidc
 from litellm.router_strategy.least_busy import LeastBusyLoggingHandler
 from litellm.router_strategy.lowest_cost import LowestCostLoggingHandler
 from litellm.router_strategy.lowest_latency import LowestLatencyLoggingHandler


@@ -9,7 +9,7 @@ import openai
 import litellm
 from litellm import get_secret, get_secret_str
 from litellm._logging import verbose_router_logger
-from litellm.llms.AzureOpenAI.azure import get_azure_ad_token_from_oidc
+from litellm.llms.azure.azure import get_azure_ad_token_from_oidc
 from litellm.secret_managers.get_azure_ad_token_provider import (
     get_azure_ad_token_provider,
 )
@@ -359,7 +359,7 @@ class InitalizeOpenAISDKClient:
                 azure_client_params["azure_ad_token_provider"] = (
                     azure_ad_token_provider
                 )
-                from litellm.llms.AzureOpenAI.azure import (
+                from litellm.llms.azure.azure import (
                     select_azure_base_url_or_endpoint,
                 )


@@ -6,7 +6,7 @@ sys.path.insert(
 )  # Adds the parent directory to the system path
 import pytest
-from litellm.llms.AzureOpenAI.common_utils import process_azure_headers
+from litellm.llms.azure.common_utils import process_azure_headers
 from httpx import Headers
@@ -172,7 +172,7 @@ def test_azure_extra_headers(input, call_type):
     ],
 )
 def test_process_azure_endpoint_url(api_base, model, expected_endpoint):
-    from litellm.llms.AzureOpenAI.azure import AzureChatCompletion
+    from litellm.llms.azure.azure import AzureChatCompletion

     azure_chat_completion = AzureChatCompletion()
     input_args = {


@@ -231,7 +231,7 @@ def test_all_model_configs():
         optional_params={},
     ) == {"max_tokens": 10}
-    from litellm.llms.AzureOpenAI.chat.gpt_transformation import AzureOpenAIConfig
+    from litellm.llms.azure.chat.gpt_transformation import AzureOpenAIConfig

     assert "max_completion_tokens" in AzureOpenAIConfig().get_supported_openai_params()
     assert AzureOpenAIConfig().map_openai_params(


@@ -16,7 +16,7 @@ sys.path.insert(
 )  # Adds the parent directory to the system path
 import pytest
 import litellm
-from litellm.llms.AzureOpenAI.azure import get_azure_ad_token_from_oidc
+from litellm.llms.azure.azure import get_azure_ad_token_from_oidc
 from litellm.llms.bedrock.chat import BedrockConverseLLM, BedrockLLM
 from litellm.secret_managers.aws_secret_manager_v2 import AWSSecretsManagerV2
 from litellm.secret_managers.main import (
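
Because the change is a pure path rename with no behavioral difference, external code that reaches into these internal modules can bridge both layouts with a guarded import. A hedged compatibility sketch, not part of this commit:

# Hypothetical shim for callers that must run on both sides of the rename:
# try the new lowercase path first, fall back to the pre-rename package name.
try:
    from litellm.llms.azure.azure import get_azure_ad_token_from_oidc
except ImportError:
    from litellm.llms.AzureOpenAI.azure import get_azure_ad_token_from_oidc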