(Refactor) Code Quality improvement - remove /prompt_templates/, base_aws_llm.py from /llms folder (#7164)

* fix move base_aws_llm

* fix import

* update enforce llms folder style

* move prompt_templates

* update prompt_templates location

* fix imports

* fix imports

* fix imports

* fix imports

* fix checks
Ishaan Jaff, 2024-12-11 00:02:46 -08:00 (committed by GitHub)
parent b328d42ebc
commit b5d55688e5
64 changed files with 135 additions and 81 deletions
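Net effect for downstream imports: anything that previously pulled from litellm.llms.prompt_templates or litellm.llms.base_aws_llm now imports from the relocated modules. An illustrative before/after sketch (summarizing the diff below, not itself part of it):

# Before this commit:
#   from litellm.llms.prompt_templates.factory import prompt_factory
#   from litellm.llms.base_aws_llm import BaseAWSLLM
# After this commit:
from litellm.litellm_core_utils.prompt_templates.factory import prompt_factory
from litellm.llms.bedrock.base_aws_llm import BaseAWSLLM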

@@ -27,7 +27,7 @@ from litellm.llms.custom_httpx.http_handler import (
get_async_httpx_client,
httpxSpecialProvider,
)
from litellm.llms.prompt_templates.common_utils import get_content_from_model_response
from litellm.litellm_core_utils.prompt_templates.common_utils import get_content_from_model_response
from litellm.types.integrations.argilla import (
SUPPORTED_PAYLOAD_FIELDS,
ArgillaCredentialsObject,

@@ -21,7 +21,7 @@ import requests
import litellm
from litellm.litellm_core_utils.core_helpers import map_finish_reason
from litellm.llms.base_llm.transformation import BaseConfig, BaseLLMException
from litellm.llms.prompt_templates.factory import anthropic_messages_pt
from litellm.litellm_core_utils.prompt_templates.factory import anthropic_messages_pt
from litellm.types.llms.anthropic import (
AllAnthropicToolsValues,
AnthropicComputerTool,

@@ -17,7 +17,7 @@ from litellm.llms.base_llm.transformation import (
BaseLLMException,
LiteLLMLoggingObj,
)
from litellm.llms.prompt_templates.factory import custom_prompt, prompt_factory
from litellm.litellm_core_utils.prompt_templates.factory import custom_prompt, prompt_factory
from litellm.types.llms.openai import AllMessageValues
from litellm.types.utils import (
ChatCompletionToolCallChunk,

@@ -57,7 +57,7 @@ from litellm.types.utils import Choices, GenericStreamingChunk
from litellm.utils import CustomStreamWrapper, ModelResponse, Usage
from ...base import BaseLLM
from ...prompt_templates.factory import (
from litellm.litellm_core_utils.prompt_templates.factory import (
anthropic_messages_pt,
custom_prompt,
prompt_factory,

@@ -14,8 +14,9 @@ from ....types.llms.openai import (
ChatCompletionToolParam,
ChatCompletionToolParamFunctionChunk,
)
from ...base_llm.transformation import BaseConfig
from ...prompt_templates.factory import convert_to_azure_openai_messages
from litellm.litellm_core_utils.prompt_templates.factory import convert_to_azure_openai_messages
from ..common_utils import AzureOpenAIError
if TYPE_CHECKING:
@@ -25,7 +26,6 @@ if TYPE_CHECKING:
else:
LoggingClass = Any
class AzureOpenAIConfig(BaseConfig):
"""
Reference: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions

@@ -22,7 +22,7 @@ from litellm.utils import (
from ...base import BaseLLM
from ...openai.completion.handler import OpenAITextCompletion
from ...openai.completion.transformation import OpenAITextCompletionConfig
from ...prompt_templates.factory import custom_prompt, prompt_factory
from litellm.litellm_core_utils.prompt_templates.factory import custom_prompt, prompt_factory
from ..common_utils import AzureOpenAIError
openai_text_completion_config = OpenAITextCompletionConfig()

@@ -3,7 +3,7 @@ from typing import List, Optional, Tuple
import litellm
from litellm._logging import verbose_logger
from litellm.llms.openai.openai import OpenAIConfig
from litellm.llms.prompt_templates.common_utils import (
from litellm.litellm_core_utils.prompt_templates.common_utils import (
_audio_or_image_in_message_content,
convert_content_list_to_str,
)

@@ -10,7 +10,7 @@ from litellm._logging import verbose_logger
from litellm.caching.caching import DualCache, InMemoryCache
from litellm.secret_managers.main import get_secret, get_secret_str
from .base import BaseLLM
from litellm.llms.base import BaseLLM
if TYPE_CHECKING:
from botocore.credentials import Credentials

@@ -14,7 +14,7 @@ from litellm.llms.custom_httpx.http_handler import (
from litellm.types.utils import ModelResponse
from litellm.utils import CustomStreamWrapper, get_secret
from ...base_aws_llm import BaseAWSLLM
from ..base_aws_llm import BaseAWSLLM
from ..common_utils import BedrockError
from .invoke_handler import AWSEventStreamDecoder, MockResponseIterator, make_call

@@ -24,7 +24,7 @@ from litellm.types.llms.openai import (
from litellm.types.utils import ModelResponse, Usage
from litellm.utils import CustomStreamWrapper, add_dummy_tool, has_tool_call_blocks
from ...prompt_templates.factory import _bedrock_converse_messages_pt, _bedrock_tools_pt
from litellm.litellm_core_utils.prompt_templates.factory import _bedrock_converse_messages_pt, _bedrock_tools_pt
from ..common_utils import BedrockError, get_bedrock_tool_name

@@ -52,8 +52,8 @@ from litellm.types.llms.openai import (
from litellm.types.utils import GenericStreamingChunk as GChunk
from litellm.utils import CustomStreamWrapper, ModelResponse, Usage, get_secret
from ...base_aws_llm import BaseAWSLLM
from ...prompt_templates.factory import (
from ..base_aws_llm import BaseAWSLLM
from litellm.litellm_core_utils.prompt_templates.factory import (
_bedrock_converse_messages_pt,
_bedrock_tools_pt,
cohere_message_pt,

@@ -22,7 +22,7 @@ from litellm.secret_managers.main import get_secret
from litellm.types.llms.bedrock import AmazonEmbeddingRequest, CohereEmbeddingRequest
from litellm.types.utils import Embedding, EmbeddingResponse, Usage
from ...base_aws_llm import BaseAWSLLM
from ..base_aws_llm import BaseAWSLLM
from ..common_utils import BedrockError
from .amazon_titan_g1_transformation import AmazonTitanG1Config
from .amazon_titan_multimodal_transformation import (

@@ -16,7 +16,7 @@ from litellm.llms.custom_httpx.http_handler import (
)
from litellm.types.utils import ImageResponse
from ...base_aws_llm import BaseAWSLLM
from ..base_aws_llm import BaseAWSLLM
from ..common_utils import BedrockError
if TYPE_CHECKING:

@@ -18,7 +18,7 @@ from litellm.types.llms.bedrock import BedrockPreparedRequest, BedrockRerankRequ
from litellm.types.rerank import RerankRequest
from litellm.types.utils import RerankResponse
from ...base_aws_llm import BaseAWSLLM
from ..base_aws_llm import BaseAWSLLM
from ..common_utils import BedrockError
from .transformation import BedrockRerankConfig

@@ -7,7 +7,7 @@ import httpx
import litellm
from litellm.llms.base_llm.base_model_iterator import FakeStreamResponseIterator
from litellm.llms.base_llm.transformation import BaseConfig, BaseLLMException
from litellm.llms.prompt_templates.common_utils import convert_content_list_to_str
from litellm.litellm_core_utils.prompt_templates.common_utils import convert_content_list_to_str
from litellm.types.llms.openai import AllMessageValues
from litellm.types.utils import (
ChatCompletionToolCallChunk,

@@ -6,7 +6,7 @@ import httpx
import litellm
from litellm.llms.base_llm.transformation import BaseConfig, BaseLLMException
from litellm.llms.prompt_templates.factory import cohere_messages_pt_v2
from litellm.litellm_core_utils.prompt_templates.factory import cohere_messages_pt_v2
from litellm.types.llms.openai import AllMessageValues
from litellm.types.utils import ModelResponse, Usage

@@ -7,7 +7,7 @@ import httpx
import litellm
from litellm.llms.base_llm.transformation import BaseConfig, BaseLLMException
from litellm.llms.prompt_templates.common_utils import convert_content_list_to_str
from litellm.litellm_core_utils.prompt_templates.common_utils import convert_content_list_to_str
from litellm.types.llms.openai import AllMessageValues
from litellm.types.utils import (
ChatCompletionToolCallChunk,

@@ -45,8 +45,6 @@ from litellm.utils import (
)
from .base import BaseLLM
from .prompt_templates.factory import custom_prompt, prompt_factory
class CustomLLMError(Exception): # use this for all your exceptions
def __init__(

@@ -11,7 +11,7 @@ from litellm.types.llms.openai import AllMessageValues
from litellm.types.utils import ProviderField
from ...openai_like.chat.transformation import OpenAILikeChatConfig
from ...prompt_templates.common_utils import (
from litellm.litellm_core_utils.prompt_templates.common_utils import (
handle_messages_with_content_list_to_str_conversion,
strip_name_from_messages,
)

@@ -13,7 +13,7 @@ from litellm.types.llms.openai import AllMessageValues, ChatCompletionAssistantM
from ....utils import _remove_additional_properties, _remove_strict_from_schema
from ...openai.chat.gpt_transformation import OpenAIGPTConfig
from ...prompt_templates.common_utils import (
from litellm.litellm_core_utils.prompt_templates.common_utils import (
handle_messages_with_content_list_to_str_conversion,
)

@@ -40,7 +40,6 @@ from litellm.types.utils import Logprobs as TextCompletionLogprobs
from litellm.utils import Choices, CustomStreamWrapper, Message, ModelResponse, Usage
from ...base import BaseLLM
from ...prompt_templates.factory import custom_prompt, prompt_factory
from ..common_utils import HuggingfaceError, hf_task_list, hf_tasks
hf_chat_config = HuggingfaceConfig()

@@ -10,8 +10,8 @@ import httpx
import litellm
from litellm.litellm_core_utils.streaming_handler import CustomStreamWrapper
from litellm.llms.base_llm.transformation import BaseConfig, BaseLLMException
from litellm.llms.prompt_templates.common_utils import convert_content_list_to_str
from litellm.llms.prompt_templates.factory import custom_prompt, prompt_factory
from litellm.litellm_core_utils.prompt_templates.common_utils import convert_content_list_to_str
from litellm.litellm_core_utils.prompt_templates.factory import custom_prompt, prompt_factory
from litellm.secret_managers.main import get_secret_str
from litellm.types.llms.openai import AllMessageValues
from litellm.types.utils import Choices, Message, ModelResponse, Usage

@@ -10,7 +10,7 @@ import types
from typing import List, Literal, Optional, Tuple, Union
from litellm.llms.openai.chat.gpt_transformation import OpenAIGPTConfig
from litellm.llms.prompt_templates.common_utils import (
from litellm.litellm_core_utils.prompt_templates.common_utils import (
handle_messages_with_content_list_to_str_conversion,
strip_none_values_from_message,
)

@@ -5,7 +5,7 @@ from typing import TYPE_CHECKING, Any, List, Optional, Union
import httpx
from litellm.llms.base_llm.transformation import BaseConfig, BaseLLMException
from litellm.llms.prompt_templates.common_utils import convert_content_list_to_str
from litellm.litellm_core_utils.prompt_templates.common_utils import convert_content_list_to_str
from litellm.types.llms.openai import AllMessageValues
from litellm.utils import ModelResponse, Usage

@@ -18,7 +18,7 @@ from litellm.llms.custom_httpx.http_handler import get_async_httpx_client
from litellm.secret_managers.main import get_secret_str
from litellm.types.utils import ModelInfo, ProviderField, StreamingChoices
from ...prompt_templates.factory import custom_prompt, prompt_factory
from litellm.litellm_core_utils.prompt_templates.factory import custom_prompt, prompt_factory
from ..common_utils import OllamaError
from .transformation import OllamaConfig

@@ -9,7 +9,7 @@ import requests # type: ignore
from litellm.llms.custom_httpx.http_handler import HTTPHandler, _get_httpx_client
from litellm.utils import EmbeddingResponse, ModelResponse, Usage
from ...prompt_templates.factory import custom_prompt, prompt_factory
from litellm.litellm_core_utils.prompt_templates.factory import custom_prompt, prompt_factory
from ..common_utils import OobaboogaError
from .transformation import OobaboogaConfig

@@ -8,8 +8,6 @@ import httpx
import litellm
from litellm.llms.base_llm.transformation import BaseConfig, BaseLLMException
from litellm.llms.openai.chat.gpt_transformation import OpenAIGPTConfig
from litellm.llms.prompt_templates.common_utils import convert_content_list_to_str
from litellm.llms.prompt_templates.factory import custom_prompt, prompt_factory
from litellm.types.llms.openai import AllMessageValues
from litellm.types.utils import Choices, Message, ModelResponse, Usage
from litellm.utils import token_counter

@@ -14,7 +14,7 @@ from litellm.types.llms.openai import (
)
from litellm.types.utils import Choices, Message, ModelResponse, TextCompletionResponse
from ...prompt_templates.common_utils import convert_content_list_to_str
from litellm.litellm_core_utils.prompt_templates.common_utils import convert_content_list_to_str
from ..chat.gpt_transformation import OpenAIGPTConfig
from ..common_utils import OpenAIError
from .utils import is_tokens_or_list_of_tokens

@@ -45,7 +45,7 @@ from litellm.utils import (
from ...types.llms.openai import *
from ..base import BaseLLM
from ..prompt_templates.factory import custom_prompt, prompt_factory
from litellm.litellm_core_utils.prompt_templates.factory import custom_prompt, prompt_factory
from .chat.gpt_transformation import OpenAIGPTConfig
from .common_utils import OpenAIError, drop_params_from_unprocessable_entity_error

@@ -10,7 +10,7 @@ import requests # type: ignore
import litellm
from litellm.utils import ModelResponse, Usage
from .prompt_templates.factory import custom_prompt, prompt_factory
from litellm.litellm_core_utils.prompt_templates.factory import custom_prompt, prompt_factory
class PetalsError(Exception):

@@ -26,7 +26,7 @@ from litellm.llms.custom_httpx.http_handler import (
from litellm.utils import Choices, CustomStreamWrapper, Message, ModelResponse, Usage
from .base import BaseLLM
from .prompt_templates.factory import custom_prompt, prompt_factory
from litellm.litellm_core_utils.prompt_templates.factory import custom_prompt, prompt_factory
class PredibaseError(Exception):

@@ -17,7 +17,6 @@ from litellm.llms.custom_httpx.http_handler import (
from litellm.types.llms.openai import AllMessageValues
from litellm.utils import CustomStreamWrapper, ModelResponse, Usage
from ...prompt_templates.factory import custom_prompt, prompt_factory
from ..common_utils import ReplicateError
from .transformation import ReplicateConfig

@@ -5,8 +5,8 @@ import httpx
import litellm
from litellm.llms.base_llm.transformation import BaseConfig, BaseLLMException
from litellm.llms.prompt_templates.common_utils import convert_content_list_to_str
from litellm.llms.prompt_templates.factory import custom_prompt, prompt_factory
from litellm.litellm_core_utils.prompt_templates.common_utils import convert_content_list_to_str
from litellm.litellm_core_utils.prompt_templates.factory import custom_prompt, prompt_factory
from litellm.types.llms.openai import AllMessageValues
from litellm.types.utils import Choices, Message, ModelResponse, Usage
from litellm.utils import token_counter

@@ -6,8 +6,8 @@ import httpx
from litellm.utils import ModelResponse, get_secret
from ...base_aws_llm import BaseAWSLLM
from ...prompt_templates.factory import custom_prompt, prompt_factory
from litellm.llms.bedrock.base_aws_llm import BaseAWSLLM
from litellm.litellm_core_utils.prompt_templates.factory import custom_prompt, prompt_factory
from ..common_utils import AWSEventStreamDecoder
from .transformation import SagemakerChatConfig

@@ -31,8 +31,8 @@ from litellm.utils import (
get_secret,
)
from ...base_aws_llm import BaseAWSLLM
from ...prompt_templates.factory import custom_prompt, prompt_factory
from litellm.llms.bedrock.base_aws_llm import BaseAWSLLM
from litellm.litellm_core_utils.prompt_templates.factory import custom_prompt, prompt_factory
from ..common_utils import AWSEventStreamDecoder, SagemakerError
from .transformation import SagemakerConfig

@@ -14,7 +14,7 @@ from httpx._models import Headers, Response
import litellm
from litellm.litellm_core_utils.asyncify import asyncify
from litellm.llms.base_llm.transformation import BaseConfig, BaseLLMException
from litellm.llms.prompt_templates.factory import custom_prompt, prompt_factory
from litellm.litellm_core_utils.prompt_templates.factory import custom_prompt, prompt_factory
from litellm.types.llms.openai import AllMessageValues
from litellm.types.utils import Usage

@@ -33,7 +33,7 @@ from litellm.utils import (
)
from .base import BaseLLM
from .prompt_templates.factory import custom_prompt, prompt_factory
from litellm.litellm_core_utils.prompt_templates.factory import custom_prompt, prompt_factory
class TextCompletionCodestralError(Exception):

@@ -25,7 +25,7 @@ from litellm.utils import (
)
from .base import BaseLLM
from .prompt_templates.factory import custom_prompt, prompt_factory
from litellm.litellm_core_utils.prompt_templates.factory import custom_prompt, prompt_factory
class TritonError(Exception):

@@ -13,7 +13,7 @@ from pydantic import BaseModel
import litellm
from litellm._logging import verbose_logger
from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
from litellm.llms.prompt_templates.factory import (
from litellm.litellm_core_utils.prompt_templates.factory import (
convert_to_anthropic_image_obj,
convert_to_gemini_tool_call_invoke,
convert_to_gemini_tool_call_result,

@@ -37,7 +37,7 @@ from litellm.llms.custom_httpx.http_handler import (
HTTPHandler,
get_async_httpx_client,
)
from litellm.llms.prompt_templates.factory import (
from litellm.litellm_core_utils.prompt_templates.factory import (
convert_generic_image_chunk_to_openai_image_obj,
convert_to_anthropic_image_obj,
)

@@ -15,7 +15,7 @@ import litellm
from litellm._logging import verbose_logger
from litellm.litellm_core_utils.core_helpers import map_finish_reason
from litellm.llms.custom_httpx.http_handler import _DEFAULT_TTL_FOR_HTTPX_CLIENTS
from litellm.llms.prompt_templates.factory import (
from litellm.litellm_core_utils.prompt_templates.factory import (
convert_to_anthropic_image_obj,
convert_to_gemini_tool_call_invoke,
convert_to_gemini_tool_call_result,

@@ -24,7 +24,7 @@ from litellm.types.utils import ResponseFormatChunk
from litellm.utils import CustomStreamWrapper, ModelResponse, Usage
from ....anthropic.chat.transformation import AnthropicConfig
from ....prompt_templates.factory import (
from litellm.litellm_core_utils.prompt_templates.factory import (
construct_tool_use_system_prompt,
contains_tag,
custom_prompt,

@@ -9,7 +9,7 @@ import requests # type: ignore
from litellm.utils import ModelResponse, Usage
from ...prompt_templates.factory import custom_prompt, prompt_factory
from litellm.litellm_core_utils.prompt_templates.factory import custom_prompt, prompt_factory
llm = None

@@ -34,7 +34,7 @@ from litellm.types.llms.watsonx import WatsonXAIEndpoint
from litellm.utils import EmbeddingResponse, ModelResponse, Usage, map_finish_reason
from ...base import BaseLLM
from ...prompt_templates import factory as ptf
from litellm.litellm_core_utils.prompt_templates import factory as ptf
from ..common_utils import WatsonXAIError, _get_api_params, generate_iam_token
from .transformation import IBMWatsonXAIConfig

@@ -36,7 +36,7 @@ from litellm.utils import EmbeddingResponse, ModelResponse, Usage, map_finish_re
from ...base import BaseLLM
from ...base_llm.transformation import BaseConfig
from ...prompt_templates import factory as ptf
from litellm.litellm_core_utils.prompt_templates import factory as ptf
from ..common_utils import WatsonXAIError, _get_api_params, generate_iam_token
if TYPE_CHECKING:

@@ -57,7 +57,7 @@ from litellm.litellm_core_utils.mock_functions import (
mock_image_generation,
)
from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
from litellm.llms.prompt_templates.common_utils import get_content_from_model_response
from litellm.litellm_core_utils.prompt_templates.common_utils import get_content_from_model_response
from litellm.secret_managers.main import get_secret_str
from litellm.utils import (
CustomStreamWrapper,
@@ -110,8 +110,8 @@ from .llms.openai.openai import OpenAIChatCompletion
from .llms.openai_like.chat.handler import OpenAILikeChatHandler
from .llms.openai_like.embedding.handler import OpenAILikeEmbeddingHandler
from .llms.predibase import PredibaseChatCompletion
from .llms.prompt_templates.common_utils import get_completion_messages
from .llms.prompt_templates.factory import (
from .litellm_core_utils.prompt_templates.common_utils import get_completion_messages
from .litellm_core_utils.prompt_templates.factory import (
custom_prompt,
function_call_prompt,
map_system_message_pt,

@@ -30,7 +30,7 @@ from litellm.integrations.custom_guardrail import CustomGuardrail
from litellm.litellm_core_utils.logging_utils import (
convert_litellm_response_object_to_str,
)
from litellm.llms.base_aws_llm import BaseAWSLLM
from litellm.llms.bedrock.base_aws_llm import BaseAWSLLM
from litellm.llms.custom_httpx.http_handler import (
AsyncHTTPHandler,
get_async_httpx_client,

@@ -14,7 +14,7 @@ import litellm
from litellm._logging import verbose_proxy_logger
from litellm.caching.caching import DualCache
from litellm.integrations.custom_guardrail import CustomGuardrail
from litellm.llms.prompt_templates.common_utils import (
from litellm.litellm_core_utils.prompt_templates.common_utils import (
convert_openai_message_to_only_content_messages,
get_content_from_model_response,
)

@@ -20,7 +20,7 @@ import litellm
from litellm._logging import verbose_proxy_logger
from litellm.caching.caching import DualCache
from litellm.integrations.custom_logger import CustomLogger
from litellm.llms.prompt_templates.factory import prompt_injection_detection_default_pt
from litellm.litellm_core_utils.prompt_templates.factory import prompt_injection_detection_default_pt
from litellm.proxy._types import LiteLLMPromptInjectionParams, UserAPIKeyAuth
from litellm.utils import get_formatted_prompt

@@ -26,14 +26,13 @@ import httpx
import litellm
from litellm._logging import verbose_logger
from litellm.llms.base_aws_llm import BaseAWSLLM
from litellm.llms.custom_httpx.http_handler import (
_get_httpx_client,
get_async_httpx_client,
)
from litellm.proxy._types import KeyManagementSystem
from litellm.types.llms.custom_http import httpxSpecialProvider
from litellm.llms.bedrock.base_aws_llm import BaseAWSLLM
class AWSSecretsManagerV2(BaseAWSLLM):
@classmethod

@@ -0,0 +1,61 @@
import ast
import os
import sys

sys.path.insert(
    0, os.path.abspath("../..")
)

import litellm

ALLOWED_FILES_IN_LLMS_FOLDER = ["__init__", "base", "base_llm", "custom_httpx", "custom_llm", "deprecated_providers"]


def get_unique_names_from_llms_dir(base_dir="./litellm/llms/"):
    """
    Returns a set of unique file and folder names from the root level of litellm/llms directory,
    excluding file extensions and __init__.py
    """
    unique_names = set()

    if not os.path.exists(base_dir):
        print(f"Warning: Directory {base_dir} does not exist.")
        return unique_names

    # Get only root level items
    items = os.listdir(base_dir)

    for item in items:
        item_path = os.path.join(base_dir, item)
        if os.path.isdir(item_path):
            if item != "__pycache__":
                unique_names.add(item)
        elif item.endswith(".py") and item != "__init__.py":
            name_without_ext = os.path.splitext(item)[0]
            unique_names.add(name_without_ext)

    return unique_names


def run_lint_check(unique_names):
    _all_litellm_providers = [str(provider.value) for provider in litellm.LlmProviders]

    violations = []
    for name in unique_names:
        if name.lower() not in _all_litellm_providers and name not in ALLOWED_FILES_IN_LLMS_FOLDER:
            violations.append(name)

    if len(violations) > 0:
        raise ValueError(f"There are {len(violations)} violations in the llms folder. \n\n {violations}. \n\n Valid providers: {_all_litellm_providers}")


def main():
    llms_dir = "./litellm/llms/"  # Update this path if needed
    llms_dir = "../../litellm/llms/"  # LOCAL TESTING

    unique_names = get_unique_names_from_llms_dir(llms_dir)
    print("Unique names in llms directory:", sorted(list(unique_names)))
    run_lint_check(unique_names)


if __name__ == "__main__":
    main()
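The new check walks the top level of litellm/llms/ and flags any file or folder whose name is neither a provider registered in litellm.LlmProviders nor listed in ALLOWED_FILES_IN_LLMS_FOLDER; this is what keeps prompt_templates and base_aws_llm.py from reappearing under /llms. A hypothetical invocation against example inputs (not part of the committed file):

# run_lint_check({"anthropic", "custom_httpx"})      -> passes: provider name plus an allow-listed folder
# run_lint_check({"anthropic", "prompt_templates"})  -> raises ValueError, since "prompt_templates"
#                                                       is neither a provider nor allow-listed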

@@ -670,7 +670,7 @@ class TestAnthropicCompletion(BaseLLMChatTest):
def test_tool_call_no_arguments(self, tool_call_no_arguments):
"""Test that tool calls with no arguments is translated correctly. Relevant issue: https://github.com/BerriAI/litellm/issues/6833"""
from litellm.llms.prompt_templates.factory import (
from litellm.litellm_core_utils.prompt_templates.factory import (
convert_to_anthropic_tool_invoke,
)
@@ -767,7 +767,7 @@ def test_convert_tool_response_to_message_no_arguments():
def test_anthropic_tool_with_image():
from litellm.llms.prompt_templates.factory import prompt_factory
from litellm.litellm_core_utils.prompt_templates.factory import prompt_factory
import json
b64_data = "iVBORw0KGgoAAAANSUhEu6U3//C9t/fKv5wDgpP1r5796XwC4zyH1D565bHGDqbY85AMb0nIQe+u3J390Xbtb9XgXxcK0/aqRXpdYcwgARbCN03FJk"

@@ -33,7 +33,7 @@ from litellm import (
)
from litellm.llms.bedrock.chat import BedrockLLM
from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
from litellm.llms.prompt_templates.factory import _bedrock_tools_pt
from litellm.litellm_core_utils.prompt_templates.factory import _bedrock_tools_pt
from base_llm_unit_tests import BaseLLMChatTest
from base_rerank_unit_tests import BaseLLMRerankTest
@@ -1262,7 +1262,7 @@ def test_bedrock_get_base_model(model, expected_base_model):
assert litellm.AmazonConverseConfig()._get_base_model(model) == expected_base_model
from litellm.llms.prompt_templates.factory import _bedrock_converse_messages_pt
from litellm.litellm_core_utils.prompt_templates.factory import _bedrock_converse_messages_pt
def test_bedrock_converse_translation_tool_message():
@@ -1320,7 +1320,7 @@ def test_base_aws_llm_get_credentials():
import boto3
from litellm.llms.base_aws_llm import BaseAWSLLM
from litellm.llms.bedrock.base_aws_llm import BaseAWSLLM
start_time = time.time()
session = boto3.Session(
@@ -1603,7 +1603,7 @@ def test_bedrock_completion_test_3():
Check if content in tool result is formatted correctly
"""
from litellm.types.utils import ChatCompletionMessageToolCall, Function, Message
from litellm.llms.prompt_templates.factory import _bedrock_converse_messages_pt
from litellm.litellm_core_utils.prompt_templates.factory import _bedrock_converse_messages_pt
messages = [
{

@@ -7,7 +7,7 @@ class TestGoogleAIStudioGemini(BaseLLMChatTest):
def test_tool_call_no_arguments(self, tool_call_no_arguments):
"""Test that tool calls with no arguments is translated correctly. Relevant issue: https://github.com/BerriAI/litellm/issues/6833"""
from litellm.llms.prompt_templates.factory import (
from litellm.litellm_core_utils.prompt_templates.factory import (
convert_to_gemini_tool_call_invoke,
)

@@ -13,7 +13,7 @@ sys.path.insert(0, os.path.abspath("../.."))
from unittest.mock import MagicMock, patch
import litellm
from litellm.llms.prompt_templates.factory import map_system_message_pt
from litellm.litellm_core_utils.prompt_templates.factory import map_system_message_pt
from litellm.types.completion import (
ChatCompletionMessageParam,
ChatCompletionSystemMessageParam,

@@ -9,10 +9,10 @@ sys.path.insert(0, os.path.abspath("../.."))
from typing import Union
# from litellm.llms.prompt_templates.factory import prompt_factory
# from litellm.litellm_core_utils.prompt_templates.factory import prompt_factory
import litellm
from litellm import completion
from litellm.llms.prompt_templates.factory import (
from litellm.litellm_core_utils.prompt_templates.factory import (
_bedrock_tools_pt,
anthropic_messages_pt,
anthropic_pt,
@@ -22,7 +22,7 @@ from litellm.llms.prompt_templates.factory import (
llama_2_chat_pt,
prompt_factory,
)
from litellm.llms.prompt_templates.common_utils import (
from litellm.litellm_core_utils.prompt_templates.common_utils import (
get_completion_messages,
)
from litellm.llms.vertex_ai_and_google_ai_studio.gemini.transformation import (
@@ -327,7 +327,7 @@ def test_bedrock_parallel_tool_calling_pt(provider):
"""
Make sure parallel tool call blocks are merged correctly - https://github.com/BerriAI/litellm/issues/5277
"""
from litellm.llms.prompt_templates.factory import _bedrock_converse_messages_pt
from litellm.litellm_core_utils.prompt_templates.factory import _bedrock_converse_messages_pt
from litellm.types.utils import ChatCompletionMessageToolCall, Function, Message
messages = [
@@ -682,7 +682,7 @@ def test_alternating_roles_e2e():
def test_just_system_message():
from litellm.llms.prompt_templates.factory import _bedrock_converse_messages_pt
from litellm.litellm_core_utils.prompt_templates.factory import _bedrock_converse_messages_pt
with pytest.raises(litellm.BadRequestError) as e:
_bedrock_converse_messages_pt(
@@ -694,7 +694,7 @@ def test_just_system_message():
def test_convert_generic_image_chunk_to_openai_image_obj():
from litellm.llms.prompt_templates.factory import (
from litellm.litellm_core_utils.prompt_templates.factory import (
convert_generic_image_chunk_to_openai_image_obj,
convert_to_anthropic_image_obj,
)

@@ -23,7 +23,7 @@ import pytest
import litellm
from litellm import RateLimitError, Timeout, completion, completion_cost, embedding
from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
from litellm.llms.prompt_templates.factory import anthropic_messages_pt
from litellm.litellm_core_utils.prompt_templates.factory import anthropic_messages_pt
# litellm.num_retries =3
litellm.cache = None

@@ -27,7 +27,7 @@ from respx import MockRouter
import litellm
from litellm import RateLimitError, Timeout, completion, completion_cost, embedding
from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
from litellm.llms.prompt_templates.factory import anthropic_messages_pt
from litellm.litellm_core_utils.prompt_templates.factory import anthropic_messages_pt
from litellm.router import Router

@@ -22,7 +22,7 @@ import pytest
import litellm
from litellm import RateLimitError, Timeout, completion, completion_cost, embedding
from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
from litellm.llms.prompt_templates.factory import anthropic_messages_pt
from litellm.litellm_core_utils.prompt_templates.factory import anthropic_messages_pt
# litellm.num_retries=3
@@ -696,7 +696,7 @@ async def test_anthropic_no_content_error():
def test_parse_xml_params():
from litellm.llms.prompt_templates.factory import parse_xml_params
from litellm.litellm_core_utils.prompt_templates.factory import parse_xml_params
## SCENARIO 1 ## - W/ ARRAY
xml_content = """<invoke><tool_name>return_list_of_str</tool_name>\n<parameters>\n<value>\n<item>apple</item>\n<item>banana</item>\n<item>orange</item>\n</value>\n</parameters></invoke>"""

@@ -9,7 +9,7 @@ sys.path.insert(
0, os.path.abspath("../..")
) # Adds the parent directory to the system path
import pytest
from litellm.llms.prompt_templates.factory import prompt_factory
from litellm.litellm_core_utils.prompt_templates.factory import prompt_factory
def test_prompt_formatting():

@@ -23,7 +23,7 @@ import pytest
import litellm
from litellm import RateLimitError, Timeout, completion, completion_cost, embedding
from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
from litellm.llms.prompt_templates.factory import anthropic_messages_pt
from litellm.litellm_core_utils.prompt_templates.factory import anthropic_messages_pt
# litellm.num_retries =3
litellm.cache = None