Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 18:54:30 +00:00)
(Refactor) Code Quality improvement - rename text_completion_codestral.py -> codestral/completion/ (#7172)
* rename files
* fix codestral fim organization
* fix CodestralTextCompletionConfig
* fix import CodestralTextCompletion
* fix BaseLLM
* fix imports
* fix CodestralTextCompletionConfig
* fix imports CodestralTextCompletion
parent 400eb28a91
commit 78d132c1fb
10 changed files with 164 additions and 162 deletions
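For code inside the package, the practical effect of this rename is a new import path for the Codestral text-completion (FIM) handler. A minimal before/after sketch, based only on the import lines changed in the diff below:

    # Old flat module, removed by this commit:
    #   from litellm.llms.text_completion_codestral import CodestralTextCompletion

    # New provider-scoped package introduced by this commit:
    from litellm.llms.codestral.completion.handler import CodestralTextCompletion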
@@ -56,8 +56,10 @@ from litellm.litellm_core_utils.mock_functions import (
     mock_embedding,
     mock_image_generation,
 )
+from litellm.litellm_core_utils.prompt_templates.common_utils import (
+    get_content_from_model_response,
+)
 from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
-from litellm.litellm_core_utils.prompt_templates.common_utils import get_content_from_model_response
 from litellm.secret_managers.main import get_secret_str
 from litellm.utils import (
     CustomStreamWrapper,
@@ -82,6 +84,15 @@ from litellm.utils import (

 from ._logging import verbose_logger
 from .caching.caching import disable_cache, enable_cache, update_cache
+from .litellm_core_utils.prompt_templates.common_utils import get_completion_messages
+from .litellm_core_utils.prompt_templates.factory import (
+    custom_prompt,
+    function_call_prompt,
+    map_system_message_pt,
+    ollama_pt,
+    prompt_factory,
+    stringify_json_tool_call_content,
+)
 from .litellm_core_utils.streaming_chunk_builder_utils import ChunkProcessor
 from .llms import baseten, maritalk, ollama_chat, petals
 from .llms.anthropic.chat import AnthropicChatCompletion
@@ -93,6 +104,7 @@ from .llms.azure_ai.embed import AzureAIEmbedding
 from .llms.bedrock.chat import BedrockConverseLLM, BedrockLLM
 from .llms.bedrock.embed.embedding import BedrockEmbedding
 from .llms.bedrock.image.image_handler import BedrockImageGeneration
+from .llms.codestral.completion.handler import CodestralTextCompletion
 from .llms.cohere.embed import handler as cohere_embed
 from .llms.custom_httpx.llm_http_handler import BaseLLMHTTPHandler
 from .llms.custom_llm import CustomLLM, custom_chat_llm_router
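The line added above is the core of the rename: the handler now lives under a provider-scoped codestral/completion/ package. The commit message also touches CodestralTextCompletionConfig; a sketch of the presumed layout, where handler.py is confirmed by the imports in this diff and the transformation.py module name for the config is an assumption:

    # Presumed package layout after the rename:
    #   litellm/llms/codestral/completion/
    #       handler.py         # CodestralTextCompletion (confirmed by this diff)
    #       transformation.py  # CodestralTextCompletionConfig (assumed module name)

    from litellm.llms.codestral.completion.handler import CodestralTextCompletion

    try:
        # Assumed post-refactor location of the FIM config.
        from litellm.llms.codestral.completion.transformation import (
            CodestralTextCompletionConfig,
        )
    except ImportError:
        CodestralTextCompletionConfig = None  # the assumed config path does not exist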
@@ -102,33 +114,21 @@ from .llms.deprecated_providers import palm, aleph_alpha
 from .llms.groq.chat.handler import GroqChatCompletion
 from .llms.huggingface.chat.handler import Huggingface
 from .llms.nlp_cloud.chat.handler import completion as nlp_cloud_chat_completion
-from .llms.oobabooga.chat import oobabooga
 from .llms.ollama.completion import handler as ollama
-from .llms.openai.transcriptions.handler import OpenAIAudioTranscription
+from .llms.oobabooga.chat import oobabooga
 from .llms.openai.completion.handler import OpenAITextCompletion
 from .llms.openai.openai import OpenAIChatCompletion
+from .llms.openai.transcriptions.handler import OpenAIAudioTranscription
 from .llms.openai_like.chat.handler import OpenAILikeChatHandler
 from .llms.openai_like.embedding.handler import OpenAILikeEmbeddingHandler
 from .llms.predibase import PredibaseChatCompletion
-from .litellm_core_utils.prompt_templates.common_utils import get_completion_messages
-from .litellm_core_utils.prompt_templates.factory import (
-    custom_prompt,
-    function_call_prompt,
-    map_system_message_pt,
-    ollama_pt,
-    prompt_factory,
-    stringify_json_tool_call_content,
-)
+from .llms.replicate.chat.handler import completion as replicate_chat_completion
 from .llms.sagemaker.chat.handler import SagemakerChatHandler
 from .llms.sagemaker.completion.handler import SagemakerLLM
-from .llms.replicate.chat.handler import completion as replicate_chat_completion
-from .llms.text_completion_codestral import CodestralTextCompletion
 from .llms.together_ai.completion.handler import TogetherAITextCompletion
 from .llms.triton import TritonChatCompletion
 from .llms.vertex_ai import vertex_ai_non_gemini
-from .llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
-    VertexLLM,
-)
+from .llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import VertexLLM
 from .llms.vertex_ai.gemini_embeddings.batch_embed_content_handler import (
     GoogleBatchEmbeddings,
 )
@@ -138,18 +138,10 @@ from .llms.vertex_ai.image_generation.image_generation_handler import (
 from .llms.vertex_ai.multimodal_embeddings.embedding_handler import (
     VertexMultimodalEmbedding,
 )
-from .llms.vertex_ai.text_to_speech.text_to_speech_handler import (
-    VertexTextToSpeechAPI,
-)
-from .llms.vertex_ai.vertex_ai_partner_models.main import (
-    VertexAIPartnerModels,
-)
-from .llms.vertex_ai.vertex_embeddings.embedding_handler import (
-    VertexEmbedding,
-)
-from .llms.vertex_ai.vertex_model_garden.main import (
-    VertexAIModelGardenModels,
-)
+from .llms.vertex_ai.text_to_speech.text_to_speech_handler import VertexTextToSpeechAPI
+from .llms.vertex_ai.vertex_ai_partner_models.main import VertexAIPartnerModels
+from .llms.vertex_ai.vertex_embeddings.embedding_handler import VertexEmbedding
+from .llms.vertex_ai.vertex_model_garden.main import VertexAIModelGardenModels
 from .llms.vllm.completion import handler as vllm_handler
 from .llms.watsonx.chat.handler import WatsonXChatHandler
 from .llms.watsonx.completion.handler import IBMWatsonXAI
@@ -2434,9 +2426,7 @@ def completion( # type: ignore # noqa: PLR0915

             ## RESPONSE OBJECT
             response = model_response
-        elif (
-            custom_llm_provider == "sagemaker"
-        ):
+        elif custom_llm_provider == "sagemaker":
             # boto3 reads keys from .env
             model_response = sagemaker_llm.completion(
                 model=model,
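As a quick sanity check on the rename (a sketch, not part of this commit), the handler should now resolve only from the new package path, and the old flat module should be gone:

    import importlib.util

    NEW_MODULE = "litellm.llms.codestral.completion.handler"
    OLD_MODULE = "litellm.llms.text_completion_codestral"  # removed by this commit

    # find_spec returns None when the final module component cannot be found.
    assert importlib.util.find_spec(NEW_MODULE) is not None, "new codestral handler missing"
    assert importlib.util.find_spec(OLD_MODULE) is None, "old flat module still importable"

    from litellm.llms.codestral.completion.handler import CodestralTextCompletion
    print("imported from:", CodestralTextCompletion.__module__)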