mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 18:54:30 +00:00
Litellm dev 12 25 2025 p2 (#7420)
* test: add new test image embedding to base llm unit tests Addresses https://github.com/BerriAI/litellm/issues/6515 * fix(bedrock/embed/multimodal-embeddings): strip data prefix from image urls for bedrock multimodal embeddings Fix https://github.com/BerriAI/litellm/issues/6515 * feat: initial commit for fireworks ai audio transcription support Relevant issue: https://github.com/BerriAI/litellm/issues/7134 * test: initial fireworks ai test * feat(fireworks_ai/): implemented fireworks ai audio transcription config * fix(utils.py): register fireworks ai audio transcription config, in config manager * fix(utils.py): add fireworks ai param translation to 'get_optional_params_transcription' * refactor(fireworks_ai/): define text completion route with model name handling moves model name handling to specific fireworks routes, as required by their api * refactor(fireworks_ai/chat): define transform_Request - allows fixing model if accounts/ is missing * fix: fix linting errors * fix: fix linting errors * fix: fix linting errors * fix: fix linting errors * fix(handler.py): fix linting errors * fix(main.py): fix tgai text completion route * refactor(together_ai/completion): refactors together ai text completion route to just use provider transform request * refactor: move test_fine_tuning_api out of local_testing reduces local testing ci/cd time
This commit is contained in:
parent
9237357bcc
commit
760328b6ad
33 changed files with 709 additions and 177 deletions
|
@ -110,10 +110,12 @@ from litellm.router_utils.get_retry_from_policy import (
|
|||
from litellm.secret_managers.main import get_secret
|
||||
from litellm.types.llms.openai import (
|
||||
AllMessageValues,
|
||||
AllPromptValues,
|
||||
ChatCompletionAssistantToolCall,
|
||||
ChatCompletionNamedToolChoiceParam,
|
||||
ChatCompletionToolParam,
|
||||
ChatCompletionToolParamFunctionChunk,
|
||||
OpenAITextCompletionUserMessage,
|
||||
)
|
||||
from litellm.types.rerank import RerankResponse
|
||||
from litellm.types.utils import FileTypes # type: ignore
|
||||
|
@ -169,7 +171,11 @@ from typing import (
|
|||
|
||||
from openai import OpenAIError as OriginalError
|
||||
|
||||
from litellm.llms.base_llm.audio_transcription.transformation import (
|
||||
BaseAudioTranscriptionConfig,
|
||||
)
|
||||
from litellm.llms.base_llm.chat.transformation import BaseConfig
|
||||
from litellm.llms.base_llm.completion.transformation import BaseTextCompletionConfig
|
||||
from litellm.llms.base_llm.embedding.transformation import BaseEmbeddingConfig
|
||||
from litellm.llms.base_llm.rerank.transformation import BaseRerankConfig
|
||||
|
||||
|
@ -1825,6 +1831,19 @@ def supports_vision(model: str, custom_llm_provider: Optional[str] = None) -> bo
|
|||
return False
|
||||
|
||||
|
||||
def supports_embedding_image_input(
    model: str, custom_llm_provider: Optional[str] = None
) -> bool:
    """
    Return True if the given model accepts image inputs for embedding requests.

    Delegates to the shared capability-lookup helper, checking the
    'supports_embedding_image_input' flag in the model-info table.
    """
    supports_image_embeddings: bool = _supports_factory(
        model=model,
        custom_llm_provider=custom_llm_provider,
        key="supports_embedding_image_input",
    )
    return supports_image_embeddings
|
||||
|
||||
|
||||
def supports_parallel_function_calling(model: str):
|
||||
"""
|
||||
Check if the given model supports parallel function calling and return True if it does, False otherwise.
|
||||
|
@ -2081,6 +2100,13 @@ def get_optional_params_transcription(
|
|||
)
|
||||
return non_default_params
|
||||
|
||||
provider_config: Optional[BaseAudioTranscriptionConfig] = None
|
||||
if custom_llm_provider is not None:
|
||||
provider_config = ProviderConfigManager.get_provider_audio_transcription_config(
|
||||
model=model,
|
||||
provider=LlmProviders(custom_llm_provider),
|
||||
)
|
||||
|
||||
if custom_llm_provider == "openai" or custom_llm_provider == "azure":
|
||||
optional_params = non_default_params
|
||||
elif custom_llm_provider == "groq":
|
||||
|
@ -2092,6 +2118,15 @@ def get_optional_params_transcription(
|
|||
model=model,
|
||||
drop_params=drop_params if drop_params is not None else False,
|
||||
)
|
||||
elif provider_config is not None: # handles fireworks ai, and any future providers
|
||||
supported_params = provider_config.get_supported_openai_params(model=model)
|
||||
_check_valid_arg(supported_params=supported_params)
|
||||
optional_params = provider_config.map_openai_params(
|
||||
non_default_params=non_default_params,
|
||||
optional_params=optional_params,
|
||||
model=model,
|
||||
drop_params=drop_params if drop_params is not None else False,
|
||||
)
|
||||
for k in passed_params.keys(): # pass additional kwargs without modification
|
||||
if k not in default_params.keys():
|
||||
optional_params[k] = passed_params[k]
|
||||
|
@ -4432,6 +4467,9 @@ def _get_model_info_helper( # noqa: PLR0915
|
|||
supports_audio_input=_model_info.get("supports_audio_input", False),
|
||||
supports_audio_output=_model_info.get("supports_audio_output", False),
|
||||
supports_pdf_input=_model_info.get("supports_pdf_input", False),
|
||||
supports_embedding_image_input=_model_info.get(
|
||||
"supports_embedding_image_input", False
|
||||
),
|
||||
tpm=_model_info.get("tpm", None),
|
||||
rpm=_model_info.get("rpm", None),
|
||||
)
|
||||
|
@ -6000,6 +6038,15 @@ def is_base64_encoded(s: str) -> bool:
|
|||
return False
|
||||
|
||||
|
||||
def get_base64_str(s: str) -> str:
    """
    Return the raw base64 payload from *s*.

    Accepts either a bare base64 string, or a data URI of the form
    "data:image/png;base64,<b64str>"; for the latter, the segment
    following the first comma is returned.
    """
    if "," not in s:
        # No data-URI prefix present; the string is already bare base64.
        return s
    return s.split(",")[1]
|
||||
|
||||
|
||||
def has_tool_call_blocks(messages: List[AllMessageValues]) -> bool:
|
||||
"""
|
||||
Returns true, if messages has tool call blocks.
|
||||
|
@ -6268,6 +6315,26 @@ class ProviderConfigManager:
|
|||
return litellm.InfinityRerankConfig()
|
||||
return litellm.CohereRerankConfig()
|
||||
|
||||
@staticmethod
def get_provider_audio_transcription_config(
    model: str,
    provider: LlmProviders,
) -> Optional[BaseAudioTranscriptionConfig]:
    """
    Return the audio-transcription transformation config for *provider*.

    Only Fireworks AI currently ships a dedicated transcription config;
    every other provider yields None (callers fall back to default handling).
    """
    if provider == litellm.LlmProviders.FIREWORKS_AI:
        return litellm.FireworksAIAudioTranscriptionConfig()
    return None
|
||||
|
||||
@staticmethod
def get_provider_text_completion_config(
    model: str,
    provider: LlmProviders,
) -> BaseTextCompletionConfig:
    """
    Return the text-completion transformation config for *provider*.

    Fireworks AI and Together AI have provider-specific configs; any
    other provider falls back to the OpenAI-compatible default.
    """
    # Dispatch table instead of an if/elif chain; .get() supplies the
    # OpenAI default for providers without a dedicated config class.
    provider_config_classes = {
        LlmProviders.FIREWORKS_AI: litellm.FireworksAITextCompletionConfig,
        LlmProviders.TOGETHER_AI: litellm.TogetherAITextCompletionConfig,
    }
    config_cls = provider_config_classes.get(
        provider, litellm.OpenAITextCompletionConfig
    )
    return config_cls()
|
||||
|
||||
|
||||
def get_end_user_id_for_cost_tracking(
|
||||
litellm_params: dict,
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue