# mirror of https://github.com/BerriAI/litellm.git
# synced 2025-04-25 18:54:30 +00:00
#
# Commit history notes:
# * test: add new test image embedding to base llm unit tests
#   Addresses https://github.com/BerriAI/litellm/issues/6515
# * fix(bedrock/embed/multimodal-embeddings): strip data prefix from image urls
#   for bedrock multimodal embeddings
#   Fix https://github.com/BerriAI/litellm/issues/6515
# * feat: initial commit for fireworks ai audio transcription support
#   Relevant issue: https://github.com/BerriAI/litellm/issues/7134
# * test: initial fireworks ai test
# * feat(fireworks_ai/): implemented fireworks ai audio transcription config
# * fix(utils.py): register fireworks ai audio transcription config, in config manager
# * fix(utils.py): add fireworks ai param translation to 'get_optional_params_transcription'
# * refactor(fireworks_ai/): define text completion route with model name handling
#   moves model name handling to specific fireworks routes, as required by their api
# * refactor(fireworks_ai/chat): define transform_Request - allows fixing model
#   if accounts/ is missing
# * fix: fix linting errors (x4)
# * fix(handler.py): fix linting errors
# * fix(main.py): fix tgai text completion route
# * refactor(together_ai/completion): refactors together ai text completion route
#   to just use provider transform request
# * refactor: move test_fine_tuning_api out of local_testing
#   reduces local testing ci/cd time
#
# File: 86 lines, 2.9 KiB, Python
import httpx
import json
import pytest
import sys
from typing import Any, Dict, List
from unittest.mock import MagicMock, Mock, patch

import os
import uuid

sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path so `litellm` resolves locally

import litellm
from litellm import transcription
from litellm.litellm_core_utils.get_supported_openai_params import (
    get_supported_openai_params,
)
from litellm.llms.base_llm.audio_transcription.transformation import (
    BaseAudioTranscriptionConfig,
)
from litellm.utils import ProviderConfigManager
from abc import ABC, abstractmethod

# Directory containing this test file; the audio fixture lives alongside it.
pwd = os.path.dirname(os.path.realpath(__file__))
print(pwd)

file_path = os.path.join(pwd, "gettysburg.wav")

# Shared read-binary handle to the audio fixture, reused by every test in
# this module. NOTE(review): the handle is opened at import time and never
# closed; tests that consume it may also leave the file position at EOF for
# subsequent tests — confirm whether each test should reopen the file.
audio_file = open(file_path, "rb")
|
class BaseLLMAudioTranscriptionTest(ABC):
    """Reusable base test suite for a provider's audio-transcription support.

    Provider-specific test classes subclass this and implement the two
    abstract hooks; the inherited ``test_*`` methods then exercise the
    provider's end-to-end transcription call, its optional-parameter
    mapping, and its registered transcription config.
    """

    @abstractmethod
    def get_base_audio_transcription_call_args(self) -> dict:
        """Must return the base audio transcription call args (at minimum ``model``)."""
        pass

    @abstractmethod
    def get_custom_llm_provider(self) -> litellm.LlmProviders:
        """Must return the custom llm provider under test."""
        pass

    def test_audio_transcription(self):
        """
        Test that the audio transcription request is translated correctly
        and yields a non-empty transcript.
        """
        litellm.set_verbose = True
        transcription_call_args = self.get_base_audio_transcription_call_args()
        # `audio_file` is the module-level handle to gettysburg.wav.
        transcript = transcription(**transcription_call_args, file=audio_file)
        print(f"transcript: {transcript.model_dump()}")
        print(f"transcript hidden params: {transcript._hidden_params}")

        assert transcript.text is not None

    def test_audio_transcription_optional_params(self):
        """
        Test that transcription-specific supported params are returned for
        the provider, not the default chat-completion param set.
        """
        transcription_args = self.get_base_audio_transcription_call_args()
        model = transcription_args["model"]
        custom_llm_provider = self.get_custom_llm_provider()
        optional_params = get_supported_openai_params(
            model=model,
            custom_llm_provider=custom_llm_provider.value,
            request_type="transcription",
        )
        print(f"optional_params: {optional_params}")
        assert optional_params is not None
        assert (
            "max_completion_tokens" not in optional_params
        )  # assert default chat completion response not returned

    def test_audio_transcription_config(self):
        """
        Test that the provider's audio transcription config is implemented
        and registered with the provider config manager.
        """
        transcription_args = self.get_base_audio_transcription_call_args()
        model = transcription_args["model"]
        custom_llm_provider = self.get_custom_llm_provider()
        config = ProviderConfigManager.get_provider_audio_transcription_config(
            model=model,
            provider=custom_llm_provider,
        )
        print(f"config: {config}")
        assert config is not None
        assert isinstance(config, BaseAudioTranscriptionConfig)