Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 18:54:30 +00:00

Merge pull request #5026 from BerriAI/litellm_fix_whisper_caching

[Fix] Whisper Caching - Use correct cache keys for checking request in cache

Commit 7ec1f241fc
4 changed files with 58 additions and 10 deletions
```diff
@@ -553,7 +553,8 @@ def function_setup(
             or call_type == CallTypes.transcription.value
         ):
             _file_name: BinaryIO = args[1] if len(args) > 1 else kwargs["file"]
-            messages = "audio_file"
+            file_name = getattr(_file_name, "name", "audio_file")
+            messages = file_name
         elif (
             call_type == CallTypes.aspeech.value or call_type == CallTypes.speech.value
         ):
```
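The substance of this hunk: previously, every transcription request was keyed in the cache under the constant placeholder `"audio_file"`, so requests for two different audio files could collide and return each other's cached transcripts. The fix derives the key from the uploaded file object's `name` attribute, falling back to the placeholder only when no name is available. A minimal sketch of that idea, where `cache_key_for_transcription` is a hypothetical helper for illustration, not litellm's actual implementation:

```python
from io import BytesIO
from typing import BinaryIO


def cache_key_for_transcription(file_obj: BinaryIO) -> str:
    # Hypothetical helper mirroring the fixed logic: prefer the upload's real
    # file name, and fall back to the generic placeholder only when the
    # object has no ``name`` attribute (e.g. an in-memory buffer).
    file_name = getattr(file_obj, "name", "audio_file")
    return f"transcription:{file_name}"


named = BytesIO(b"RIFF....WAVE")
named.name = "meeting.wav"            # mimics an on-disk file handle's .name
anonymous = BytesIO(b"RIFF....WAVE")  # in-memory buffer, no .name attribute

assert cache_key_for_transcription(named) == "transcription:meeting.wav"
assert cache_key_for_transcription(anonymous) == "transcription:audio_file"
```

With the old behavior, both objects above would have produced the same key; after the fix, only nameless buffers share the placeholder key.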
```diff
@@ -1213,6 +1214,7 @@ def client(original_function):
                         hidden_params = {
                             "model": "whisper-1",
                             "custom_llm_provider": custom_llm_provider,
+                            "cache_hit": True,
                         }
                         cached_result = convert_to_model_response_object(
                             response_object=cached_result,
```
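This hunk marks a transcription response served from the cache by adding `"cache_hit": True` to the `hidden_params` dict that is passed along when the cached dict is converted back into a response object. A minimal sketch of how that metadata travels with the response; the converter and response class below are simplified stand-ins, not litellm's real `convert_to_model_response_object` or `TranscriptionResponse`:

```python
from dataclasses import dataclass, field
from typing import Any, Dict


@dataclass
class TranscriptionResponse:
    # Simplified stand-in for litellm's transcription response type.
    text: str = ""
    _hidden_params: Dict[str, Any] = field(default_factory=dict)


def convert_to_model_response_object(
    response_object: Dict[str, Any],
    hidden_params: Dict[str, Any],
) -> TranscriptionResponse:
    # Simplified converter: rebuild a response object from the cached dict
    # and attach metadata so it travels alongside the OpenAI-compatible body
    # without polluting it.
    resp = TranscriptionResponse(text=response_object.get("text", ""))
    resp._hidden_params = hidden_params
    return resp


cached_result = {"text": "hello world"}
resp = convert_to_model_response_object(
    cached_result,
    hidden_params={
        "model": "whisper-1",
        "custom_llm_provider": "openai",
        "cache_hit": True,
    },
)
if resp._hidden_params.get("cache_hit"):
    print("served from cache; no provider call was made")
```

Keeping the flag in `hidden_params` lets callers and logging callbacks distinguish cache hits from fresh provider calls without changing the response body itself.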