Merge pull request #5026 from BerriAI/litellm_fix_whisper_caching

[Fix] Whisper Caching - Use correct cache keys for checking request in cache
This commit is contained in:
Ishaan Jaff 2024-08-02 17:26:28 -07:00 committed by GitHub
commit 7ec1f241fc
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
4 changed files with 58 additions and 10 deletions

View file

@@ -553,7 +553,8 @@ def function_setup(
or call_type == CallTypes.transcription.value
):
_file_name: BinaryIO = args[1] if len(args) > 1 else kwargs["file"]
messages = "audio_file"
file_name = getattr(_file_name, "name", "audio_file")
messages = file_name
elif (
call_type == CallTypes.aspeech.value or call_type == CallTypes.speech.value
):
@@ -1213,6 +1214,7 @@ def client(original_function):
hidden_params = {
"model": "whisper-1",
"custom_llm_provider": custom_llm_provider,
"cache_hit": True,
}
cached_result = convert_to_model_response_object(
response_object=cached_result,