Litellm dev 12 17 2024 p2 (#7277)

* fix(openai/transcription/handler.py): call 'log_pre_api_call' on async calls

* fix(openai/transcriptions/handler.py): call 'logging.pre_call' on sync whisper calls as well
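
  A minimal sketch of the intended behavior (the handler class below is hypothetical; only the pre-call hook names come from the commit message): the logging hook fires before the API request on both the sync and async paths.

    # Hypothetical transcription handler -- illustrates firing the pre-call
    # logging hook before the request on both sync and async code paths.
    class TranscriptionHandler:
        def __init__(self, client, logging_obj):
            self.client = client
            self.logging_obj = logging_obj

        def transcribe(self, model, audio_file, **params):
            # sync whisper path: log before the request is sent
            self.logging_obj.pre_call(input=audio_file.name, api_key=None)
            return self.client.audio.transcriptions.create(
                model=model, file=audio_file, **params
            )

        async def atranscribe(self, model, audio_file, **params):
            # async path: same hook ('log_pre_api_call')
            self.logging_obj.pre_call(input=audio_file.name, api_key=None)
            return await self.client.audio.transcriptions.create(
                model=model, file=audio_file, **params
            )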

* fix(proxy_cli.py): remove default proxy_cli timeout param

otherwise the default gets passed in as a dynamic request timeout and overrides values set in the config
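
  A hedged sketch of the failure mode (the flag name and handler body are illustrative, not the actual proxy_cli.py code): with a non-None default, a concrete timeout is always forwarded as a dynamic request param, shadowing the config file.

    import click

    @click.command()
    # Before: a numeric default meant a timeout was ALWAYS forwarded, even
    # when the user never passed --request_timeout, overriding the config.
    # After: default=None lets downstream code fall back to config values.
    @click.option("--request_timeout", type=int, default=None)
    def run_proxy(request_timeout):
        dynamic_params = {}
        if request_timeout is not None:  # only forward an explicit CLI value
            dynamic_params["request_timeout"] = request_timeout
        click.echo(f"dynamic request params: {dynamic_params}")

    if __name__ == "__main__":
        run_proxy()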

* fix(langfuse.py): pass litellm httpx client - contains ssl certs (#7052)

Fixes https://github.com/BerriAI/litellm/issues/7046
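
  The shape of the fix, sketched (the httpx_client kwarg is the Langfuse Python SDK's; the cert path is a placeholder): hand Langfuse a pre-configured httpx client so its requests use the same SSL certs as litellm's own calls.

    import httpx
    from langfuse import Langfuse

    # httpx client carrying the custom CA bundle (placeholder path)
    http_client = httpx.Client(verify="/path/to/ca-bundle.pem")

    langfuse = Langfuse(
        public_key="pk-...",
        secret_key="sk-...",
        httpx_client=http_client,  # reuse this client instead of building a new one
    )
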
Krish Dholakia, 2024-12-17 14:05:14 -08:00, committed by GitHub
parent 03e711e3e4 · commit 57809cfbf4
5 changed files with 67 additions and 1 deletion

@@ -4632,6 +4632,11 @@ def transcription(
    Allows router to load balance between them
    """
    litellm_call_id = kwargs.get("litellm_call_id", None)
    proxy_server_request = kwargs.get("proxy_server_request", None)
    model_info = kwargs.get("model_info", None)
    metadata = kwargs.get("metadata", None)
    atranscription = kwargs.get("atranscription", False)
    litellm_logging_obj: LiteLLMLoggingObj = kwargs.get("litellm_logging_obj")  # type: ignore
    extra_headers = kwargs.get("extra_headers", None)
@@ -4673,6 +4678,22 @@ def transcription(
        drop_params=drop_params,
    )
    litellm_logging_obj.update_environment_variables(
        model=model,
        user=user,
        optional_params={},
        litellm_params={
            "litellm_call_id": litellm_call_id,
            "proxy_server_request": proxy_server_request,
            "model_info": model_info,
            "metadata": metadata,
            "preset_cache_key": None,
            "stream_response": {},
            **kwargs,
        },
        custom_llm_provider=custom_llm_provider,
    )
    response: Optional[TranscriptionResponse] = None
    if custom_llm_provider == "azure":
        # azure configs
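
With those litellm_params threaded into the logging object, metadata passed to litellm.transcription should now reach success callbacks such as Langfuse. A hedged usage sketch (the metadata key and file name are illustrative):

    import litellm

    litellm.success_callback = ["langfuse"]

    with open("speech.mp3", "rb") as audio_file:
        response = litellm.transcription(
            model="whisper-1",
            file=audio_file,
            metadata={"trace_user_id": "user-123"},  # now forwarded to callbacks
        )
    print(response.text)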