From a0c5fee61d9115b8df44a64dda8c4c9fd26b034a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Hakan=20Ta=C5=9Fk=C3=B6pr=C3=BC?=
Date: Tue, 22 Oct 2024 14:11:54 +0300
Subject: [PATCH] Refactor: apply early return (#6369)

---
 .../llm_response_utils/get_headers.py         | 43 ++++++++++---------
 1 file changed, 22 insertions(+), 21 deletions(-)

diff --git a/litellm/litellm_core_utils/llm_response_utils/get_headers.py b/litellm/litellm_core_utils/llm_response_utils/get_headers.py
index 58a5f1715..cd49b5a4a 100644
--- a/litellm/litellm_core_utils/llm_response_utils/get_headers.py
+++ b/litellm/litellm_core_utils/llm_response_utils/get_headers.py
@@ -15,27 +15,28 @@ def get_response_headers(_response_headers: Optional[dict] = None) -> dict:
         dict: _response_headers with OpenAI headers and llm_provider-{header}
 
     """
-    if _response_headers is not None:
-        openai_headers = {}
-        if "x-ratelimit-limit-requests" in _response_headers:
-            openai_headers["x-ratelimit-limit-requests"] = _response_headers[
-                "x-ratelimit-limit-requests"
-            ]
-        if "x-ratelimit-remaining-requests" in _response_headers:
-            openai_headers["x-ratelimit-remaining-requests"] = _response_headers[
-                "x-ratelimit-remaining-requests"
-            ]
-        if "x-ratelimit-limit-tokens" in _response_headers:
-            openai_headers["x-ratelimit-limit-tokens"] = _response_headers[
-                "x-ratelimit-limit-tokens"
-            ]
-        if "x-ratelimit-remaining-tokens" in _response_headers:
-            openai_headers["x-ratelimit-remaining-tokens"] = _response_headers[
-                "x-ratelimit-remaining-tokens"
-            ]
-        llm_provider_headers = _get_llm_provider_headers(_response_headers)
-        return {**llm_provider_headers, **openai_headers}
-    return {}
+    if _response_headers is None:
+        return {}
+
+    openai_headers = {}
+    if "x-ratelimit-limit-requests" in _response_headers:
+        openai_headers["x-ratelimit-limit-requests"] = _response_headers[
+            "x-ratelimit-limit-requests"
+        ]
+    if "x-ratelimit-remaining-requests" in _response_headers:
+        openai_headers["x-ratelimit-remaining-requests"] = _response_headers[
+            "x-ratelimit-remaining-requests"
+        ]
+    if "x-ratelimit-limit-tokens" in _response_headers:
+        openai_headers["x-ratelimit-limit-tokens"] = _response_headers[
+            "x-ratelimit-limit-tokens"
+        ]
+    if "x-ratelimit-remaining-tokens" in _response_headers:
+        openai_headers["x-ratelimit-remaining-tokens"] = _response_headers[
+            "x-ratelimit-remaining-tokens"
+        ]
+    llm_provider_headers = _get_llm_provider_headers(_response_headers)
+    return {**llm_provider_headers, **openai_headers}
 
 
 def _get_llm_provider_headers(response_headers: dict) -> dict: