diff --git a/litellm/litellm_core_utils/llm_response_utils/get_headers.py b/litellm/litellm_core_utils/llm_response_utils/get_headers.py
index 58a5f1715..cd49b5a4a 100644
--- a/litellm/litellm_core_utils/llm_response_utils/get_headers.py
+++ b/litellm/litellm_core_utils/llm_response_utils/get_headers.py
@@ -15,27 +15,28 @@ def get_response_headers(_response_headers: Optional[dict] = None) -> dict:
         dict: _response_headers with OpenAI headers and llm_provider-{header}
     """
-    if _response_headers is not None:
-        openai_headers = {}
-        if "x-ratelimit-limit-requests" in _response_headers:
-            openai_headers["x-ratelimit-limit-requests"] = _response_headers[
-                "x-ratelimit-limit-requests"
-            ]
-        if "x-ratelimit-remaining-requests" in _response_headers:
-            openai_headers["x-ratelimit-remaining-requests"] = _response_headers[
-                "x-ratelimit-remaining-requests"
-            ]
-        if "x-ratelimit-limit-tokens" in _response_headers:
-            openai_headers["x-ratelimit-limit-tokens"] = _response_headers[
-                "x-ratelimit-limit-tokens"
-            ]
-        if "x-ratelimit-remaining-tokens" in _response_headers:
-            openai_headers["x-ratelimit-remaining-tokens"] = _response_headers[
-                "x-ratelimit-remaining-tokens"
-            ]
-        llm_provider_headers = _get_llm_provider_headers(_response_headers)
-        return {**llm_provider_headers, **openai_headers}
-    return {}
+    if _response_headers is None:
+        return {}
+
+    openai_headers = {}
+    if "x-ratelimit-limit-requests" in _response_headers:
+        openai_headers["x-ratelimit-limit-requests"] = _response_headers[
+            "x-ratelimit-limit-requests"
+        ]
+    if "x-ratelimit-remaining-requests" in _response_headers:
+        openai_headers["x-ratelimit-remaining-requests"] = _response_headers[
+            "x-ratelimit-remaining-requests"
+        ]
+    if "x-ratelimit-limit-tokens" in _response_headers:
+        openai_headers["x-ratelimit-limit-tokens"] = _response_headers[
+            "x-ratelimit-limit-tokens"
+        ]
+    if "x-ratelimit-remaining-tokens" in _response_headers:
+        openai_headers["x-ratelimit-remaining-tokens"] = _response_headers[
+            "x-ratelimit-remaining-tokens"
+        ]
+    llm_provider_headers = _get_llm_provider_headers(_response_headers)
+    return {**llm_provider_headers, **openai_headers}
 
 
 def _get_llm_provider_headers(response_headers: dict) -> dict:
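
For context, a minimal usage sketch of the refactored function (not part of the diff). It assumes, based on the docstring, that _get_llm_provider_headers (whose body is outside this hunk) re-exposes each header under an "llm_provider-" prefixed key; the exact prefixing behavior lives in that helper.

# Usage sketch, assuming "llm_provider-" prefixing per the docstring.
from litellm.litellm_core_utils.llm_response_utils.get_headers import (
    get_response_headers,
)

# Guard clause: no headers at all yields an empty dict.
assert get_response_headers(None) == {}

raw_headers = {
    "x-ratelimit-remaining-requests": "99",
    "x-ratelimit-remaining-tokens": "9500",
    "content-type": "application/json",
}
merged = get_response_headers(raw_headers)

# Recognized OpenAI rate-limit headers keep their original keys; all headers
# are also expected under an "llm_provider-" prefixed key (assumed behavior).
print(merged["x-ratelimit-remaining-requests"])               # "99"
print(merged["llm_provider-x-ratelimit-remaining-requests"])  # "99" (assumed)

The behavior is unchanged by the refactor: the early return for None replaces the original wrapping "if _response_headers is not None:" block, so the happy path reads at one indentation level less.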