Refactor: apply early return (#6369)
This commit is contained in:
parent a5fe87d6a5
commit a0c5fee61d

1 changed file with 22 additions and 21 deletions
@@ -15,27 +15,28 @@ def get_response_headers(_response_headers: Optional[dict] = None) -> dict:
         dict: _response_headers with OpenAI headers and llm_provider-{header}
 
     """
-    if _response_headers is not None:
-        openai_headers = {}
-        if "x-ratelimit-limit-requests" in _response_headers:
-            openai_headers["x-ratelimit-limit-requests"] = _response_headers[
-                "x-ratelimit-limit-requests"
-            ]
-        if "x-ratelimit-remaining-requests" in _response_headers:
-            openai_headers["x-ratelimit-remaining-requests"] = _response_headers[
-                "x-ratelimit-remaining-requests"
-            ]
-        if "x-ratelimit-limit-tokens" in _response_headers:
-            openai_headers["x-ratelimit-limit-tokens"] = _response_headers[
-                "x-ratelimit-limit-tokens"
-            ]
-        if "x-ratelimit-remaining-tokens" in _response_headers:
-            openai_headers["x-ratelimit-remaining-tokens"] = _response_headers[
-                "x-ratelimit-remaining-tokens"
-            ]
-        llm_provider_headers = _get_llm_provider_headers(_response_headers)
-        return {**llm_provider_headers, **openai_headers}
-    return {}
+    if _response_headers is None:
+        return {}
+
+    openai_headers = {}
+    if "x-ratelimit-limit-requests" in _response_headers:
+        openai_headers["x-ratelimit-limit-requests"] = _response_headers[
+            "x-ratelimit-limit-requests"
+        ]
+    if "x-ratelimit-remaining-requests" in _response_headers:
+        openai_headers["x-ratelimit-remaining-requests"] = _response_headers[
+            "x-ratelimit-remaining-requests"
+        ]
+    if "x-ratelimit-limit-tokens" in _response_headers:
+        openai_headers["x-ratelimit-limit-tokens"] = _response_headers[
+            "x-ratelimit-limit-tokens"
+        ]
+    if "x-ratelimit-remaining-tokens" in _response_headers:
+        openai_headers["x-ratelimit-remaining-tokens"] = _response_headers[
+            "x-ratelimit-remaining-tokens"
+        ]
+    llm_provider_headers = _get_llm_provider_headers(_response_headers)
+    return {**llm_provider_headers, **openai_headers}
 
 
 def _get_llm_provider_headers(response_headers: dict) -> dict:
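For context, a minimal standalone sketch of how the refactored function behaves after the early return. The four explicit if statements are condensed into a loop here for brevity, and the _get_llm_provider_headers stub is an assumption (its real body is not part of this hunk); it only illustrates the "llm_provider-{header}" prefixing described in the docstring.

from typing import Optional


def _get_llm_provider_headers(response_headers: dict) -> dict:
    # Assumed stub for illustration only: prefix every incoming header with
    # "llm_provider-", matching the docstring's "llm_provider-{header}" wording.
    return {f"llm_provider-{k}": v for k, v in response_headers.items()}


def get_response_headers(_response_headers: Optional[dict] = None) -> dict:
    # Early return replaces the old "if _response_headers is not None:" nesting.
    if _response_headers is None:
        return {}

    openai_headers = {}
    # Condensed form of the four explicit checks in the diff above.
    for key in (
        "x-ratelimit-limit-requests",
        "x-ratelimit-remaining-requests",
        "x-ratelimit-limit-tokens",
        "x-ratelimit-remaining-tokens",
    ):
        if key in _response_headers:
            openai_headers[key] = _response_headers[key]

    llm_provider_headers = _get_llm_provider_headers(_response_headers)
    return {**llm_provider_headers, **openai_headers}


print(get_response_headers({"x-ratelimit-remaining-requests": "99"}))
# Under the stub's assumed prefixing, this prints:
# {'llm_provider-x-ratelimit-remaining-requests': '99', 'x-ratelimit-remaining-requests': '99'}

The observable behavior is unchanged by the refactor: a None input still yields {}, and a populated dict still yields the provider-prefixed headers merged with the recognized OpenAI rate-limit headers; only the nesting is flattened.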