forked from phoenix/litellm-mirror
Refactor: apply early return (#6369)
This commit is contained in:
parent a5fe87d6a5
commit a0c5fee61d
1 changed file with 22 additions and 21 deletions
@@ -15,7 +15,9 @@ def get_response_headers(_response_headers: Optional[dict] = None) -> dict:
         dict: _response_headers with OpenAI headers and llm_provider-{header}
 
     """
-    if _response_headers is not None:
-        openai_headers = {}
-        if "x-ratelimit-limit-requests" in _response_headers:
-            openai_headers["x-ratelimit-limit-requests"] = _response_headers[
+    if _response_headers is None:
+        return {}
+
+    openai_headers = {}
+    if "x-ratelimit-limit-requests" in _response_headers:
+        openai_headers["x-ratelimit-limit-requests"] = _response_headers[
@@ -35,7 +37,6 @@ def get_response_headers(_response_headers: Optional[dict] = None) -> dict:
-        ]
-        llm_provider_headers = _get_llm_provider_headers(_response_headers)
-        return {**llm_provider_headers, **openai_headers}
-    return {}
+    ]
+    llm_provider_headers = _get_llm_provider_headers(_response_headers)
+    return {**llm_provider_headers, **openai_headers}
 
 
 def _get_llm_provider_headers(response_headers: dict) -> dict:
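
For readability, here is a sketch of how get_response_headers reads after the early-return refactor, pieced together from the hunks visible above. The header list OPENAI_RATELIMIT_HEADERS, the loop that replaces the repeated per-header if-blocks, and the body of _get_llm_provider_headers are assumptions added to make the sketch self-contained; they are not copied from this commit.

from typing import Optional

# Assumed set of copied headers; only "x-ratelimit-limit-requests" is visible
# in the hunks above, the rest of the diff is collapsed between the two hunks.
OPENAI_RATELIMIT_HEADERS = [
    "x-ratelimit-limit-requests",
    "x-ratelimit-remaining-requests",
]


def _get_llm_provider_headers(response_headers: dict) -> dict:
    # Hypothetical body: the docstring mentions "llm_provider-{header}" keys,
    # so mirror every incoming header under that prefix.
    return {f"llm_provider-{key}": value for key, value in response_headers.items()}


def get_response_headers(_response_headers: Optional[dict] = None) -> dict:
    """
    Returns:
        dict: _response_headers with OpenAI headers and llm_provider-{header}
    """
    if _response_headers is None:
        return {}  # early return replaces the old trailing `return {}`

    openai_headers = {}
    for header in OPENAI_RATELIMIT_HEADERS:
        if header in _response_headers:
            openai_headers[header] = _response_headers[header]

    llm_provider_headers = _get_llm_provider_headers(_response_headers)
    return {**llm_provider_headers, **openai_headers}

The point of the refactor is visible in the shape of the function: guarding the None case up front removes one level of indentation from the rest of the body and drops the old trailing return {}, so the main path reads straight down.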