feat(utils.py): support passing openai response headers to client, if enabled
Allows openai/openai-compatible provider response headers to be sent to the client when 'return_response_headers' is enabled.
parent b137207ae6
commit dcb974dd1e

3 changed files with 12 additions and 0 deletions
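A minimal sketch of how a client might read the forwarded headers, assuming the proxy config's `return_response_headers: true` setting (enabled in the first diff below) has a module-level equivalent of the same name; the model, prompt, and header key are illustrative, and the access pattern follows the new test below:

import litellm

# Assumption: a module-level flag mirroring the proxy config's
# `return_response_headers: true` setting.
litellm.return_response_headers = True

# Placeholder model and prompt, for illustration only.
response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "hi"}],
)

# Per the new test below, forwarded provider headers appear in
# _hidden_params with each key prefixed by "llm_provider-".
print(response._hidden_params.get("llm_provider-x-ratelimit-remaining-requests"))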
@@ -7,3 +7,4 @@ model_list:
 litellm_settings:
   callbacks: ["logfire"]
   redact_user_api_key_info: true
+  return_response_headers: true
@@ -896,6 +896,9 @@ async def test_completion_cost_hidden_params(sync_mode):

     assert "response_cost" in response._hidden_params
     assert isinstance(response._hidden_params["response_cost"], float)
+    assert isinstance(
+        response._hidden_params["llm_provider-x-ratelimit-remaining-requests"], float
+    )


 def test_vertex_ai_gemini_predict_cost():
@@ -5678,6 +5678,14 @@ def convert_to_model_response_object(
     _response_headers: Optional[dict] = None,
 ):
     received_args = locals()
+    if _response_headers is not None:
+        if hidden_params is not None:
+            hidden_params["additional_headers"] = {
+                "{}-{}".format("llm_provider", k): v
+                for k, v in _response_headers.items()
+            }
+        else:
+            hidden_params = {"additional_headers": _response_headers}
     ### CHECK IF ERROR IN RESPONSE ### - openrouter returns these in the dictionary
     if (
         response_object is not None
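As a standalone illustration of the block added above (the header and cost values here are made-up samples), the prefixing behaves like this:

# Mirrors the added block in convert_to_model_response_object;
# inputs are made-up samples.
_response_headers = {"x-ratelimit-remaining-requests": "99"}
hidden_params = {"response_cost": 0.0001}

if _response_headers is not None:
    if hidden_params is not None:
        hidden_params["additional_headers"] = {
            "{}-{}".format("llm_provider", k): v
            for k, v in _response_headers.items()
        }
    else:
        hidden_params = {"additional_headers": _response_headers}

print(hidden_params)
# {'response_cost': 0.0001, 'additional_headers':
#  {'llm_provider-x-ratelimit-remaining-requests': '99'}}

Note that the else branch stores the headers unprefixed; only the path where hidden_params already exists applies the "llm_provider-" prefix.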