Litellm dev 01 27 2025 p3 (#8047)
* docs(reliability.md): add doc on disabling fallbacks per request

* feat(litellm_pre_call_utils.py): support reading request timeout from request headers - new `x-litellm-timeout` param

  Allows setting dynamic model timeouts from Vercel's AI SDK

* test(test_proxy_server.py): add simple unit test for reading request timeout

* test(test_fallbacks.py): add e2e test to confirm timeout passed in request headers is correctly read

* feat(main.py): support passing metadata to openai in preview

  Resolves https://github.com/BerriAI/litellm/issues/6022#issuecomment-2616119371

* fix(main.py): fix passing openai metadata

* docs(request_headers.md): document new request headers

* build: Merge branch 'main' into litellm_dev_01_27_2025_p3

* test: loosen test
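Context for the new header: the proxy reads `x-litellm-timeout` on each request, so a caller can set a per-request timeout (in seconds) without touching proxy config. A minimal sketch of how a client could send it through the OpenAI Python SDK's per-request `extra_headers` option; the proxy URL, API key, and model name below are placeholders, not values from this commit:

from openai import OpenAI

# Placeholder endpoint and key; point these at your LiteLLM proxy.
client = OpenAI(base_url="http://localhost:4000", api_key="sk-1234")

response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "hello"}],
    # Per-request timeout, read by the proxy from the request headers.
    extra_headers={"x-litellm-timeout": "90"},
)
print(response.choices[0].message.content)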
This commit is contained in:
Parent: 9c20c69915
Commit: d9eb8f42ff
11 changed files with 187 additions and 3 deletions
@@ -2190,3 +2190,19 @@ async def test_get_ui_settings_spend_logs_threshold():
     # Clean up
     proxy_state.set_proxy_state_variable("spend_logs_row_count", 0)
 
+
+def test_get_timeout_from_request():
+    from litellm.proxy.litellm_pre_call_utils import LiteLLMProxyRequestSetup
+
+    headers = {
+        "x-litellm-timeout": "90",
+    }
+    timeout = LiteLLMProxyRequestSetup._get_timeout_from_request(headers)
+    assert timeout == 90
+
+    headers = {
+        "x-litellm-timeout": "90.5",
+    }
+    timeout = LiteLLMProxyRequestSetup._get_timeout_from_request(headers)
+    assert timeout == 90.5
+
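For readers skimming the diff, here is a plausible standalone sketch of what `_get_timeout_from_request` could do, inferred only from the assertions above (header value arrives as a string; whole numbers come back as int, otherwise float). It is an illustration under those assumptions, not the repository's actual implementation:

from typing import Optional, Union


def get_timeout_from_request(headers: dict) -> Optional[Union[int, float]]:
    # Illustrative stand-in for LiteLLMProxyRequestSetup._get_timeout_from_request.
    raw_value = headers.get("x-litellm-timeout")
    if raw_value is None:
        return None
    # "90" -> 90 (int), "90.5" -> 90.5 (float), matching the asserts in the test.
    value = float(raw_value)
    return int(value) if value.is_integer() else value


print(get_timeout_from_request({"x-litellm-timeout": "90"}))    # 90
print(get_timeout_from_request({"x-litellm-timeout": "90.5"}))  # 90.5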