mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 10:44:24 +00:00
Litellm dev 01 27 2025 p3 (#8047)
* docs(reliability.md): add doc on disabling fallbacks per request
* feat(litellm_pre_call_utils.py): support reading request timeout from request headers — new `x-litellm-timeout` param. Allows setting dynamic model timeouts from Vercel's AI SDK
* test(test_proxy_server.py): add simple unit test for reading request timeout
* test(test_fallbacks.py): add e2e test to confirm timeout passed in request headers is correctly read
* feat(main.py): support passing metadata to OpenAI in preview. Resolves https://github.com/BerriAI/litellm/issues/6022#issuecomment-2616119371
* fix(main.py): fix passing OpenAI metadata
* docs(request_headers.md): document new request headers
* build: Merge branch 'main' into litellm_dev_01_27_2025_p3
* test: loosen test
This commit is contained in:
parent
9c20c69915
commit
d9eb8f42ff
11 changed files with 187 additions and 3 deletions
|
@ -75,6 +75,7 @@ from litellm.utils import (
|
|||
CustomStreamWrapper,
|
||||
ProviderConfigManager,
|
||||
Usage,
|
||||
add_openai_metadata,
|
||||
async_mock_completion_streaming_obj,
|
||||
convert_to_model_response_object,
|
||||
create_pretrained_tokenizer,
|
||||
|
@ -1617,6 +1618,11 @@ def completion( # type: ignore # noqa: PLR0915
|
|||
if extra_headers is not None:
|
||||
optional_params["extra_headers"] = extra_headers
|
||||
|
||||
if (
|
||||
litellm.enable_preview_features and metadata is not None
|
||||
): # [PREVIEW] allow metadata to be passed to OPENAI
|
||||
optional_params["metadata"] = add_openai_metadata(metadata)
|
||||
|
||||
## LOAD CONFIG - if set
|
||||
config = litellm.OpenAIConfig.get_config()
|
||||
for k, v in config.items():
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue