Merge pull request #4927 from BerriAI/litellm_set_max_request_response_size_ui

Feat Enterprise - set max request / response size UI
This commit is contained in:
Ishaan Jaff 2024-07-27 20:06:09 -07:00 committed by GitHub
commit 096844c258
2 changed files with 10 additions and 0 deletions

View file

@@ -1144,6 +1144,14 @@ class ConfigGeneralSettings(LiteLLMBase):
global_max_parallel_requests: Optional[int] = Field(
None, description="global max parallel requests to allow for a proxy instance."
)
max_request_size_mb: Optional[int] = Field(
None,
description="max request size in MB, if a request is larger than this size it will be rejected",
)
max_response_size_mb: Optional[int] = Field(
None,
description="max response size in MB, if a response is larger than this size it will be rejected",
)
infer_model_from_keys: Optional[bool] = Field(
None,
description="for `/models` endpoint, infers available model based on environment keys (e.g. OPENAI_API_KEY)",

View file

@@ -9083,6 +9083,8 @@ async def get_config_list(
allowed_args = {
"max_parallel_requests": {"type": "Integer"},
"global_max_parallel_requests": {"type": "Integer"},
"max_request_size_mb": {"type": "Integer"},
"max_response_size_mb": {"type": "Integer"},
}
return_val = []