Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 18:54:30 +00:00)
feat(proxy_server.py): enable batch-completion fastest-response calls on the proxy
Introduces a new `fastest_response` flag for enabling the call.
Commit 20106715d5 (parent ecd182eb6a): 3 changed files with 32 additions and 3 deletions.
```diff
@@ -680,6 +680,7 @@ def completion(
         "region_name",
         "allowed_model_region",
         "model_config",
+        "fastest_response",
     ]

     default_params = openai_params + litellm_params
```
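For context on the hunk above (presumably from `litellm/main.py`, where the `completion` function builds `default_params`): the `litellm_params` list enumerates litellm-internal keyword arguments so they can be separated from provider-specific parameters, so adding `"fastest_response"` here keeps the new flag from being forwarded to the upstream provider as an unknown parameter. Below is a minimal sketch of how a client might opt into the feature through the proxy's OpenAI-compatible endpoint; the base URL, API key, and model names are placeholder assumptions, and the exact request shape may differ from the shipped proxy.

```python
# Hedged sketch: send a chat completion to a locally running LiteLLM proxy
# and request the fastest response across a fan-out of models.
# base_url, api_key, and the model list are illustrative assumptions,
# not values taken from this commit.
import openai

client = openai.OpenAI(
    api_key="sk-1234",                 # placeholder proxy key
    base_url="http://localhost:4000",  # assumed local proxy address
)

response = client.chat.completions.create(
    # Assumed comma-separated model list; the proxy would fan the request
    # out to each model and return whichever completes first.
    model="gpt-4o, groq-llama3",
    messages=[{"role": "user", "content": "Hello!"}],
    extra_body={"fastest_response": True},  # the flag introduced by this commit
)
print(response.choices[0].message.content)
```

Because `fastest_response` now sits in `litellm_params`, the completion path can consume it for its own routing decision while the provider payload stays clean.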