Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 11:14:04 +00:00)
feat(proxy_server): adding model fallbacks and default model to toml
commit 74c0d5b7a0
parent ec925bfa2e
6 changed files with 14 additions and 2 deletions
```diff
@@ -125,6 +125,11 @@ def load_config():
     ## settings
     litellm.add_function_to_prompt = user_config["general"].get("add_function_to_prompt", True)  # by default add function to prompt if unsupported by provider
     litellm.drop_params = user_config["general"].get("drop_params", True)  # by default drop params if unsupported by provider
+    litellm.model_fallbacks = user_config["general"].get("fallbacks", None)  # fallback models in case initial completion call fails
+    default_model = user_config["general"].get("default_model", None)  # route all requests to this model
+
+    if user_model is None:  # `litellm --model <model-name>` takes precedence over default_model
+        user_model = default_model
 
     ## load model config - to set this run `litellm --config`
     model_config = None
```
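For reference, here is a minimal sketch of the TOML config these lookups expect, assuming the file uses a `[general]` section as read by `load_config()` above. The key names (`add_function_to_prompt`, `drop_params`, `fallbacks`, `default_model`) come straight from the diff; the model names and values are illustrative assumptions, not taken from the repository:

```toml
# Hypothetical litellm proxy config; the key names match the
# user_config["general"] lookups in load_config(), but the model
# names are placeholders.
[general]
add_function_to_prompt = true                      # add function definitions to the prompt if the provider lacks native support
drop_params = true                                 # drop request params the provider does not support
fallbacks = ["gpt-3.5-turbo", "claude-instant-1"]  # models to try if the initial completion call fails
default_model = "gpt-4"                            # route all requests here unless `litellm --model <model-name>` was passed
```

Note the precedence encoded in the diff: `default_model` is only applied when `user_model` is still `None`, so a model passed on the command line always wins.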