diff --git a/litellm/proxy/proxy_cli.py b/litellm/proxy/proxy_cli.py
index 1b922b3a7..b39533e78 100644
--- a/litellm/proxy/proxy_cli.py
+++ b/litellm/proxy/proxy_cli.py
@@ -66,6 +66,8 @@ def open_config():
 @click.option('--debug', is_flag=True, help='To debug the input')
 @click.option('--temperature', default=None, type=float, help='Set temperature for the model')
 @click.option('--max_tokens', default=None, type=int, help='Set max tokens for the model')
+@click.option('--drop_params', is_flag=True, help='Drop any unmapped params')
+@click.option('--add_function_to_prompt', is_flag=True, help='If a function is passed but unsupported, pass it as part of the prompt')
 @click.option('--telemetry', default=True, type=bool, help='Helps us know if people are using this feature. Turn this off by doing `--telemetry False`')
 @click.option('--config', is_flag=True, help='Create and open .env file from .env.template')
 @click.option('--test', flag_value=True, help='proxy chat completions url to make a test request to')
@@ -124,7 +126,7 @@ def run_server(host, port, api_base, model, deploy, debug, temperature, max_toke
         return
     else:
         load_config()
-        initialize(model, api_base, debug, temperature, max_tokens, telemetry)
+        initialize(model, api_base, debug, temperature, max_tokens, telemetry, drop_params, add_function_to_prompt)
 
     try:
diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index cd7a537a6..d3b6cdd21 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -45,7 +45,7 @@ def usage_telemetry(): # helps us know if people are using this feature. Set `li
     }
     litellm.utils.litellm_telemetry(data=data)
 
-def initialize(model, api_base, debug, temperature, max_tokens, telemetry):
+def initialize(model, api_base, debug, temperature, max_tokens, telemetry, drop_params, add_function_to_prompt):
     global user_model, user_api_base, user_debug, user_max_tokens, user_temperature, user_telemetry
     user_model = model
     user_api_base = api_base
@@ -54,6 +54,10 @@ def initialize(model, api_base, debug, temperature, max_tokens, telemetry):
     user_temperature = temperature
     user_telemetry = telemetry
     usage_telemetry()
+    if drop_params:
+        litellm.drop_params = True
+    if add_function_to_prompt:
+        litellm.add_function_to_prompt = True
 
 def deploy_proxy(model, api_base, debug, temperature, max_tokens, telemetry, deploy):
     import requests
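
For context, here is a minimal sketch of how the two new flags are meant to flow through `initialize` into litellm's module-level settings. It assumes `run_server` is updated to accept and forward the new Click options (that hunk is not shown in the diff); the call below simply mirrors the updated `initialize` signature and is illustrative, not part of the change itself.

```python
# Illustrative only: exercises the updated initialize() signature from the
# diff above. Assumes the litellm proxy modules are importable locally.
import litellm
from litellm.proxy.proxy_server import initialize

initialize(
    model="gpt-3.5-turbo",        # example model name, not from the diff
    api_base=None,
    debug=False,
    temperature=None,
    max_tokens=None,
    telemetry=False,
    drop_params=True,             # set via the new --drop_params flag
    add_function_to_prompt=True,  # set via the new --add_function_to_prompt flag
)

# The flags toggle litellm's existing module-level settings:
assert litellm.drop_params is True
assert litellm.add_function_to_prompt is True
```

With `litellm.drop_params` enabled, unmapped OpenAI params are silently dropped for providers that don't support them; with `litellm.add_function_to_prompt` enabled, a `functions` argument is folded into the prompt for models without native function calling.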