Mirror of https://github.com/BerriAI/litellm.git
fix(proxy_cli.py): add drop params and add function to prompt in cli (complete issue)
https://github.com/BerriAI/litellm/issues/557
parent 72ac4fe781
commit 3d809707c0

2 changed files with 9 additions and 2 deletions
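With this change, the flags can be supplied when launching the proxy. A hypothetical invocation, assuming the package's `litellm` console entrypoint and its pre-existing `--model` option (model name chosen for illustration):

    litellm --model gpt-3.5-turbo --drop_params --add_function_to_prompt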
proxy_cli.py:

@@ -66,6 +66,9 @@ def open_config():
 @click.option('--debug', is_flag=True, help='To debug the input')
 @click.option('--temperature', default=None, type=float, help='Set temperature for the model')
 @click.option('--max_tokens', default=None, type=int, help='Set max tokens for the model')
+@click.option('--drop_params', is_flag=True, help='Drop any unmapped params')
+@click.option('--add_function_to_prompt', is_flag=True, help='If function passed but unsupported, pass it as prompt')
+@click.option('--max_tokens', default=None, type=int, help='Set max tokens for the model')
 @click.option('--telemetry', default=True, type=bool, help='Helps us know if people are using this feature. Turn this off by doing `--telemetry False`')
 @click.option('--config', is_flag=True, help='Create and open .env file from .env.template')
 @click.option('--test', flag_value=True, help='proxy chat completions url to make a test request to')
@@ -124,7 +127,7 @@ def run_server(host, port, api_base, model, deploy, debug, temperature, max_toke
         return
     else:
         load_config()
-    initialize(model, api_base, debug, temperature, max_tokens, telemetry)
+    initialize(model, api_base, debug, temperature, max_tokens, telemetry, drop_params, add_function_to_prompt)


     try:
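Taken together, the two proxy_cli.py hunks wire new click flags into the server bootstrap. A minimal self-contained sketch of that pattern, using hypothetical names and a stand-in initialize() rather than the repo's actual module layout:

import click

# Stand-in for the proxy's initialize(); the real one lives in the server module.
def initialize(model, drop_params, add_function_to_prompt):
    print(f"model={model} drop_params={drop_params} add_function_to_prompt={add_function_to_prompt}")

@click.command()
@click.option('--model', default=None, help='Model name to proxy')
@click.option('--drop_params', is_flag=True, help='Drop any unmapped params')
@click.option('--add_function_to_prompt', is_flag=True, help='If function passed but unsupported, pass it as prompt')
def run_server(model, drop_params, add_function_to_prompt):
    # is_flag=True options arrive as booleans (False unless the flag is present),
    # so they can be forwarded straight into the initializer.
    initialize(model, drop_params, add_function_to_prompt)

if __name__ == '__main__':
    run_server()

Because is_flag=True defaults to False, omitting the new flags preserves the proxy's previous behavior.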
The second changed file (where initialize() is defined):

@@ -45,7 +45,7 @@ def usage_telemetry(): # helps us know if people are using this feature. Set `li
         }
         litellm.utils.litellm_telemetry(data=data)

-def initialize(model, api_base, debug, temperature, max_tokens, telemetry):
+def initialize(model, api_base, debug, temperature, max_tokens, telemetry, drop_params, add_function_to_prompt):
     global user_model, user_api_base, user_debug, user_max_tokens, user_temperature, user_telemetry
     user_model = model
     user_api_base = api_base
@@ -54,6 +54,10 @@ def initialize(model, api_base, debug, temperature, max_tokens, telemetry):
     user_temperature = temperature
     user_telemetry = telemetry
     usage_telemetry()
+    if drop_params == True:
+        litellm.drop_params = True
+    if add_function_to_prompt == True:
+        litellm.add_function_to_prompt = True

def deploy_proxy(model, api_base, debug, temperature, max_tokens, telemetry, deploy):
    import requests
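For context on what the two module-level switches are meant to enable (discarding provider-unsupported params; folding an unsupported functions argument into the prompt), here is a rough illustrative sketch. The prepare_request() helper and SUPPORTED_PARAMS set are invented for this example and are not litellm's actual implementation:

# Illustrative only: hypothetical helper mimicking the intent of the two switches.
SUPPORTED_PARAMS = {"model", "messages", "temperature", "max_tokens"}  # invented example set

def prepare_request(params, drop_params=False, add_function_to_prompt=False):
    params = dict(params)
    if add_function_to_prompt and "functions" in params:
        # Provider has no native function calling: describe the functions in the prompt instead.
        functions = params.pop("functions")
        note = {"role": "system", "content": f"Available functions: {functions}"}
        params["messages"] = [note] + list(params.get("messages", []))
    if drop_params:
        # Silently discard anything the provider does not recognize instead of erroring.
        params = {k: v for k, v in params.items() if k in SUPPORTED_PARAMS}
    return params

# Example: 'functions' is folded into the prompt, 'stop' is dropped.
print(prepare_request(
    {"model": "x", "messages": [{"role": "user", "content": "hi"}],
     "functions": [{"name": "get_weather"}], "stop": ["\n"]},
    drop_params=True, add_function_to_prompt=True))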