fix(proxy_cli.py): fix adding keys flow - let user use --add_key to add new keys

Krrish Dholakia 2023-10-13 22:21:36 -07:00
parent cacfac5726
commit 3210ebfc7a
5 changed files with 1106 additions and 6 deletions
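The flow introduced here: the CLI accepts `--add_key KEY_NAME=value`, splits the pair on `=`, and hands it to the new `add_keys_to_config` helper in proxy_server.py, which persists it under a `[keys]` table in the user config TOML and echoes the file back. A minimal sketch of exercising the flag through click's test runner is below; the `litellm.proxy.proxy_cli` import path and the dummy key are assumptions for illustration, not taken from this diff.

```python
# Sketch only: drive the new --add_key flag through click's CliRunner.
# The import path and the placeholder key value are assumptions.
from click.testing import CliRunner

from litellm.proxy.proxy_cli import run_server  # assumed module path

runner = CliRunner()
result = runner.invoke(run_server, ["--add_key", "OPENAI_API_KEY=sk-test-123"])

# On success the command prints the updated config file, then "Done successfully".
print(result.output)
```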

File diff suppressed because it is too large.

proxy_cli.py

@@ -88,6 +88,7 @@ def is_port_in_use(port):
 @click.option('--port', default=8000, help='Port to bind the server to.')
 @click.option('--api_base', default=None, help='API base URL.')
 @click.option('--model', default=None, help='The model name to pass to litellm expects')
+@click.option('--add_key', default=None, help='The API key to add to the proxy config, in KEY_NAME=value form')
 @click.option('--deploy', is_flag=True, type=bool, help='Get a deployed proxy endpoint - api.litellm.ai')
 @click.option('--debug', default=False, is_flag=True, type=bool, help='To debug the input')
 @click.option('--temperature', default=None, type=float, help='Set temperature for the model')
@@ -103,19 +104,17 @@ def is_port_in_use(port):
 @click.option('--test', flag_value=True, help='proxy chat completions url to make a test request to')
 @click.option('--local', is_flag=True, default=False, help='for local debugging')
 @click.option('--cost', is_flag=True, default=False, help='for viewing cost logs')
-def run_server(host, port, api_base, model, deploy, debug, temperature, max_tokens, drop_params, create_proxy, add_function_to_prompt, config, file, max_budget, telemetry, logs, test, local, cost):
+def run_server(host, port, api_base, model, add_key, deploy, debug, temperature, max_tokens, drop_params, create_proxy, add_function_to_prompt, config, file, max_budget, telemetry, logs, test, local, cost):
     global feature_telemetry
     args = locals()
-    print(f"args: {args}")
-    print(f"logs: {logs}")
     if local:
-        from proxy_server import app, initialize, deploy_proxy, print_cost_logs, usage_telemetry
+        from proxy_server import app, initialize, deploy_proxy, print_cost_logs, usage_telemetry, add_keys_to_config
         debug = True
     else:
         try:
-            from .proxy_server import app, initialize, deploy_proxy, print_cost_logs, usage_telemetry
+            from .proxy_server import app, initialize, deploy_proxy, print_cost_logs, usage_telemetry, add_keys_to_config
         except ImportError as e:
-            from proxy_server import app, initialize, deploy_proxy, print_cost_logs, usage_telemetry
+            from proxy_server import app, initialize, deploy_proxy, print_cost_logs, usage_telemetry, add_keys_to_config
     feature_telemetry = usage_telemetry
     if create_proxy == True:
         repo_url = 'https://github.com/BerriAI/litellm'
@@ -147,6 +146,13 @@ def run_server(host, port, api_base, model, deploy, debug, temperature, max_toke
            print(json.dumps(recent_logs, indent=4))
        return
+    if add_key:
+        key_name, key_value = add_key.split("=")
+        add_keys_to_config(key_name, key_value)
+        with open(user_config_path) as f:
+            print(f.read())
+        print("\033[1;32mDone successfully\033[0m")
+        return
     if deploy == True:
         print(f"\033[32mLiteLLM: Deploying your proxy to api.litellm.ai\033[0m\n")
         print(f"\033[32mLiteLLM: Deploying proxy for model: {model}\033[0m\n")

proxy_server.py

@@ -106,6 +106,27 @@ def usage_telemetry(feature: str): # helps us know if people are using this feat
         }
         threading.Thread(target=litellm.utils.litellm_telemetry, args=(data,), daemon=True).start()
+
+def add_keys_to_config(key, value):
+    # Check if file exists
+    if os.path.exists(user_config_path):
+        # Load existing file
+        with open(user_config_path, "rb") as f:
+            config = tomllib.load(f)
+    else:
+        # File doesn't exist, create empty config
+        config = {}
+
+    # Add new key
+    config.setdefault('keys', {})[key] = value
+
+    # Write config to file
+    with open(user_config_path, 'w') as f:
+        for section, data in config.items():
+            f.write('[%s]\n' % section)
+            for k, v in data.items():
+                f.write('%s = "%s"\n' % (k, v))
+
 def load_config():
     try:
         global user_config, user_api_base, user_max_tokens, user_temperature, user_model
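Because `add_keys_to_config` writes the TOML by hand (a `[section]` header plus quoted string values) instead of going through a TOML writer, the output only covers flat tables of string values. A quick sanity check is to write a file in the same shape and read it back with the stdlib `tomllib` the server already uses for loading; the sketch below assumes Python 3.11+ and a throwaway path.

```python
# Sketch: confirm the hand-written format parses as valid TOML and the key
# lands under [keys]. Assumes Python 3.11+ (stdlib tomllib) and a scratch file.
import os
import tomllib

scratch_path = "litellm.secrets.test.toml"  # throwaway path, assumption

with open(scratch_path, "w") as f:
    f.write("[keys]\n")
    f.write('OPENAI_API_KEY = "sk-test-123"\n')

with open(scratch_path, "rb") as f:
    config = tomllib.load(f)

assert config["keys"]["OPENAI_API_KEY"] == "sk-test-123"
os.remove(scratch_path)
```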