Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 18:54:30 +00:00
fix(main.py): fixing print_verbose
commit 5b3978eff4, parent 763ecf681a
5 changed files with 240 additions and 222 deletions
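The hunks shown below come from what appears to be the proxy's CLI command (the diff view has lost the file header, so the path is not shown). They add a --num_workers option and move proxy configuration from initialize to save_worker_config so that settings survive a multi-worker uvicorn startup.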
@@ -52,6 +52,7 @@ def is_port_in_use(port):
 @click.command()
 @click.option('--host', default='0.0.0.0', help='Host for the server to listen on.')
 @click.option('--port', default=8000, help='Port to bind the server to.')
+@click.option('--num_workers', default=1, help='Number of uvicorn workers to spin up')
 @click.option('--api_base', default=None, help='API base URL.')
 @click.option('--api_version', default="2023-07-01-preview", help='For azure - pass in the api version.')
 @click.option('--model', '-m', default=None, help='The model name to pass to litellm expects')
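The only change in this hunk is the new --num_workers flag. Assuming the standard litellm console script that exposes this Click command, it would be exercised like this (the model name is purely illustrative):

    $ litellm --model gpt-3.5-turbo --num_workers 4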
@@ -74,17 +75,17 @@ def is_port_in_use(port):
 @click.option('--test', flag_value=True, help='proxy chat completions url to make a test request to')
 @click.option('--local', is_flag=True, default=False, help='for local debugging')
 @click.option('--cost', is_flag=True, default=False, help='for viewing cost logs')
-def run_server(host, port, api_base, api_version, model, alias, add_key, headers, save, debug, temperature, max_tokens, request_timeout, drop_params, create_proxy, add_function_to_prompt, config, file, max_budget, telemetry, logs, test, local, cost):
+def run_server(host, port, api_base, api_version, model, alias, add_key, headers, save, debug, temperature, max_tokens, request_timeout, drop_params, create_proxy, add_function_to_prompt, config, file, max_budget, telemetry, logs, test, local, cost, num_workers):
     global feature_telemetry
     args = locals()
     if local:
-        from proxy_server import app, initialize, print_cost_logs, usage_telemetry, add_keys_to_config
+        from proxy_server import app, save_worker_config, print_cost_logs, usage_telemetry, add_keys_to_config
         debug = True
     else:
         try:
-            from .proxy_server import app, initialize, print_cost_logs, usage_telemetry, add_keys_to_config
+            from .proxy_server import app, save_worker_config, print_cost_logs, usage_telemetry, add_keys_to_config
         except ImportError as e:
-            from proxy_server import app, initialize, print_cost_logs, usage_telemetry, add_keys_to_config
+            from proxy_server import app, save_worker_config, print_cost_logs, usage_telemetry, add_keys_to_config
     feature_telemetry = usage_telemetry
     if create_proxy == True:
         repo_url = 'https://github.com/BerriAI/litellm'
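Every import path above swaps initialize for save_worker_config. The reason: with multiple uvicorn workers, each worker is a separate process that re-imports the app, so configuring the app in the CLI process would never reach the workers; the CLI instead has to persist its settings somewhere every worker can read them back. A minimal sketch of such a helper, assuming an environment-variable hand-off (the real proxy_server.py implementation is not part of this diff, and the WORKER_CONFIG key is an assumption):

    import json
    import os

    def save_worker_config(**kwargs):
        # Sketch only: os.environ is inherited by the worker processes
        # uvicorn spawns, so each worker can rebuild its config from it.
        os.environ["WORKER_CONFIG"] = json.dumps(kwargs)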
@@ -163,7 +164,7 @@ def run_server(host, port, api_base, api_version, model, alias, add_key, headers
     else:
         if headers:
             headers = json.loads(headers)
-        initialize(model=model, alias=alias, api_base=api_base, api_version=api_version, debug=debug, temperature=temperature, max_tokens=max_tokens, request_timeout=request_timeout, max_budget=max_budget, telemetry=telemetry, drop_params=drop_params, add_function_to_prompt=add_function_to_prompt, headers=headers, save=save, config=config)
+        save_worker_config(model=model, alias=alias, api_base=api_base, api_version=api_version, debug=debug, temperature=temperature, max_tokens=max_tokens, request_timeout=request_timeout, max_budget=max_budget, telemetry=telemetry, drop_params=drop_params, add_function_to_prompt=add_function_to_prompt, headers=headers, save=save, config=config)
     try:
         import uvicorn
     except:
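This hunk is the call-site half of the same refactor: instead of applying the settings immediately via initialize, the CLI now records them with save_worker_config before uvicorn forks. The worker-side counterpart, sketched here under the same assumed env-var hand-off (initialize_from_worker_config is a hypothetical name, not taken from this diff), would run when each worker imports proxy_server:

    import json
    import os

    def initialize_from_worker_config():
        # Hypothetical worker-side half: runs once per worker process,
        # because uvicorn re-imports the module in every worker when it
        # is handed an import string rather than an app object.
        return json.loads(os.environ.get("WORKER_CONFIG", "{}"))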
@@ -174,7 +175,7 @@ def run_server(host, port, api_base, api_version, model, alias, add_key, headers
 
     if port == 8000 and is_port_in_use(port):
         port = random.randint(1024, 49152)
-    uvicorn.run(app, host=host, port=port)
+    uvicorn.run("proxy_server:app", host=host, port=port, workers=num_workers)
 
 
 if __name__ == "__main__":
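This last hunk is what forces the refactor: uvicorn only honors workers > 1 when given an import string ("module:attribute"), since each worker must re-import the application in its own process. Handed a bare app object together with a workers count, uvicorn logs "You must pass the application as an import string to enable 'reload' or 'workers'." and refuses to start. A self-contained repro of the constraint (the file name example_app.py is assumed; the example is not litellm code):

    # example_app.py
    import uvicorn
    from fastapi import FastAPI

    app = FastAPI()

    @app.get("/health")
    def health():
        return {"status": "ok"}

    if __name__ == "__main__":
        # uvicorn.run(app, workers=4) would fail here; the import string
        # lets every worker process reconstruct the app on its own.
        uvicorn.run("example_app:app", host="0.0.0.0", port=8000, workers=4)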
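The hunk headers and the port-fallback logic reference an is_port_in_use helper whose body falls outside this diff. A plausible reconstruction, offered as a sketch rather than litellm's confirmed code, probes the port with a socket:

    import socket

    def is_port_in_use(port):
        # connect_ex returns 0 when something is already listening there.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            return s.connect_ex(("localhost", port)) == 0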