diff --git a/litellm/proxy/proxy_cli.py b/litellm/proxy/proxy_cli.py
index fe6536ba9..865d807fe 100644
--- a/litellm/proxy/proxy_cli.py
+++ b/litellm/proxy/proxy_cli.py
@@ -157,7 +157,12 @@ def run_server(host, port, api_base, api_version, model, alias, add_key, headers
         )
         for chunk in response:
             click.echo(f'LiteLLM: streaming response from proxy {chunk}')
-        return
+
+        # response = openai.Completion.create(model="gpt-3.5-turbo", prompt='this is a test request, write a short poem', stream=True)
+
+        # for chunk in response:
+        #     click.echo(f'LiteLLM: streaming response from proxy {chunk}')
+        # return
     else:
         if headers:
             headers = json.loads(headers)