diff --git a/litellm/__pycache__/main.cpython-311.pyc b/litellm/__pycache__/main.cpython-311.pyc
index d8a3221f4a..0ad93e0a4c 100644
Binary files a/litellm/__pycache__/main.cpython-311.pyc and b/litellm/__pycache__/main.cpython-311.pyc differ
diff --git a/litellm/__pycache__/utils.cpython-311.pyc b/litellm/__pycache__/utils.cpython-311.pyc
index 7e2174fb4a..99e08f1adf 100644
Binary files a/litellm/__pycache__/utils.cpython-311.pyc and b/litellm/__pycache__/utils.cpython-311.pyc differ
diff --git a/litellm/proxy/cost.log b/litellm/proxy/cost.log
index ec39807a1c..365bc3e70e 100644
--- a/litellm/proxy/cost.log
+++ b/litellm/proxy/cost.log
@@ -7,3 +7,5 @@
 2023-10-11 15:14:04 - Model gpt-4 Cost: $0.03291000
 2023-10-11 15:18:16 - Model gpt-4 Cost: $0.03669000
 2023-10-11 15:19:12 - Model gpt-4 Cost: $0.04806000
+2023-10-11 21:11:06 - Model claude-2 Cost: $0.00041534
+2023-10-11 21:15:34 - Model claude-2 Cost: $0.00054606
diff --git a/litellm/proxy/proxy_cli.py b/litellm/proxy/proxy_cli.py
index 290f9671ab..644641f978 100644
--- a/litellm/proxy/proxy_cli.py
+++ b/litellm/proxy/proxy_cli.py
@@ -36,10 +36,14 @@ def open_config():
         print(f"Failed to copy .template.secrets.toml: {e}")

     # Open the .env file in the default editor
-    if os.name == 'nt': # For Windows
-        os.startfile(user_config_path)
-    elif os.name == 'posix': # For MacOS, Linux, and anything using Bash
-        subprocess.call(('open', '-t', user_config_path))
+    try:
+        if os.name == 'nt': # For Windows
+            os.startfile(user_config_path)
+        elif os.name == 'posix': # For MacOS, Linux, and anything using Bash
+            subprocess.call(('open', '-t', user_config_path))
+    except:
+        pass
+    print(f"LiteLLM: Proxy Server Config - {user_config_path}")

 def is_port_in_use(port):
     import socket
diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index f71218ef01..1186e3fcd9 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -353,7 +353,7 @@ async def completion(request: Request):

 @router.post("/chat/completions")
 async def chat_completion(request: Request):
     data = await request.json()
-    print_verbose(f"data passed in: {data}")
+    print(f"data passed in: {data}")
     response = litellm_completion(data, type="chat_completion")
     return response
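
Two details of the proxy_cli.py hunk are worth flagging. The added bare `except:` swallows everything, including KeyboardInterrupt, and the `os.name == 'posix'` branch shells out to `open -t`, which exists on macOS but not on most Linux systems. A minimal sketch of a more defensive variant follows; it assumes only the standard library, and the helper name `open_in_editor` plus the `xdg-open` fallback are illustrative additions, not part of this patch:

import os
import platform
import subprocess

def open_in_editor(path: str) -> None:
    """Best-effort attempt to open a config file in the user's default editor."""
    try:
        if os.name == "nt":
            os.startfile(path)  # Windows: defer to the shell's file association
        elif platform.system() == "Darwin":
            subprocess.call(("open", "-t", path))  # macOS: default text editor
        else:
            subprocess.call(("xdg-open", path))  # most Linux desktops
    except Exception:  # narrower than a bare except: Ctrl-C still propagates
        pass  # opening an editor is a convenience; failure is not fatal
    print(f"LiteLLM: Proxy Server Config - {path}")

Catching `Exception` rather than everything keeps the "open the file if we can, otherwise just print the path" behavior of the patch without masking interpreter-level signals.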
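
The same hunk ends at the first two lines of `is_port_in_use`, so its body is not shown here. For context, the common standard-library way to write such a check looks like the sketch below; this is an assumption about the implementation, not code taken from this diff:

import socket

def is_port_in_use(port: int) -> bool:
    # connect_ex returns 0 when something is already listening on the port
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0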