build(litellm_server/main.py): adding print_verbose for better logging

Krrish Dholakia 2023-10-28 13:43:51 -07:00
parent 6fdca38442
commit 80c9c58f88
2 changed files with 10 additions and 3 deletions

litellm_server/main.py

@@ -9,7 +9,7 @@ from typing import Optional
 try:
     from utils import set_callbacks, load_router_config
 except ImportError:
-    from litellm_server.utils import set_callbacks, load_router_config
+    from litellm_server.utils import set_callbacks, load_router_config, print_verbose
 import dotenv
 dotenv.load_dotenv() # load env variables
@@ -106,10 +106,13 @@ async def chat_completion(request: Request, model: Optional[str] = None):
     env_validation = litellm.validate_environment(model=data["model"])
     if (env_validation['keys_in_environment'] is False or os.getenv("AUTH_STRATEGY", None) == "DYNAMIC") and "authorization" in request.headers: # if users pass LLM api keys as part of header
         api_key = request.headers.get("authorization")
+        print_verbose(f"api_key in headers: {api_key}")
         api_key = api_key.split(" ")[1]
+        print_verbose(f"api_key split: {api_key}")
         if len(api_key) > 0:
             api_key = api_key
             data["api_key"] = api_key
+            print_verbose(f"api_key in data: {api_key}")
     ## CHECK CONFIG ##
     if llm_model_list and data["model"] in [m["model_name"] for m in llm_model_list]:
         for m in llm_model_list:
@@ -117,7 +120,7 @@ async def chat_completion(request: Request, model: Optional[str] = None):
                 for key, value in m["litellm_params"].items():
                     data[key] = value
                 break
-    print(f"data going into litellm completion: {data}")
+    print_verbose(f"data going into litellm completion: {data}")
     response = litellm.completion(
         **data
     )
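For context, the header handling above expects the value to look like "Bearer <provider-api-key>", since it splits on a space and keeps the second token. A minimal client-side sketch, assuming the handler is mounted at /chat/completions on a local server at port 8000 (neither path nor port is shown in this diff):

import requests

resp = requests.post(
    "http://localhost:8000/chat/completions",
    # The handler reads the "authorization" header, splits on a space, and
    # forwards the second token to litellm as data["api_key"].
    headers={"Authorization": "Bearer sk-your-provider-key"},  # example key, not real
    json={
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "hello"}],
    },
)
print(resp.json())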

litellm_server/utils.py

@@ -1,6 +1,10 @@
 import os, litellm
 import pkg_resources
+def print_verbose(print_statement):
+    if os.environ["SET_VERBOSE"] == True:
+        print(print_statement)
+
 def get_package_version(package_name):
     try:
         package = pkg_resources.get_distribution(package_name)
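Note that os.environ values are strings, so the comparison against the bool True in the added helper will never be truthy, and a missing SET_VERBOSE raises KeyError. A more defensive variant (a sketch, not what this commit ships) could look like:

import os

def print_verbose(print_statement):
    # os.getenv avoids a KeyError when SET_VERBOSE is unset, and the check
    # compares against the string "True" rather than the bool True.
    if os.getenv("SET_VERBOSE", "False") == "True":
        print(print_statement)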