forked from phoenix/litellm-mirror
build(litellm_server/main.py): adding print_verbose for better logging
parent 6fdca38442
commit 80c9c58f88
2 changed files with 10 additions and 3 deletions
litellm_server/main.py

@@ -9,7 +9,7 @@ from typing import Optional
 try:
     from utils import set_callbacks, load_router_config
 except ImportError:
-    from litellm_server.utils import set_callbacks, load_router_config
+    from litellm_server.utils import set_callbacks, load_router_config, print_verbose
 import dotenv
 dotenv.load_dotenv() # load env variables
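For context: print_verbose lives in litellm_server/utils.py, the second changed file, whose diff is not shown here. A minimal sketch of the usual shape of such a helper, assuming it gates on the litellm.set_verbose flag as debug helpers elsewhere in litellm do:

# Hypothetical sketch of the imported helper; the actual definition is in
# litellm_server/utils.py and is not part of this hunk.
import litellm

def print_verbose(print_statement: str) -> None:
    # Emit debug output only when verbose logging is switched on,
    # so request handling stays quiet by default.
    if litellm.set_verbose:
        print(print_statement)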
@@ -79,7 +79,7 @@ async def embedding(request: Request):
     # default to always using the "ENV" variables, only if AUTH_STRATEGY==DYNAMIC then reads headers
     if os.getenv("AUTH_STRATEGY", None) == "DYNAMIC" and "authorization" in request.headers: # if users pass LLM api keys as part of header
         api_key = request.headers.get("authorization")
-        api_key = api_key.replace("Bearer", "").strip()
+        api_key = api_key.replace("Bearer", "").strip()
         if len(api_key.strip()) > 0:
             api_key = api_key
             data["api_key"] = api_key
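Side note: this embedding route strips the key with replace("Bearer", "").strip(), while chat_completion in the next hunk uses split(" ")[1]. A standalone sketch (not part of the commit) comparing the two on a typical header:

# Illustration of the two header-parsing styles used in this file,
# applied to a representative Authorization header.
header = "Bearer sk-test-123"

# embedding(): drop the scheme token, then trim surrounding whitespace.
key_a = header.replace("Bearer", "").strip()   # -> "sk-test-123"

# chat_completion(): take the token after the first space.
key_b = header.split(" ")[1]                   # -> "sk-test-123"

assert key_a == key_b == "sk-test-123"
# Note: split(" ")[1] raises IndexError on a header without a space,
# while the replace/strip form degrades to an empty string instead.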
@@ -106,10 +106,13 @@ async def chat_completion(request: Request, model: Optional[str] = None):
         env_validation = litellm.validate_environment(model=data["model"])
         if (env_validation['keys_in_environment'] is False or os.getenv("AUTH_STRATEGY", None) == "DYNAMIC") and "authorization" in request.headers: # if users pass LLM api keys as part of header
             api_key = request.headers.get("authorization")
+            print_verbose(f"api_key in headers: {api_key}")
             api_key = api_key.split(" ")[1]
+            print_verbose(f"api_key split: {api_key}")
             if len(api_key) > 0:
                 api_key = api_key
                 data["api_key"] = api_key
+                print_verbose(f"api_key in data: {api_key}")
         ## CHECK CONFIG ##
         if llm_model_list and data["model"] in [m["model_name"] for m in llm_model_list]:
             for m in llm_model_list:
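To exercise the dynamic-auth path traced by these new print_verbose calls, a client sends its provider key in the Authorization header. A hypothetical request against a locally running litellm_server; the URL, port, key, and model name are placeholders, not taken from this commit:

# Hypothetical client call showing DYNAMIC auth in action.
import requests

resp = requests.post(
    "http://localhost:8000/chat/completions",  # assumed local server address
    headers={"Authorization": "Bearer sk-your-provider-key"},  # read when AUTH_STRATEGY=DYNAMIC
    json={
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "hello"}],
    },
)
print(resp.json())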
@@ -117,7 +120,7 @@ async def chat_completion(request: Request, model: Optional[str] = None):
                     for key, value in m["litellm_params"].items():
                         data[key] = value
                     break
-        print(f"data going into litellm completion: {data}")
+        print_verbose(f"data going into litellm completion: {data}")
         response = litellm.completion(
             **data
         )
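The loop above merges the matched entry's litellm_params into the request payload before litellm.completion is called. A standalone sketch of that merge with a made-up config entry; the match check on model_name falls between the two hunks and is assumed here:

# Sketch of the CHECK CONFIG merge, using an invented llm_model_list entry
# of the shape load_router_config() returns.
llm_model_list = [
    {
        "model_name": "gpt-3.5-turbo",
        "litellm_params": {"model": "azure/my-deployment", "api_base": "https://example.azure.com"},
    }
]

data = {"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "hi"}]}

if llm_model_list and data["model"] in [m["model_name"] for m in llm_model_list]:
    for m in llm_model_list:
        if data["model"] == m["model_name"]:
            # Overwrite request fields with the configured provider params,
            # e.g. rerouting "gpt-3.5-turbo" to a specific Azure deployment.
            for key, value in m["litellm_params"].items():
                data[key] = value
            break

print(data)  # data now carries model="azure/my-deployment" plus the api_base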