From f307e82a41f26801799aebee491680180c9d00d4 Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Wed, 29 Nov 2023 07:50:50 -0800
Subject: [PATCH] (fix) proxy: making receiving data print_verbose

---
 litellm/proxy/proxy_server.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index 5ac2acf38..335205eca 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -637,7 +637,7 @@ def model_list():
         all_models = list(set(all_models + [m["model_name"] for m in llm_model_list]))
     if user_model is not None:
         all_models += [user_model]
-    print(f"all_models: {all_models}")
+    print_verbose(f"all_models: {all_models}")
     ### CHECK OLLAMA MODELS ###
     try:
         response = requests.get("http://0.0.0.0:11434/api/tags")
@@ -709,7 +709,7 @@ async def chat_completion(request: Request, model: Optional[str] = None, user_ap
         data = ast.literal_eval(body_str)
     except:
         data = json.loads(body_str)
-    print(f"receiving data: {data}")
+    print_verbose(f"receiving data: {data}")
     data["model"] = (
         general_settings.get("completion_model", None) # server default
         or user_model # model name passed via cli args
@@ -768,7 +768,7 @@ async def chat_completion(request: Request, model: Optional[str] = None, user_ap
 async def embeddings(request: Request, user_api_key_dict: dict = Depends(user_api_key_auth)):
     try:
         data = await request.json()
-        print(f"data: {data}")
+        print_verbose(f"data: {data}")
         data["model"] = (
             general_settings.get("embedding_model", None) # server default
             or user_model # model name passed via cli args
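
Note: the patch swaps bare print() calls for the proxy's print_verbose helper, so
incoming request payloads are only echoed to stdout when the proxy runs in
debug/verbose mode rather than on every request. A minimal sketch of such a
verbose-gated helper, assuming a module-level user_debug flag toggled by a debug
CLI option (names here are illustrative; the actual helper in proxy_server.py may
differ):

    # Hypothetical sketch of a verbose-gated print helper; not the exact
    # litellm implementation.
    user_debug = False  # assumed module-level flag, e.g. set by a --debug CLI arg

    def print_verbose(print_statement):
        # Emit the statement only in debug/verbose mode, so request data
        # stays out of production logs by default.
        if user_debug:
            print(print_statement)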