mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-26 19:24:27 +00:00
(fix) make print_verbose non blocking
This commit is contained in:
parent
d2a53f05ed
commit
f744445db4
7 changed files with 37 additions and 16 deletions
|
@ -1,5 +1,8 @@
|
|||
set_verbose = False


def print_verbose(print_statement):
    """Print *print_statement* only when the module-level ``set_verbose`` flag is on.

    Best-effort by design (the commit intent is a non-blocking verbose
    print): any runtime failure is swallowed so logging can never crash
    the caller.
    """
    try:
        if set_verbose:
            print(print_statement) # noqa
    except Exception:
        # Narrowed from a bare `except:` so SystemExit / KeyboardInterrupt
        # still propagate; ordinary errors remain best-effort swallowed.
        pass
|
|
@ -13,9 +13,12 @@ class BudgetManager:
|
|||
self.load_data()
|
||||
|
||||
def print_verbose(self, print_statement):
    """Log *print_statement* via ``logging.info`` when ``litellm.set_verbose`` is on.

    Best-effort: any failure (including `litellm` being unavailable) is
    swallowed so verbose logging can never block the budget manager.
    """
    try:
        if litellm.set_verbose:
            import logging
            logging.info(print_statement)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit / KeyboardInterrupt
        # still propagate; ordinary errors remain best-effort swallowed.
        pass
|
||||
|
||||
def load_data(self):
|
||||
if self.client_type == "local":
|
||||
|
|
|
@ -25,8 +25,11 @@ def get_prompt(*args, **kwargs):
|
|||
return None
|
||||
|
||||
def print_verbose(print_statement):
    """Print *print_statement* only when ``litellm.set_verbose`` is enabled.

    Best-effort: any failure (including `litellm` being unavailable) is
    swallowed so verbose logging can never crash cache operations.
    """
    try:
        if litellm.set_verbose:
            print(print_statement) # noqa
    except Exception:
        # Narrowed from a bare `except:` so SystemExit / KeyboardInterrupt
        # still propagate; ordinary errors remain best-effort swallowed.
        pass
|
||||
|
||||
class BaseCache:
|
||||
def set_cache(self, key, value, **kwargs):
|
||||
|
|
|
@ -2141,8 +2141,11 @@ def moderation(input: str, api_key: Optional[str]=None):
|
|||
####### HELPER FUNCTIONS ################
|
||||
## Set verbose to true -> ```litellm.set_verbose = True```
|
||||
## Set verbose to true -> ```litellm.set_verbose = True```
def print_verbose(print_statement):
    """Print *print_statement* only when ``litellm.set_verbose`` is enabled.

    Best-effort: any failure is swallowed so verbose logging can never
    block a completion/moderation call.
    """
    try:
        if litellm.set_verbose:
            print(print_statement) # noqa
    except Exception:
        # Narrowed from a bare `except:` so SystemExit / KeyboardInterrupt
        # still propagate; ordinary errors remain best-effort swallowed.
        pass
|
||||
|
||||
def config_completion(**kwargs):
|
||||
if litellm.config_path != None:
|
||||
|
|
|
@ -199,9 +199,12 @@ celery_app_conn = None
|
|||
celery_fn = None # Redis Queue for handling requests
|
||||
#### HELPER FUNCTIONS ####
|
||||
def print_verbose(print_statement):
    """Print *print_statement* only when the proxy's global ``user_debug`` flag is on.

    Best-effort: any failure (including ``user_debug`` never having been
    assigned) is swallowed so debug printing can never crash the proxy.
    """
    try:
        global user_debug
        if user_debug:
            print(print_statement)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit / KeyboardInterrupt
        # still propagate; ordinary errors remain best-effort swallowed.
        pass
|
||||
|
||||
def usage_telemetry(
|
||||
feature: str,
|
||||
|
|
|
@ -1076,8 +1076,11 @@ class Router:
|
|||
return deployment.get("client", None)
|
||||
|
||||
def print_verbose(self, print_statement):
    """Print *print_statement* (prefixed with ``LiteLLM.Router:``) when verbose.

    Triggered by either the router's own ``self.set_verbose`` or the
    global ``litellm.set_verbose``. Best-effort: any failure is swallowed
    so verbose logging can never block routing.
    """
    try:
        if self.set_verbose or litellm.set_verbose:
            print(f"LiteLLM.Router: {print_statement}") # noqa
    except Exception:
        # Narrowed from a bare `except:` so SystemExit / KeyboardInterrupt
        # still propagate; ordinary errors remain best-effort swallowed.
        pass
|
||||
|
||||
def get_available_deployment(self,
|
||||
model: str,
|
||||
|
|
|
@ -512,8 +512,11 @@ class TextCompletionResponse(OpenAIObject):
|
|||
|
||||
############################################################
|
||||
def print_verbose(print_statement):
    """Print *print_statement* only when ``litellm.set_verbose`` is enabled.

    Best-effort: any failure is swallowed so verbose logging can never
    block or crash the utility helpers.
    """
    try:
        if litellm.set_verbose:
            print(print_statement) # noqa
    except Exception:
        # Narrowed from a bare `except:` so SystemExit / KeyboardInterrupt
        # still propagate; ordinary errors remain best-effort swallowed.
        pass
|
||||
|
||||
####### LOGGING ###################
|
||||
from enum import Enum
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue