mirror of https://github.com/BerriAI/litellm.git
synced 2025-04-26 19:24:27 +00:00
(feat) use no-log as a litellm param

parent d6dc28f0ed
commit ddd231a8c2

2 changed files with 19 additions and 8 deletions
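Below is a minimal, hypothetical caller-side sketch of what this commit enables: the caller passes a "no-log" kwarg to litellm.completion(), completion() reads it into no_log and forwards it through get_litellm_params(), and the logging callbacks then skip the request. The model name and message are placeholders, not part of the diff.

# Hypothetical usage sketch (not part of this commit's diff).
import litellm

response = litellm.completion(
    model="gpt-3.5-turbo",  # placeholder model name
    messages=[{"role": "user", "content": "hello"}],
    # the kwarg name contains a dash, so it is passed via ** unpacking
    **{"no-log": True},
)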
@@ -487,6 +487,8 @@ def completion(
     ### ASYNC CALLS ###
     acompletion = kwargs.get("acompletion", False)
     client = kwargs.get("client", None)
+    ### Admin Controls ###
+    no_log = kwargs.get("no-log", False)
     ######## end of unpacking kwargs ###########
     openai_params = [
         "functions",
@@ -727,6 +729,7 @@ def completion(
         model_info=model_info,
         proxy_server_request=proxy_server_request,
         preset_cache_key=preset_cache_key,
+        no_log=no_log,
     )
     logging.update_environment_variables(
         model=model,
@@ -1279,6 +1279,10 @@ class Logging:
             for callback in callbacks:
                 try:
+                    litellm_params = self.model_call_details.get("litellm_params", {})
+                    if litellm_params.get("no-log", False) == True:
+                        print_verbose("no-log request, skipping logging")
+                        continue
                     if callback == "lite_debugger":
                         print_verbose("reaches lite_debugger for logging!")
                         print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
@@ -1708,6 +1712,9 @@ class Logging:
             verbose_logger.debug(f"Async success callbacks: {callbacks}")
             for callback in callbacks:
                 try:
+                    if kwargs.get("no-log", False) == True:
+                        print_verbose("no-log request, skipping logging")
+                        continue
                     if callback == "cache" and litellm.cache is not None:
                         # set_cache once complete streaming response is built
                         print_verbose("async success_callback: reaches cache for logging!")
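The two Logging hunks above add the same skip pattern to the sync and async success handlers. The standalone snippet below (not litellm source; all names are illustrative) shows that control flow in isolation: when the request's litellm_params carry "no-log", each callback is skipped via continue.

# Illustrative only: mimics the callback loop the hunks modify.
def run_success_callbacks(callbacks, model_call_details):
    litellm_params = model_call_details.get("litellm_params", {})
    for callback in callbacks:
        try:
            if litellm_params.get("no-log", False) is True:
                print("no-log request, skipping logging")
                continue
            callback(model_call_details)  # stand-in for the real logging integrations
        except Exception as e:
            print(f"callback error: {e}")

# With "no-log" set, the callback below never fires.
run_success_callbacks(
    [lambda details: print("logged:", details.get("model"))],
    {"model": "gpt-3.5-turbo", "litellm_params": {"no-log": True}},
)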
@@ -2986,14 +2993,13 @@ def client(original_function):
                 f"Async Wrapper: Completed Call, calling async_success_handler: {logging_obj.async_success_handler}"
             )
             # check if user does not want this to be logged
-            if kwargs.get("no-log", False) == False:
-                asyncio.create_task(
-                    logging_obj.async_success_handler(result, start_time, end_time)
-                )
-                threading.Thread(
-                    target=logging_obj.success_handler,
-                    args=(result, start_time, end_time),
-                ).start()
+            asyncio.create_task(
+                logging_obj.async_success_handler(result, start_time, end_time)
+            )
+            threading.Thread(
+                target=logging_obj.success_handler,
+                args=(result, start_time, end_time),
+            ).start()

             # RETURN RESULT
             if hasattr(result, "_hidden_params"):
@@ -3895,6 +3901,7 @@ def get_litellm_params(
     proxy_server_request=None,
     acompletion=None,
     preset_cache_key=None,
+    no_log=None,
 ):
     litellm_params = {
         "acompletion": acompletion,
@@ -3911,6 +3918,7 @@ def get_litellm_params(
         "model_info": model_info,
         "proxy_server_request": proxy_server_request,
         "preset_cache_key": preset_cache_key,
+        "no-log": no_log,
         "stream_response": {},  # litellm_call_id: ModelResponse Dict
     }
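The last two hunks carry the flag through get_litellm_params(): the no_log argument is stored under the "no-log" key, which the Logging handlers later read back from model_call_details["litellm_params"]. A simplified sketch of that mapping (not the real function body or full signature):

# Simplified sketch of the param plumbing; the real function takes many more arguments.
def get_litellm_params(no_log=None, **other):
    return {"no-log": no_log, **other}

params = get_litellm_params(no_log=True, preset_cache_key=None)
assert params["no-log"] is True  # the key Logging.success_handler checks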