Fix LiteDebugger double-logging error

This commit is contained in:
Krrish Dholakia 2023-09-07 18:02:20 -07:00
parent f80eaac47d
commit 554b05015e
5 changed files with 20 additions and 13 deletions

View file

@@ -40,7 +40,7 @@ class LiteDebugger:
litellm_params,
optional_params,
):
print_verbose(f"LiteDebugger: Pre-API Call Logging")
print_verbose(f"LiteDebugger: Pre-API Call Logging for call id {litellm_call_id}")
try:
print_verbose(
f"LiteLLMDebugger: Logging - Enters input logging function for model {model}"
@@ -101,7 +101,7 @@ class LiteDebugger:
pass
def post_call_log_event(self, original_response, litellm_call_id, print_verbose, call_type, stream):
print_verbose(f"LiteDebugger: Post-API Call Logging")
print_verbose(f"LiteDebugger: Post-API Call Logging for call id {litellm_call_id}")
try:
if call_type == "embedding":
litellm_data_obj = {
@@ -147,7 +147,7 @@ class LiteDebugger:
call_type,
stream = False
):
print_verbose(f"LiteDebugger: Success/Failure Call Logging")
print_verbose(f"LiteDebugger: Success/Failure Call Logging for call id {litellm_call_id}")
try:
print_verbose(
f"LiteLLMDebugger: Success/Failure Logging - Enters handler logging function for function {call_type} and stream set to {stream} with response object {response_obj}"

View file

@@ -70,13 +70,15 @@ messages = [{"content": user_message, "role": "user"}]
# # Redirect stdout
# old_stdout = sys.stdout
# sys.stdout = new_stdout = io.StringIO()
# litellm.token = "a67abbaf-35b8-4649-8647-68c5fe8d37fb" # generate one here - https://www.uuidgenerator.net/version4
# response = completion(model="gpt-3.5-turbo", messages=messages)
# litellm.token = "1e6795ea-a75e-4231-8110-dcc721dcffc3" # generate one here - https://www.uuidgenerator.net/version4
# completion(model="gpt-3.5-turbo", messages=messages)
# completion(model="claude-instant-1", messages=messages)
# # Restore stdout
# sys.stdout = old_stdout
# output = new_stdout.getvalue().strip()
# print(output)
# if "LiteDebugger: Pre-API Call Logging" not in output:
# raise Exception(f"LiteLLMDebugger: pre-api call not logged!")
# if "LiteDebugger: Post-API Call Logging" not in output:

View file

@@ -159,7 +159,7 @@ class Logging:
def pre_call(self, input, api_key, model=None, additional_args={}):
# Log the exact input to the LLM API
print_verbose(f"Logging Details Pre-API Call")
print_verbose(f"Logging Details Pre-API Call for call id {self.litellm_call_id}")
try:
# print_verbose(f"logging pre call for model: {self.model} with call type: {self.call_type}")
self.model_call_details["input"] = input
@@ -200,7 +200,7 @@ class Logging:
)
elif callback == "lite_debugger":
print_verbose("reaches litedebugger for logging!")
print_verbose(f"reaches litedebugger for logging! - model_call_details {self.model_call_details}")
model = self.model_call_details["model"]
messages = self.model_call_details["input"]
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
@@ -294,6 +294,7 @@ class Logging:
start_time = self.start_time
if end_time is None:
end_time = datetime.datetime.now()
print_verbose(f"success callbacks: {litellm.success_callback}")
for callback in litellm.success_callback:
try:
if callback == "lite_debugger":
@@ -441,9 +442,12 @@ def client(original_function):
function_id = kwargs["id"] if "id" in kwargs else None
if litellm.use_client or ("use_client" in kwargs and kwargs["use_client"] == True):
print_verbose(f"litedebugger initialized")
litellm.input_callback.append("lite_debugger")
litellm.success_callback.append("lite_debugger")
litellm.failure_callback.append("lite_debugger")
if "lite_debugger" not in litellm.input_callback:
litellm.input_callback.append("lite_debugger")
if "lite_debugger" not in litellm.success_callback:
litellm.success_callback.append("lite_debugger")
if "lite_debugger" not in litellm.failure_callback:
litellm.failure_callback.append("lite_debugger")
if (
len(litellm.input_callback) > 0
or len(litellm.success_callback) > 0
@@ -540,7 +544,8 @@ def client(original_function):
result['litellm_call_id'] = litellm_call_id
# LOG SUCCESS - handle streaming success logging in the _next_ object, remove `handle_success` once it's deprecated
threading.Thread(target=logging_obj.success_handler, args=(result, start_time, end_time)).start()
logging_obj.success_handler(result, start_time, end_time)
# threading.Thread(target=logging_obj.success_handler, args=(result, start_time, end_time)).start()
my_thread = threading.Thread(
target=handle_success, args=(args, kwargs, result, start_time, end_time)
) # don't interrupt execution of main thread

View file

@@ -1,6 +1,6 @@
[tool.poetry]
name = "litellm"
version = "0.1.560"
version = "0.1.561"
description = "Library to easily interface with LLM API providers"
authors = ["BerriAI"]
license = "MIT License"