forked from phoenix/litellm-mirror

adding lite debugger integration

commit a1ad773667 (parent 02a6eb66ce)

14 changed files with 169 additions and 10 deletions
BIN  dist/litellm-0.1.434-py3-none-any.whl  vendored, new file (binary not shown)
BIN  dist/litellm-0.1.434.tar.gz  vendored, new file (binary not shown)
BIN  dist/litellm-0.1.435-py3-none-any.whl  vendored, new file (binary not shown)
BIN  dist/litellm-0.1.435.tar.gz  vendored, new file (binary not shown)
litellm/integrations/litedebugger.py  (new file, 74 additions)

@@ -0,0 +1,74 @@
+import requests, traceback, json
+class LiteDebugger:
+    def __init__(self):
+        self.api_url = "https://api.litellm.ai/debugger"
+        pass
+
+    def input_log_event(self, model, messages, end_user, litellm_call_id, print_verbose):
+        try:
+            print_verbose(
+                f"LiteLLMDebugger: Logging - Enters input logging function for model {model}"
+            )
+            litellm_data_obj = {
+                "model": model,
+                "messages": messages,
+                "end_user": end_user,
+                "status": "initiated",
+                "litellm_call_id": litellm_call_id
+            }
+            response = requests.post(url=self.api_url, headers={"content-type": "application/json"}, data=json.dumps(litellm_data_obj))
+            print_verbose(f"LiteDebugger: api response - {response.text}")
+        except:
+            print_verbose(f"LiteDebugger: Logging Error - {traceback.format_exc()}")
+            pass
+
+    def log_event(self, model,
+                  messages,
+                  end_user,
+                  response_obj,
+                  start_time,
+                  end_time,
+                  litellm_call_id,
+                  print_verbose,):
+        try:
+            print_verbose(
+                f"LiteLLMDebugger: Logging - Enters input logging function for model {model}"
+            )
+            total_cost = 0  # [TODO] implement cost tracking
+            response_time = (end_time - start_time).total_seconds()
+            if "choices" in response_obj:
+                litellm_data_obj = {
+                    "response_time": response_time,
+                    "model": response_obj["model"],
+                    "total_cost": total_cost,
+                    "messages": messages,
+                    "response": response_obj["choices"][0]["message"]["content"],
+                    "end_user": end_user,
+                    "litellm_call_id": litellm_call_id,
+                    "status": "success"
+                }
+                print_verbose(
+                    f"LiteDebugger: Logging - final data object: {litellm_data_obj}"
+                )
+                response = requests.post(url=self.api_url, headers={"content-type": "application/json"}, data=json.dumps(litellm_data_obj))
+            elif "error" in response_obj:
+                if "Unable to map your input to a model." in response_obj["error"]:
+                    total_cost = 0
+                litellm_data_obj = {
+                    "response_time": response_time,
+                    "model": response_obj["model"],
+                    "total_cost": total_cost,
+                    "messages": messages,
+                    "error": response_obj["error"],
+                    "end_user": end_user,
+                    "litellm_call_id": litellm_call_id,
+                    "status": "failure"
+                }
+                print_verbose(
+                    f"LiteDebugger: Logging - final data object: {litellm_data_obj}"
+                )
+                response = requests.post(url=self.api_url, headers={"content-type": "application/json"}, data=json.dumps(litellm_data_obj))
+            print_verbose(f"LiteDebugger: api response - {response.text}")
+        except:
+            print_verbose(f"LiteDebugger: Logging Error - {traceback.format_exc()}")
+            pass
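A quick way to exercise the new client outside the callback plumbing is to call it directly and watch what gets POSTed to api.litellm.ai/debugger. A minimal sketch; the end-user id, call id, and the use of print as the verbose printer are illustrative stand-ins, not part of the commit:

    from litellm.integrations.litedebugger import LiteDebugger

    debugger = LiteDebugger()
    # Fires the "initiated" event; note this issues a real HTTP request
    # to the debugger endpoint hardcoded in __init__.
    debugger.input_log_event(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hi"}],
        end_user="test-user",           # hypothetical end-user id
        litellm_call_id="test-call-1",  # hypothetical call id
        print_verbose=print,            # stub: route verbose logs to stdout
    )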
litellm/main.py

@@ -707,9 +707,10 @@ def embedding(model, input=[], azure=False, force_timeout=60, litellm_call_id=None
 
         return response
     except Exception as e:
+        ## LOGGING
+        logging.post_call(input=input, api_key=openai.api_key, original_response=e)
         ## Map to OpenAI Exception
         raise exception_type(model=model, original_exception=e, custom_llm_provider="azure" if azure==True else None)
-        raise e
 
 
 ####### HELPER FUNCTIONS ################
litellm/tests/test_litedebugger_integration.py  (new file, 26 additions)

@@ -0,0 +1,26 @@
+# #### What this tests ####
+# # This tests if logging to the litedebugger integration actually works
+# # pytest mistakes intentional bad calls as failed tests -> [TODO] fix this
+# import sys, os
+# import traceback
+# import pytest
+
+# sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
+# import litellm
+# from litellm import embedding, completion
+
+# litellm.input_callback = ["lite_debugger"]
+# litellm.success_callback = ["lite_debugger"]
+# litellm.failure_callback = ["lite_debugger"]
+
+# litellm.set_verbose = True
+
+# user_message = "Hello, how are you?"
+# messages = [{ "content": user_message,"role": "user"}]
+
+
+# #openai call
+# response = completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}])
+
+# #bad request call
+# response = completion(model="chatgpt-test", messages=[{"role": "user", "content": "Hi 👋 - i'm a bad request"}])
litellm/tests/test_supabase_integration.py

@@ -1,5 +1,5 @@
 # #### What this tests ####
-# # This tests if logging to the helicone integration actually works
+# # This tests if logging to the supabase integration actually works
 # # pytest mistakes intentional bad calls as failed tests -> [TODO] fix this
 # import sys, os
 # import traceback

@@ -13,7 +13,7 @@
 # litellm.success_callback = ["supabase"]
 # litellm.failure_callback = ["supabase"]
 
-# litellm.modify_integration("supabase",{"table_name": "test_table"})
+# # litellm.modify_integration("supabase",{"table_name": "test_table"})
 
 # litellm.set_verbose = True
 
litellm/utils.py

@@ -12,6 +12,7 @@ from .integrations.helicone import HeliconeLogger
 from .integrations.aispend import AISpendLogger
 from .integrations.berrispend import BerriSpendLogger
 from .integrations.supabase import Supabase
+from .integrations.litedebugger import LiteDebugger
 from openai.error import OpenAIError as OriginalError
 from openai.openai_object import OpenAIObject
 from .exceptions import (
@@ -35,6 +36,7 @@ heliconeLogger = None
 aispendLogger = None
 berrispendLogger = None
 supabaseClient = None
+liteDebuggerClient = None
 callback_list: Optional[List[str]] = []
 user_logger_fn = None
 additional_details: Optional[Dict[str, str]] = {}
@@ -136,7 +138,7 @@ def install_and_import(package: str):
 ####### LOGGING ###################
 # Logging function -> log the exact model details + what's being sent | Non-Blocking
 class Logging:
-    global supabaseClient
+    global supabaseClient, liteDebuggerClient
     def __init__(self, model, messages, optional_params, litellm_params):
         self.model = model
         self.messages = messages
@@ -178,7 +180,7 @@ class Logging:
                     print_verbose("reaches supabase for logging!")
                     model = self.model
                     messages = self.messages
-                    print(f"litellm._thread_context: {litellm._thread_context}")
+                    print(f"supabaseClient: {supabaseClient}")
                     supabaseClient.input_log_event(
                         model=model,
                         messages=messages,
@@ -186,8 +188,20 @@ class Logging:
                         litellm_call_id=self.litellm_params["litellm_call_id"],
                         print_verbose=print_verbose,
                     )
+                elif callback == "lite_debugger":
+                    print_verbose("reaches litedebugger for logging!")
+                    model = self.model
+                    messages = self.messages
+                    print(f"liteDebuggerClient: {liteDebuggerClient}")
+                    liteDebuggerClient.input_log_event(
+                        model=model,
+                        messages=messages,
+                        end_user=litellm._thread_context.user,
+                        litellm_call_id=self.litellm_params["litellm_call_id"],
+                        print_verbose=print_verbose,
+                    )
         except Exception as e:
-            print_verbose(f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging with integrations {traceback.format_exc}")
+            print_verbose(f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while input logging with integrations {traceback.format_exc()}")
             print_verbose(
                 f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
             )
@@ -635,7 +649,7 @@ def load_test_model(
 
 
 def set_callbacks(callback_list):
-    global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient
+    global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient
     try:
         for callback in callback_list:
             print(f"callback: {callback}")
@@ -697,12 +711,15 @@ def set_callbacks(callback_list):
             elif callback == "supabase":
                 print(f"instantiating supabase")
                 supabaseClient = Supabase()
+            elif callback == "lite_debugger":
+                print(f"instantiating lite_debugger")
+                liteDebuggerClient = LiteDebugger()
     except Exception as e:
         raise e
 
 
 def handle_failure(exception, traceback_exception, start_time, end_time, args, kwargs):
-    global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, aispendLogger, berrispendLogger, supabaseClient
+    global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient
     try:
         # print_verbose(f"handle_failure args: {args}")
         # print_verbose(f"handle_failure kwargs: {kwargs}")
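With the instantiation above, the integration is opt-in by string name: set_callbacks maps "lite_debugger" to a LiteDebugger instance the first time callbacks are set up. A minimal sketch of enabling it, mirroring the commented-out test file added in this commit (model and message are illustrative):

    import litellm
    from litellm import completion

    # Register the debugger on all three hook points wired up in this commit:
    # input (pre-call), success, and failure.
    litellm.input_callback = ["lite_debugger"]
    litellm.success_callback = ["lite_debugger"]
    litellm.failure_callback = ["lite_debugger"]
    litellm.set_verbose = True

    response = completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi"}])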
@@ -827,6 +844,32 @@ def handle_failure(exception, traceback_exception, start_time, end_time, args, kwargs):
                     litellm_call_id=kwargs["litellm_call_id"],
                     print_verbose=print_verbose,
                 )
+            elif callback == "lite_debugger":
+                print_verbose("reaches lite_debugger for logging!")
+                print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
+                model = args[0] if len(args) > 0 else kwargs["model"]
+                messages = args[1] if len(args) > 1 else kwargs["messages"]
+                result = {
+                    "model": model,
+                    "created": time.time(),
+                    "error": traceback_exception,
+                    "usage": {
+                        "prompt_tokens": prompt_token_calculator(
+                            model, messages=messages
+                        ),
+                        "completion_tokens": 0,
+                    },
+                }
+                liteDebuggerClient.log_event(
+                    model=model,
+                    messages=messages,
+                    end_user=litellm._thread_context.user,
+                    response_obj=result,
+                    start_time=start_time,
+                    end_time=end_time,
+                    litellm_call_id=kwargs["litellm_call_id"],
+                    print_verbose=print_verbose,
+                )
     except:
         print_verbose(
             f"Error Occurred while logging failure: {traceback.format_exc()}"
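On the failure path, handle_failure synthesizes a response object whose "error" key steers LiteDebugger.log_event into its "status": "failure" branch. A minimal sketch of that branch in isolation; the timestamps, error string, and ids are illustrative, not values the commit produces:

    import datetime
    from litellm.integrations.litedebugger import LiteDebugger

    debugger = LiteDebugger()
    start = datetime.datetime.now()
    end = start + datetime.timedelta(seconds=2)

    # Shaped like the dict handle_failure builds: "error" present, no "choices",
    # and a "model" key, which the failure branch reads.
    failed_response = {
        "model": "chatgpt-test",
        "error": "Unable to map your input to a model.",  # illustrative error text
        "usage": {"prompt_tokens": 5, "completion_tokens": 0},
    }

    debugger.log_event(
        model="chatgpt-test",
        messages=[{"role": "user", "content": "Hi"}],
        end_user="test-user",           # hypothetical end-user id
        response_obj=failed_response,
        start_time=start,
        end_time=end,
        litellm_call_id="test-call-2",  # hypothetical call id
        print_verbose=print,            # stub verbose printer
    )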
@@ -847,7 +890,7 @@ def handle_failure(exception, traceback_exception, start_time, end_time, args, kwargs):
 
 
 def handle_success(args, kwargs, result, start_time, end_time):
-    global heliconeLogger, aispendLogger, supabaseClient
+    global heliconeLogger, aispendLogger, supabaseClient, liteDebuggerClient
     try:
         success_handler = additional_details.pop("success_handler", None)
         failure_handler = additional_details.pop("failure_handler", None)
@@ -925,6 +968,21 @@ def handle_success(args, kwargs, result, start_time, end_time):
                     litellm_call_id=kwargs["litellm_call_id"],
                     print_verbose=print_verbose,
                 )
+            elif callback == "lite_debugger":
+                print_verbose("reaches lite_debugger for logging!")
+                model = args[0] if len(args) > 0 else kwargs["model"]
+                messages = args[1] if len(args) > 1 else kwargs["messages"]
+                print(f"liteDebuggerClient: {liteDebuggerClient}")
+                liteDebuggerClient.log_event(
+                    model=model,
+                    messages=messages,
+                    end_user=litellm._thread_context.user,
+                    response_obj=result,
+                    start_time=start_time,
+                    end_time=end_time,
+                    litellm_call_id=kwargs["litellm_call_id"],
+                    print_verbose=print_verbose,
+                )
     except Exception as e:
         ## LOGGING
         exception_logging(logger_fn=user_logger_fn, exception=e)
pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.434"
+version = "0.1.436"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"