diff --git a/litellm/integrations/prompt_layer.py b/litellm/integrations/prompt_layer.py
index 783434e8f..e1cdb666e 100644
--- a/litellm/integrations/prompt_layer.py
+++ b/litellm/integrations/prompt_layer.py
@@ -1,5 +1,5 @@
 #### What this does ####
-# On success, logs events to Helicone
+# On success, logs events to Promptlayer
 import dotenv, os
 import requests
 import requests
@@ -7,32 +7,34 @@ import requests
 dotenv.load_dotenv()  # Loading env variables using dotenv
 import traceback
 
 
-class PromptLayer:
+class PromptLayerLogger:
     # Class variables or attributes
     def __init__(self):
         # Instance variables
         self.key = os.getenv("PROMPTLAYER_API_KEY")
 
-    def log_event(self, model, response_obj, start_time, end_time, print_verbose):
+    def log_event(self, kwargs, response_obj, start_time, end_time, print_verbose):
         # Method definition
         try:
             print_verbose(
-                f"Prompt Layer Logging - Enters logging function for model {model}"
+                f"Prompt Layer Logging - Enters logging function for model {kwargs}"
             )
 
             request_response = requests.post(
-              "https://api.promptlayer.com/rest/track-request",
+                "https://api.promptlayer.com/rest/track-request",
                 json={
-                    "function_name": "openai.Completion.create",
-                    "kwargs": {"engine": "text-ada-001", "prompt": "My name is"},
+                    "function_name": "openai.ChatCompletion.create",
+                    "kwargs": kwargs,
                     "tags": ["hello", "world"],
-                    "request_response": response_obj,
-                    "request_start_time": start_time,
-                    "request_end_time": end_time,
-                    "prompt_id": "",
-                    "prompt_input_variables": "",
-                    "prompt_version":1,
-                    "api_key": self.key
+                    "request_response": dict(response_obj),  # TODO: Check if we need a dict
+                    "request_start_time": int(start_time.timestamp()),
+                    "request_end_time": int(end_time.timestamp()),
+                    "api_key": self.key,
+                    # Optional params for PromptLayer
+                    # "prompt_id": "",
+                    # "prompt_input_variables": "",
+                    # "prompt_version":1,
+                },
             )
 
diff --git a/litellm/tests/test_promptlayer_integration.py b/litellm/tests/test_promptlayer_integration.py
new file mode 100644
index 000000000..2a43d5373
--- /dev/null
+++ b/litellm/tests/test_promptlayer_integration.py
@@ -0,0 +1,31 @@
+# #### What this tests ####
+# # This tests if logging to the promptlayer integration actually works
+# # Adds the parent directory to the system path
+# import sys
+# import os
+
+# sys.path.insert(0, os.path.abspath('../..'))
+
+# from litellm import completion, embedding
+# import litellm
+
+# litellm.success_callback = ["promptlayer"]
+
+
+# litellm.set_verbose = True
+
+
+# def test_chat_openai():
+#     try:
+#         response = completion(model="gpt-3.5-turbo",
+#                               messages=[{
+#                                   "role": "user",
+#                                   "content": "Hi 👋 - i'm openai"
+#                               }])
+
+#         print(response)
+
+#     except Exception as e:
+#         print(e)
+
+# # test_chat_openai()
diff --git a/litellm/utils.py b/litellm/utils.py
index 296ec308e..96d094088 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -13,6 +13,8 @@ from .integrations.helicone import HeliconeLogger
 from .integrations.aispend import AISpendLogger
 from .integrations.berrispend import BerriSpendLogger
 from .integrations.supabase import Supabase
+from .integrations.llmonitor import LLMonitorLogger
+from .integrations.prompt_layer import PromptLayerLogger
 from .integrations.litedebugger import LiteDebugger
 from openai.error import OpenAIError as OriginalError
 from openai.openai_object import OpenAIObject
@@ -35,6 +37,7 @@ posthog = None
 slack_app = None
 alerts_channel = None
 heliconeLogger = None
+promptLayerLogger = None
 aispendLogger = None
 berrispendLogger = None
 supabaseClient = None
@@ -729,7 +732,7 @@ def load_test_model(
 
 
 def set_callbacks(callback_list):
-    global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger
+    global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger, promptLayerLogger
     try:
         for callback in callback_list:
             print_verbose(f"callback: {callback}")
@@ -784,6 +787,8 @@ def set_callbacks(callback_list):
             heliconeLogger = HeliconeLogger()
         elif callback == "llmonitor":
             llmonitorLogger = LLMonitorLogger()
+        elif callback == "promptlayer":
+            promptLayerLogger = PromptLayerLogger()
         elif callback == "aispend":
             aispendLogger = AISpendLogger()
         elif callback == "berrispend":
@@ -1053,6 +1058,16 @@ def handle_success(args, kwargs, result, start_time, end_time):
                 run_id=kwargs["litellm_call_id"],
                 print_verbose=print_verbose,
             )
+        elif callback == "promptlayer":
+            print_verbose("reaches promptlayer for logging!")
+            promptLayerLogger.log_event(
+                kwargs=kwargs,
+                response_obj=result,
+                start_time=start_time,
+                end_time=end_time,
+                print_verbose=print_verbose,
+
+            )
         elif callback == "aispend":
             print_verbose("reaches aispend for logging!")
             model = args[0] if len(args) > 0 else kwargs["model"]
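
For reference, a minimal usage sketch distilled from the commented-out test added above, showing how a caller would exercise this integration end to end (it assumes PROMPTLAYER_API_KEY and OPENAI_API_KEY are set in the environment):

import litellm
from litellm import completion

# Registering the callback routes every successful completion through
# PromptLayerLogger.log_event() via handle_success() in utils.py
litellm.success_callback = ["promptlayer"]
litellm.set_verbose = True  # surfaces the "reaches promptlayer for logging!" trace

response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}],
)
print(response)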