forked from phoenix/litellm-mirror
with prompt layer test
This commit is contained in:
parent 7d99e09a4a
commit b2e72e52e4

3 changed files with 63 additions and 15 deletions
litellm/integrations/prompt_layer.py

@@ -1,5 +1,5 @@
 #### What this does ####
-# On success, logs events to Helicone
+# On success, logs events to Promptlayer
 import dotenv, os
 import requests
 import requests
@@ -7,32 +7,34 @@ import requests
 dotenv.load_dotenv()  # Loading env variables using dotenv
 import traceback


-class PromptLayer:
+class PromptLayerLogger:
     # Class variables or attributes
     def __init__(self):
         # Instance variables
         self.key = os.getenv("PROMPTLAYER_API_KEY")

-    def log_event(self, model, response_obj, start_time, end_time, print_verbose):
+    def log_event(self, kwargs, response_obj, start_time, end_time, print_verbose):
         # Method definition
         try:
             print_verbose(
-                f"Prompt Layer Logging - Enters logging function for model {model}"
+                f"Prompt Layer Logging - Enters logging function for model {kwargs}"
             )

             request_response = requests.post(
                 "https://api.promptlayer.com/rest/track-request",
                 json={
-                    "function_name": "openai.Completion.create",
-                    "kwargs": {"engine": "text-ada-001", "prompt": "My name is"},
+                    "function_name": "openai.ChatCompletion.create",
+                    "kwargs": kwargs,
                     "tags": ["hello", "world"],
-                    "request_response": response_obj,
-                    "request_start_time": start_time,
-                    "request_end_time": end_time,
-                    "prompt_id": "<PROMPT ID>",
-                    "prompt_input_variables": "<Dictionary of variables for prompt>",
-                    "prompt_version":1,
-                    "api_key": self.key
+                    "request_response": dict(response_obj),  # TODO: Check if we need a dict
+                    "request_start_time": int(start_time.timestamp()),
+                    "request_end_time": int(end_time.timestamp()),
+                    "api_key": self.key,
+                    # Optional params for PromptLayer
+                    # "prompt_id": "<PROMPT ID>",
+                    # "prompt_input_variables": "<Dictionary of variables for prompt>",
+                    # "prompt_version":1,
+
                 },
             )
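For context on what the rewritten log_event sends, here is a minimal standalone sketch of the same track-request call, built only from the endpoint and field names visible in the diff above; the kwargs and response values are illustrative, and PROMPTLAYER_API_KEY is assumed to be set.

# Minimal standalone sketch of the track-request call made by
# PromptLayerLogger.log_event; payload values here are illustrative.
import datetime
import os

import requests

start_time = datetime.datetime.now()
# ... the actual LLM call would run here ...
end_time = datetime.datetime.now()

response = requests.post(
    "https://api.promptlayer.com/rest/track-request",
    json={
        "function_name": "openai.ChatCompletion.create",
        "kwargs": {
            "model": "gpt-3.5-turbo",
            "messages": [{"role": "user", "content": "Hi"}],
        },
        "tags": ["hello", "world"],
        "request_response": {
            "choices": [{"message": {"role": "assistant", "content": "Hello!"}}]
        },
        # PromptLayer expects Unix timestamps, hence int(...timestamp())
        "request_start_time": int(start_time.timestamp()),
        "request_end_time": int(end_time.timestamp()),
        "api_key": os.getenv("PROMPTLAYER_API_KEY"),
    },
)
print(response.status_code)

Note the two behavioral fixes in this hunk: the real completion kwargs are forwarded instead of a hard-coded text-ada-001 payload, and the datetime objects are converted to Unix timestamps before serialization.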
litellm/tests/test_promptlayer_integration.py (new file, 31 lines)

@@ -0,0 +1,31 @@
+# #### What this tests ####
+# # This tests if logging to the promptlayer integration actually works
+# # Adds the parent directory to the system path
+# import sys
+# import os
+
+# sys.path.insert(0, os.path.abspath('../..'))
+
+# from litellm import completion, embedding
+# import litellm
+
+# litellm.success_callback = ["promptlayer"]
+
+# litellm.set_verbose = True
+
+
+# def test_chat_openai():
+#     try:
+#         response = completion(model="gpt-3.5-turbo",
+#                               messages=[{
+#                                   "role": "user",
+#                                   "content": "Hi 👋 - i'm openai"
+#                               }])
+
+#         print(response)
+
+#     except Exception as e:
+#         print(e)
+
+# # test_chat_openai()
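Uncommented, the test reduces to the sketch below. Running it assumes valid OPENAI_API_KEY and PROMPTLAYER_API_KEY environment variables and network access, which is presumably why the file is checked in commented out.

# Runnable form of the commented-out test above; requires
# OPENAI_API_KEY and PROMPTLAYER_API_KEY to be set.
import os
import sys

sys.path.insert(0, os.path.abspath("../.."))  # use the local litellm checkout

import litellm
from litellm import completion

litellm.success_callback = ["promptlayer"]
litellm.set_verbose = True


def test_chat_openai():
    try:
        response = completion(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}],
        )
        print(response)
    except Exception as e:
        print(e)


test_chat_openai()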
litellm/utils.py

@@ -13,6 +13,8 @@ from .integrations.helicone import HeliconeLogger
 from .integrations.aispend import AISpendLogger
 from .integrations.berrispend import BerriSpendLogger
 from .integrations.supabase import Supabase
+from .integrations.llmonitor import LLMonitorLogger
+from .integrations.prompt_layer import PromptLayerLogger
 from .integrations.litedebugger import LiteDebugger
 from openai.error import OpenAIError as OriginalError
 from openai.openai_object import OpenAIObject
@@ -35,6 +37,7 @@ posthog = None
 slack_app = None
 alerts_channel = None
 heliconeLogger = None
+promptLayerLogger = None
 aispendLogger = None
 berrispendLogger = None
 supabaseClient = None
@@ -729,7 +732,7 @@ def load_test_model(


 def set_callbacks(callback_list):
-    global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger
+    global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger, promptLayerLogger
     try:
         for callback in callback_list:
             print_verbose(f"callback: {callback}")
@@ -784,6 +787,8 @@ def set_callbacks(callback_list):
                 heliconeLogger = HeliconeLogger()
             elif callback == "llmonitor":
                 llmonitorLogger = LLMonitorLogger()
+            elif callback == "promptlayer":
+                promptLayerLogger = PromptLayerLogger()
             elif callback == "aispend":
                 aispendLogger = AISpendLogger()
             elif callback == "berrispend":
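Condensed, the registration pattern these two hunks extend looks like the sketch below (not the full litellm function; unrelated branches elided): each recognized callback string lazily instantiates a module-level logger singleton that the success path reuses.

# Condensed sketch of litellm's callback registration pattern;
# unrelated integrations are elided.
promptLayerLogger = None


def set_callbacks(callback_list):
    global promptLayerLogger
    for callback in callback_list:
        if callback == "promptlayer":
            # instantiated once, then reused for every logged request
            promptLayerLogger = PromptLayerLogger()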
@@ -1053,6 +1058,16 @@ def handle_success(args, kwargs, result, start_time, end_time):
                 run_id=kwargs["litellm_call_id"],
                 print_verbose=print_verbose,
             )
+        elif callback == "promptlayer":
+            print_verbose("reaches promptlayer for logging!")
+            promptLayerLogger.log_event(
+                kwargs=kwargs,
+                response_obj=result,
+                start_time=start_time,
+                end_time=end_time,
+                print_verbose=print_verbose,
+            )
+
         elif callback == "aispend":
             print_verbose("reaches aispend for logging!")
             model = args[0] if len(args) > 0 else kwargs["model"]
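End to end, the success path added here behaves like this condensed sketch: after a successful completion() call, handle_success walks litellm.success_callback and hands the original request kwargs plus the result and timing to the logger.

# Condensed sketch of the new success path; assumes set_callbacks has
# already run, so promptLayerLogger is instantiated.
def handle_success(args, kwargs, result, start_time, end_time):
    for callback in litellm.success_callback:
        if callback == "promptlayer":
            promptLayerLogger.log_event(
                kwargs=kwargs,          # original completion() kwargs
                response_obj=result,    # model response; dict()-able
                start_time=start_time,  # datetime; log_event converts to epoch seconds
                end_time=end_time,
                print_verbose=print_verbose,
            )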