forked from phoenix/litellm-mirror
with prompt layer test

parent 7d99e09a4a
commit b2e72e52e4
3 changed files with 63 additions and 15 deletions
litellm/integrations/prompt_layer.py

@@ -1,5 +1,5 @@
 #### What this does ####
-# On success, logs events to Helicone
+# On success, logs events to Promptlayer
 import dotenv, os
 import requests
 import requests
@@ -7,32 +7,34 @@ import requests
 dotenv.load_dotenv()  # Loading env variables using dotenv
 import traceback
 
-class PromptLayer:
+class PromptLayerLogger:
     # Class variables or attributes
     def __init__(self):
         # Instance variables
         self.key = os.getenv("PROMPTLAYER_API_KEY")
 
-    def log_event(self, model, response_obj, start_time, end_time, print_verbose):
+    def log_event(self, kwargs, response_obj, start_time, end_time, print_verbose):
         # Method definition
         try:
             print_verbose(
-                f"Prompt Layer Logging - Enters logging function for model {model}"
+                f"Prompt Layer Logging - Enters logging function for model {kwargs}"
             )
 
             request_response = requests.post(
                 "https://api.promptlayer.com/rest/track-request",
                 json={
-                    "function_name": "openai.Completion.create",
-                    "kwargs": {"engine": "text-ada-001", "prompt": "My name is"},
+                    "function_name": "openai.ChatCompletion.create",
+                    "kwargs": kwargs,
                     "tags": ["hello", "world"],
-                    "request_response": response_obj,
-                    "request_start_time": start_time,
-                    "request_end_time": end_time,
-                    "prompt_id": "<PROMPT ID>",
-                    "prompt_input_variables": "<Dictionary of variables for prompt>",
-                    "prompt_version":1,
-                    "api_key": self.key
+                    "request_response": dict(response_obj),  # TODO: Check if we need a dict
+                    "request_start_time": int(start_time.timestamp()),
+                    "request_end_time": int(end_time.timestamp()),
+                    "api_key": self.key,
+                    # Optional params for PromptLayer
+                    # "prompt_id": "<PROMPT ID>",
+                    # "prompt_input_variables": "<Dictionary of variables for prompt>",
+                    # "prompt_version":1,
 
                 },
             )
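Taken in isolation, the reworked log_event can be smoke-tested without litellm's callback machinery. Below is a minimal sketch (not part of the commit); the module path is assumed from the later import in utils.py, PROMPTLAYER_API_KEY must be set, response_obj can be anything dict() accepts, and print_verbose can be any callable:

# Minimal sketch (not from the commit) of driving the new log_event
# signature directly; it issues a real POST to PromptLayer's
# track-request endpoint, so PROMPTLAYER_API_KEY must be set.
import datetime

from litellm.integrations.prompt_layer import PromptLayerLogger  # assumed module path

logger = PromptLayerLogger()  # picks up PROMPTLAYER_API_KEY from the environment

start = datetime.datetime.now()
# ... the actual LLM call would happen here ...
end = datetime.datetime.now()

logger.log_event(
    kwargs={"model": "gpt-3.5-turbo",
            "messages": [{"role": "user", "content": "Hi"}]},
    response_obj={"choices": [{"message": {"role": "assistant", "content": "Hello!"}}]},
    start_time=start,     # must be a datetime; log_event calls .timestamp() on it
    end_time=end,
    print_verbose=print,  # any callable taking a string works
)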
litellm/tests/test_promptlayer_integration.py (new file, 31 lines)

@@ -0,0 +1,31 @@
+# #### What this tests ####
+# # This tests if logging to the promptlayer integration actually works
+# # Adds the parent directory to the system path
+# import sys
+# import os
+
+# sys.path.insert(0, os.path.abspath('../..'))
+
+# from litellm import completion, embedding
+# import litellm
+
+# litellm.success_callback = ["promptlayer"]
+
+
+# litellm.set_verbose = True
+
+
+# def test_chat_openai():
+#     try:
+#         response = completion(model="gpt-3.5-turbo",
+#                               messages=[{
+#                                   "role": "user",
+#                                   "content": "Hi 👋 - i'm openai"
+#                               }])
+
+#         print(response)
+
+#     except Exception as e:
+#         print(e)
+
+# # test_chat_openai()
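The test is committed fully commented out. Uncommented, it would run roughly as follows; this sketch assumes OPENAI_API_KEY and PROMPTLAYER_API_KEY are exported and the file is executed from litellm/tests/:

# Uncommented sketch of the committed test; assumes OPENAI_API_KEY and
# PROMPTLAYER_API_KEY are set in the environment.
import sys
import os

sys.path.insert(0, os.path.abspath("../.."))  # make the local litellm package importable

import litellm
from litellm import completion

litellm.success_callback = ["promptlayer"]  # routes successful calls to PromptLayerLogger
litellm.set_verbose = True


def test_chat_openai():
    try:
        response = completion(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}],
        )
        print(response)
    except Exception as e:
        print(e)


test_chat_openai()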
litellm/utils.py

@@ -13,6 +13,8 @@ from .integrations.helicone import HeliconeLogger
 from .integrations.aispend import AISpendLogger
 from .integrations.berrispend import BerriSpendLogger
 from .integrations.supabase import Supabase
+from .integrations.llmonitor import LLMonitorLogger
+from .integrations.prompt_layer import PromptLayerLogger
 from .integrations.litedebugger import LiteDebugger
 from openai.error import OpenAIError as OriginalError
 from openai.openai_object import OpenAIObject
@@ -35,6 +37,7 @@ posthog = None
 slack_app = None
 alerts_channel = None
 heliconeLogger = None
+promptLayerLogger = None
 aispendLogger = None
 berrispendLogger = None
 supabaseClient = None
@@ -729,7 +732,7 @@ def load_test_model(
 
 
 def set_callbacks(callback_list):
-    global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger
+    global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger, promptLayerLogger
     try:
         for callback in callback_list:
             print_verbose(f"callback: {callback}")
@@ -784,6 +787,8 @@ def set_callbacks(callback_list):
             heliconeLogger = HeliconeLogger()
         elif callback == "llmonitor":
             llmonitorLogger = LLMonitorLogger()
+        elif callback == "promptlayer":
+            promptLayerLogger = PromptLayerLogger()
         elif callback == "aispend":
             aispendLogger = AISpendLogger()
         elif callback == "berrispend":
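The wiring above follows the module's existing callback pattern: a module-level logger singleton initialized to None, a global declaration inside set_callbacks, and one elif branch per callback name. Reduced to its essentials (a hypothetical condensation, not litellm's actual code):

# Hypothetical condensation of the registration pattern used above;
# only the "promptlayer" branch mirrors the commit, the rest is trimmed.
from litellm.integrations.prompt_layer import PromptLayerLogger  # assumed path, as above

promptLayerLogger = None  # module-level singleton, created lazily


def set_callbacks(callback_list):
    global promptLayerLogger
    for callback in callback_list:
        if callback == "promptlayer":
            promptLayerLogger = PromptLayerLogger()  # one instance reused for every request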
@@ -1053,6 +1058,16 @@ def handle_success(args, kwargs, result, start_time, end_time):
                     run_id=kwargs["litellm_call_id"],
                     print_verbose=print_verbose,
                 )
+            elif callback == "promptlayer":
+                print_verbose("reaches promptlayer for logging!")
+                promptLayerLogger.log_event(
+                    kwargs=kwargs,
+                    response_obj=result,
+                    start_time=start_time,
+                    end_time=end_time,
+                    print_verbose=print_verbose,
+
+                )
             elif callback == "aispend":
                 print_verbose("reaches aispend for logging!")
                 model = args[0] if len(args) > 0 else kwargs["model"]
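Putting the three files together, the flow this commit enables looks roughly like this (a hedged sketch; the dispatch path through handle_success is inferred from the diff):

# Hedged end-to-end sketch of the flow wired up by this commit.
import litellm
from litellm import completion

# Registering the callback name triggers set_callbacks(["promptlayer"]),
# which creates the promptLayerLogger singleton.
litellm.success_callback = ["promptlayer"]

response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "hello"}],
)
# On success, handle_success(...) reaches the "promptlayer" branch and calls
# promptLayerLogger.log_event(kwargs=..., response_obj=response, ...), which
# POSTs the request metadata to https://api.promptlayer.com/rest/track-request.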