mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-27 19:54:13 +00:00
add integration files
This commit is contained in:
parent
9e9b1f1504
commit
b1fb75af1b
3 changed files with 96 additions and 10 deletions
|
@ -1,12 +1,9 @@
|
|||
# Logging Integrations

| Integration | Required OS Variables | How to Use with callbacks |
| ----------- | -------------------------------------------------------- | ---------------------------------------- |
| LLMonitor | `LLMONITOR_APP_ID` | `litellm.success_callback=["llmonitor"]` |
| Sentry | `SENTRY_API_URL` | `litellm.success_callback=["sentry"]` |
| Posthog | `POSTHOG_API_KEY`,`POSTHOG_API_URL` | `litellm.success_callback=["posthog"]` |
| Slack | `SLACK_API_TOKEN`,`SLACK_API_SECRET`,`SLACK_API_CHANNEL` | `litellm.success_callback=["slack"]` |
| Helicone | `HELICONE_API_TOKEN` | `litellm.success_callback=["helicone"]` |
|
|
61
litellm/integrations/llmonitor.py
Normal file
61
litellm/integrations/llmonitor.py
Normal file
|
@ -0,0 +1,61 @@
|
|||
#### What this does ####
# On success + failure, log events to LLMonitor (llmonitor.com)
||||
import datetime
import traceback
import dotenv
import os
import requests

# Load env vars (LLMONITOR_API_URL / LLMONITOR_APP_ID) from a .env file, if present.
dotenv.load_dotenv()  # Loading env variables using dotenv
||||
class LLMonitorLogger:
|
||||
# Class variables or attributes
|
||||
def __init__(self):
|
||||
# Instance variables
|
||||
self.api_url = os.getenv(
|
||||
"LLMONITOR_API_URL") or "https://app.llmonitor.com"
|
||||
self.account_id = os.getenv("LLMONITOR_APP_ID")
|
||||
|
||||
def log_event(self, model, messages, response_obj, start_time, end_time, print_verbose):
|
||||
# Method definition
|
||||
try:
|
||||
print_verbose(
|
||||
f"LLMonitor Logging - Enters logging function for model {model}")
|
||||
|
||||
print(model, messages, response_obj, start_time, end_time)
|
||||
|
||||
# headers = {
|
||||
# 'Content-Type': 'application/json'
|
||||
# }
|
||||
|
||||
# prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar = self.price_calculator(
|
||||
# model, response_obj, start_time, end_time)
|
||||
# total_cost = prompt_tokens_cost_usd_dollar + completion_tokens_cost_usd_dollar
|
||||
|
||||
# response_time = (end_time-start_time).total_seconds()
|
||||
# if "response" in response_obj:
|
||||
# data = [{
|
||||
# "response_time": response_time,
|
||||
# "model_id": response_obj["model"],
|
||||
# "total_cost": total_cost,
|
||||
# "messages": messages,
|
||||
# "response": response_obj['choices'][0]['message']['content'],
|
||||
# "account_id": self.account_id
|
||||
# }]
|
||||
# elif "error" in response_obj:
|
||||
# data = [{
|
||||
# "response_time": response_time,
|
||||
# "model_id": response_obj["model"],
|
||||
# "total_cost": total_cost,
|
||||
# "messages": messages,
|
||||
# "error": response_obj['error'],
|
||||
# "account_id": self.account_id
|
||||
# }]
|
||||
|
||||
# print_verbose(f"BerriSpend Logging - final data object: {data}")
|
||||
# response = requests.post(url, headers=headers, json=data)
|
||||
except:
|
||||
# traceback.print_exc()
|
||||
print_verbose(
|
||||
f"LLMonitor Logging Error - {traceback.format_exc()}")
|
||||
pass
|
28
litellm/tests/test_llmonitor_integration.py
Normal file
28
litellm/tests/test_llmonitor_integration.py
Normal file
|
@ -0,0 +1,28 @@
|
|||
#### What this tests ####
# This tests if logging to the llmonitor integration actually works

import sys
import os

# Add the parent directory to the system path BEFORE importing litellm,
# so the local checkout is picked up over any installed version.
# (The original inserted the path after the import, which defeated it.)
sys.path.insert(0, os.path.abspath('../..'))

import traceback

import pytest

import litellm
from litellm import embedding, completion

# Route success events to the LLMonitor integration.
litellm.success_callback = ["llmonitor"]
litellm.set_verbose = True

user_message = "Hello, how are you?"
messages = [{"content": user_message, "role": "user"}]

# openai call
response = completion(model="gpt-3.5-turbo",
                      messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}])

# cohere call
# response = completion(model="command-nightly",
#                       messages=[{"role": "user", "content": "Hi 👋 - i'm cohere"}])
Loading…
Add table
Add a link
Reference in a new issue