updating build to include helicone integration

Krrish Dholakia 2023-08-03 08:36:25 -07:00
parent a764401642
commit 10832be1e4
22 changed files with 127 additions and 15 deletions
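
For orientation: the changes below wire Helicone logging into litellm's success-callback path and ship the new litellm/integrations package in the build. The new test file further down shows how the integration is switched on; a minimal sketch (the model name and API key value are placeholders):

    import os
    import litellm
    from litellm import completion

    os.environ["HELICONE_API_KEY"] = "sk-..."  # placeholder; HeliconeLogger reads this at init
    litellm.success_callback = ["helicone"]    # handle_success then routes results to HeliconeLogger

    response = completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi"}])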


@@ -36,4 +36,4 @@ open_ai_embedding_models = [
from .timeout import timeout
from .utils import client, logging, exception_type # Import all the symbols from main.py
from .main import * # Import all the symbols from main.py
from .integrations import *


@@ -0,0 +1 @@
from . import *


@@ -0,0 +1,73 @@
#### What this does ####
# On success, logs events to Helicone
import dotenv, os
import requests
from anthropic import HUMAN_PROMPT, AI_PROMPT
dotenv.load_dotenv() # Loading env variables using dotenv
import traceback

class HeliconeLogger:
    # Class variables or attributes
    helicone_model_list = ["gpt", "claude"]

    def __init__(self):
        # Instance variables
        self.provider_url = "https://api.openai.com/v1"
        self.key = os.getenv('HELICONE_API_KEY')

    def claude_mapping(self, model, messages, response_obj):
        prompt = f"{HUMAN_PROMPT}"
        for message in messages:
            if "role" in message:
                if message["role"] == "user":
                    prompt += f"{HUMAN_PROMPT}{message['content']}"
                else:
                    prompt += f"{AI_PROMPT}{message['content']}"
            else:
                prompt += f"{HUMAN_PROMPT}{message['content']}"
        prompt += f"{AI_PROMPT}"
        claude_provider_request = {"model": model, "prompt": prompt}
        claude_response_obj = {"completion": response_obj['choices'][0]['message']['content'], "model": model, "stop_reason": "stop_sequence"}
        return claude_provider_request, claude_response_obj

    def log_success(self, model, messages, response_obj, start_time, end_time, print_verbose):
        # Method definition
        try:
            print_verbose(f"Helicone Logging - Enters logging function for model {model}")
            model = model if any(accepted_model in model for accepted_model in self.helicone_model_list) else "gpt-3.5-turbo"
            provider_request = {"model": model, "messages": messages}

            if "claude" in model:
                provider_request, response_obj = self.claude_mapping(model=model, messages=messages, response_obj=response_obj)

            providerResponse = {
                "json": response_obj,
                "headers": {"openai-version": "2020-10-01"},
                "status": 200
            }

            # Code to be executed
            url = "https://api.hconeai.com/oai/v1/log"
            headers = {
                'Authorization': f'Bearer {self.key}',
                'Content-Type': 'application/json'
            }
            start_time_seconds = int(start_time.timestamp())
            start_time_milliseconds = int((start_time.timestamp() - start_time_seconds) * 1000)
            end_time_seconds = int(end_time.timestamp())
            end_time_milliseconds = int((end_time.timestamp() - end_time_seconds) * 1000)
            data = {
                "providerRequest": {"url": self.provider_url, "json": provider_request, "meta": {"Helicone-Auth": f"Bearer {self.key}"}},
                "providerResponse": providerResponse,
                "timing": {"startTime": {"seconds": start_time_seconds, "milliseconds": start_time_milliseconds}, "endTime": {"seconds": end_time_seconds, "milliseconds": end_time_milliseconds}} # {"seconds": .., "milliseconds": ..}
            }
            response = requests.post(url, headers=headers, json=data)
            if response.status_code == 200:
                print_verbose("Helicone Logging - Success!")
            else:
                print_verbose(f"Helicone Logging - Error Request was not successful. Status Code: {response.status_code}")
                print_verbose(f"Helicone Logging - Error {response.text}")
        except:
            # traceback.print_exc()
            print_verbose(f"Helicone Logging Error - {traceback.format_exc()}")
            pass
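
Note on the timing block above: Helicone's /oai/v1/log endpoint takes whole seconds and a millisecond remainder as separate fields, which is what the int()/subtraction dance computes. A quick illustration (the datetime value is arbitrary):

    from datetime import datetime

    start_time = datetime(2023, 8, 3, 8, 36, 25, 500000)  # 25.5s past the minute
    seconds = int(start_time.timestamp())                  # whole seconds since epoch
    milliseconds = int((start_time.timestamp() - seconds) * 1000)  # 500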


@@ -112,6 +112,7 @@ def set_callbacks(callback_list):
                subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'sentry_sdk'])
                import sentry_sdk
            sentry_sdk_instance = sentry_sdk
+           sentry_trace_rate = os.environ.get("SENTRY_API_TRACE_RATE") if "SENTRY_API_TRACE_RATE" in os.environ else "1.0"
            sentry_sdk_instance.init(dsn=os.environ.get("SENTRY_API_URL"), traces_sample_rate=float(sentry_trace_rate))
            capture_exception = sentry_sdk_instance.capture_exception
            add_breadcrumb = sentry_sdk_instance.add_breadcrumb
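
The added sentry_trace_rate line makes the sampling rate configurable via the environment, defaulting to "1.0" (trace every transaction). A hypothetical configuration, with a placeholder DSN:

    import os

    os.environ["SENTRY_API_URL"] = "https://<key>@<org>.ingest.sentry.io/<project>"  # placeholder DSN
    os.environ["SENTRY_API_TRACE_RATE"] = "0.25"  # sample 25% of transactions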
@@ -236,14 +237,16 @@ def handle_success(args, kwargs, result, start_time, end_time):
                    print_verbose("reaches helicone for logging!")
                    model = args[0] if len(args) > 0 else kwargs["model"]
                    messages = args[1] if len(args) > 1 else kwargs["messages"]
-                   heliconeLogger.log_success(model=model, messages=messages, response_obj=result, start_time=start_time, end_time=end_time)
+                   heliconeLogger.log_success(model=model, messages=messages, response_obj=result, start_time=start_time, end_time=end_time, print_verbose=print_verbose)
            except:
                print_verbose(f"Success Callback Error - {traceback.format_exc()}")
                pass
        if success_handler and callable(success_handler):
            success_handler(args, kwargs)
        pass
    except:
        print_verbose(f"Success Callback Error - {traceback.format_exc()}")
        pass
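
print_verbose is threaded into log_success so the integration honors the caller's verbosity setting. For reference, litellm's helper is approximately this (reconstructed, not part of this diff):

    def print_verbose(print_statement):
        if litellm.set_verbose:
            print(print_statement)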

Binary file not shown.

Binary file not shown.

BIN dist/litellm-0.1.226-py3-none-any.whl vendored Normal file

Binary file not shown.

BIN dist/litellm-0.1.226.tar.gz vendored Normal file

Binary file not shown.


@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: litellm
-Version: 0.1.222
+Version: 0.1.226
Summary: Library to easily interface with LLM API providers
Author: BerriAI
License-File: LICENSE


@@ -10,4 +10,6 @@ litellm.egg-info/PKG-INFO
litellm.egg-info/SOURCES.txt
litellm.egg-info/dependency_links.txt
litellm.egg-info/requires.txt
-litellm.egg-info/top_level.txt
+litellm.egg-info/top_level.txt
+litellm/integrations/__init__.py
+litellm/integrations/helicone.py


@@ -36,4 +36,4 @@ open_ai_embedding_models = [
from .timeout import timeout
from .utils import client, logging, exception_type # Import all the symbols from main.py
from .main import * # Import all the symbols from main.py
from .integrations import *


@@ -0,0 +1 @@
from . import *


@@ -30,9 +30,10 @@ class HeliconeLogger:
        return claude_provider_request, claude_response_obj

-   def log_success(self, model, messages, response_obj, start_time, end_time):
+   def log_success(self, model, messages, response_obj, start_time, end_time, print_verbose):
        # Method definition
        try:
+           print_verbose(f"Helicone Logging - Enters logging function for model {model}")
            model = model if any(accepted_model in model for accepted_model in self.helicone_model_list) else "gpt-3.5-turbo"
            provider_request = {"model": model, "messages": messages}
@@ -61,10 +62,12 @@ class HeliconeLogger:
                "timing": {"startTime": {"seconds": start_time_seconds, "milliseconds": start_time_milliseconds}, "endTime": {"seconds": end_time_seconds, "milliseconds": end_time_milliseconds}} # {"seconds": .., "milliseconds": ..}
            }
            response = requests.post(url, headers=headers, json=data)
-           # if response.status_code == 200:
-           #   print("Success!")
-           # else:
-           #   print("Request was not successful. Status Code:", response.status_code)
+           if response.status_code == 200:
+               print_verbose("Helicone Logging - Success!")
+           else:
+               print_verbose(f"Helicone Logging - Error Request was not successful. Status Code: {response.status_code}")
+               print_verbose(f"Helicone Logging - Error {response.text}")
        except:
            # traceback.print_exc()
            print_verbose(f"Helicone Logging Error - {traceback.format_exc()}")
            pass


@@ -30,7 +30,7 @@ def test_completion_openai():
    except Exception as e:
        traceback.print_exc()
        pytest.fail(f"Error occurred: {e}")

test_completion_openai()

def test_completion_claude():
    try:
@@ -38,14 +38,14 @@ def test_completion_claude():
        # Add any assertions here to check the response
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")

test_completion_claude()

def test_completion_non_openai():
    try:
        response = completion(model="command-nightly", messages=messages, logger_fn=logger_fn)
        # Add any assertions here to check the response
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")

test_completion_non_openai()

def test_embedding_openai():
    try:
        response = embedding(model='text-embedding-ada-002', input=[user_message], logger_fn=logger_fn)
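
The logger_fn passed above is defined earlier in this test file, outside the hunks shown here; presumably a simple printer along these lines (a reconstruction, not part of this diff):

    def logger_fn(model_call_object: dict):
        print(f"model call details: {model_call_object}")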


@@ -0,0 +1,24 @@
#### What this tests ####
# This tests if logging to the helicone integration actually works
import sys, os
import traceback
import pytest

sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
import litellm
from litellm import embedding, completion

litellm.success_callback = ["helicone"]

litellm.set_verbose = True

user_message = "Hello, how are you?"
messages = [{"content": user_message, "role": "user"}]

# openai call
response = completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}])

# cohere call
response = completion(model="command-nightly", messages=[{"role": "user", "content": "Hi 👋 - i'm cohere"}])
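
Neither call above exercises the claude_mapping path. A hypothetical third call would cover it (the model name is an assumption, and it requires ANTHROPIC_API_KEY to be set):

    # anthropic call (hypothetical addition, not in this commit)
    response = completion(model="claude-instant-1", messages=[{"role": "user", "content": "Hi 👋 - i'm claude"}])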


@@ -237,14 +237,16 @@ def handle_success(args, kwargs, result, start_time, end_time):
                    print_verbose("reaches helicone for logging!")
                    model = args[0] if len(args) > 0 else kwargs["model"]
                    messages = args[1] if len(args) > 1 else kwargs["messages"]
-                   heliconeLogger.log_success(model=model, messages=messages, response_obj=result, start_time=start_time, end_time=end_time)
+                   heliconeLogger.log_success(model=model, messages=messages, response_obj=result, start_time=start_time, end_time=end_time, print_verbose=print_verbose)
            except:
                print_verbose(f"Success Callback Error - {traceback.format_exc()}")
                pass
        if success_handler and callable(success_handler):
            success_handler(args, kwargs)
        pass
    except:
        print_verbose(f"Success Callback Error - {traceback.format_exc()}")
        pass


@@ -2,12 +2,15 @@ from setuptools import setup, find_packages
setup(
    name='litellm',
-   version='0.1.222',
+   version='0.1.226',
    description='Library to easily interface with LLM API providers',
    author='BerriAI',
    packages=[
        'litellm'
    ],
+   package_data={
+       "litellm": ["integrations/*"], # Specify the directory path relative to your package
+   },
    install_requires=[
        'openai',
        'cohere',
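
Since find_packages is already imported at the top of setup.py, an alternative to shipping integrations/* as package data would be to declare it as a subpackage, e.g.:

    packages=find_packages(),  # would discover litellm.integrations automatically

This commit instead keeps the explicit packages list and relies on package_data to include the new directory.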