This commit is contained in:
Krrish Dholakia 2023-08-22 16:09:10 -07:00
parent 43fd811f4f
commit b481a66c7f
6 changed files with 78 additions and 29 deletions

View file

@ -1,29 +1,24 @@
#### What this tests #### # #### What this tests ####
# This tests if logging to the litedebugger integration actually works # # This tests if logging to the litedebugger integration actually works
# pytest mistakes intentional bad calls as failed tests -> [TODO] fix this # # pytest mistakes intentional bad calls as failed tests -> [TODO] fix this
import sys, os # import sys, os
import traceback # import traceback
import pytest # import pytest
sys.path.insert( # sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
0, os.path.abspath("../..") # import litellm
) # Adds the parent directory to the system path # from litellm import embedding, completion
import litellm
from litellm import embedding, completion
litellm.email = "krrish@berri.ai" # litellm.set_verbose = True
user_message = "Hello, how are you?" # litellm.email = "krrish@berri.ai"
messages = [{"content": user_message, "role": "user"}]
# user_message = "Hello, how are you?"
# messages = [{ "content": user_message,"role": "user"}]
# openai call # #openai call
response = completion( # response = completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}])
model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}] # print(f"response: {response}")
) # #bad request call
# # response = completion(model="chatgpt-test", messages=[{"role": "user", "content": "Hi 👋 - i'm a bad request"}])
# bad request call
response = completion(
model="chatgpt-test",
messages=[{"role": "user", "content": "Hi 👋 - i'm a bad request"}],
)

View file

@ -281,16 +281,15 @@ def exception_logging(
####### CLIENT ################### ####### CLIENT ###################
# make it easy to log if completion/embedding runs succeeded or failed + see what happened | Non-Blocking # make it easy to log if completion/embedding runs succeeded or failed + see what happened | Non-Blocking
def client(original_function): def client(original_function):
global liteDebuggerClient global liteDebuggerClient, get_all_keys
def function_setup( def function_setup(
*args, **kwargs *args, **kwargs
): # just run once to check if user wants to send their data anywhere - PostHog/Sentry/Slack/etc. ): # just run once to check if user wants to send their data anywhere - PostHog/Sentry/Slack/etc.
try: try:
global callback_list, add_breadcrumb, user_logger_fn global callback_list, add_breadcrumb, user_logger_fn
if ( if litellm.email is not None or os.getenv("LITELLM_EMAIL", None) is not None: # add to input, success and failure callbacks if user is using hosted product
litellm.debugger or os.getenv("LITELLM_EMAIL", None) != None get_all_keys()
): # add to input, success and failure callbacks if user sets debugging to true
litellm.input_callback.append("lite_debugger") litellm.input_callback.append("lite_debugger")
litellm.success_callback.append("lite_debugger") litellm.success_callback.append("lite_debugger")
litellm.failure_callback.append("lite_debugger") litellm.failure_callback.append("lite_debugger")
@ -1092,6 +1091,61 @@ def modify_integration(integration_name, integration_params):
if "table_name" in integration_params: if "table_name" in integration_params:
Supabase.supabase_table_name = integration_params["table_name"] Supabase.supabase_table_name = integration_params["table_name"]
####### [BETA] HOSTED PRODUCT ################ - https://docs.litellm.ai/docs/debugging/hosted_debugging
def get_all_keys():
    """Fetch the user's hosted API keys and export them into ``os.environ``.

    Part of the [BETA] hosted product flow
    (https://docs.litellm.ai/docs/debugging/hosted_debugging): if the user has
    configured an email (``LITELLM_EMAIL`` env var or ``litellm.email``), call
    ``http://api.litellm.ai/get_all_keys`` at most once every 5 minutes
    (throttled via the module-level ``last_fetched_at`` timestamp) and set each
    returned key as an environment variable.

    Returns:
        The string ``"it worked!"`` when keys were fetched and exported,
        otherwise ``None``. Errors are non-blocking: they are logged via
        ``print_verbose`` and swallowed so a failed fetch never breaks the
        caller's completion/embedding call.
    """
    global last_fetched_at
    try:
        user_email = os.getenv("LITELLM_EMAIL") or litellm.email
        if user_email:
            # Throttle: only hit the API if more than 5 minutes have passed
            # since the last fetch (or if we have never fetched).
            time_delta = 0
            if last_fetched_at is not None:
                time_delta = time.time() - last_fetched_at
            if time_delta > 300 or last_fetched_at is None:
                last_fetched_at = time.time()
                print_verbose(f"last_fetched_at: {last_fetched_at}")
                response = requests.post(
                    url="http://api.litellm.ai/get_all_keys",
                    headers={"content-type": "application/json"},
                    data=json.dumps({"user_email": user_email}),
                    timeout=10,  # never hang the caller on a slow/unreachable host
                )
                print_verbose(f"get model key response: {response.text}")
                data = response.json()
                # Keys follow the LITELLM API KEY format -
                # <UPPERCASE_PROVIDER_NAME>_API_KEY, e.g. HUGGINGFACE_API_KEY
                for key, value in data["model_keys"].items():
                    os.environ[key] = value
                return "it worked!"
        return None
    except Exception:
        # Non-blocking by design: log and swallow so the hosted-product
        # integration can never break a user's LLM call.
        print_verbose(
            f"[Non-Blocking Error] get_all_keys error - {traceback.format_exc()}"
        )
        return None
def get_model_list():
    """Fetch the user's hosted model list from the LiteLLM API.

    Part of the [BETA] hosted product flow: if the user has configured an
    email (``LITELLM_EMAIL`` env var or ``litellm.email``), call
    ``http://api.litellm.ai/get_model_list`` at most once every 5 minutes
    (throttled via the module-level ``last_fetched_at`` timestamp).

    Returns:
        The ``model_list`` value from the API response when a fetch happened,
        otherwise ``None``. Errors are non-blocking: they are logged via
        ``print_verbose`` and swallowed.
    """
    global last_fetched_at
    try:
        user_email = os.getenv("LITELLM_EMAIL") or litellm.email
        if user_email:
            # Throttle: only hit the API if more than 5 minutes have passed
            # since the last fetch (or if we have never fetched).
            time_delta = 0
            if last_fetched_at is not None:
                time_delta = time.time() - last_fetched_at
            if time_delta > 300 or last_fetched_at is None:
                last_fetched_at = time.time()
                print_verbose(f"last_fetched_at: {last_fetched_at}")
                response = requests.post(
                    url="http://api.litellm.ai/get_model_list",
                    headers={"content-type": "application/json"},
                    data=json.dumps({"user_email": user_email}),
                    timeout=10,  # never hang the caller on a slow/unreachable host
                )
                print_verbose(f"get_model_list response: {response.text}")
                data = response.json()
                return data["model_list"]
        return None
    except Exception:
        # Bug fix: the original message said "get_all_keys error" here
        # (copy-paste from get_all_keys), misattributing failures.
        print_verbose(
            f"[Non-Blocking Error] get_model_list error - {traceback.format_exc()}"
        )
        return None
####### EXCEPTION MAPPING ################ ####### EXCEPTION MAPPING ################
def exception_type(model, original_exception, custom_llm_provider): def exception_type(model, original_exception, custom_llm_provider):

View file

@ -1,6 +1,6 @@
[tool.poetry] [tool.poetry]
name = "litellm" name = "litellm"
version = "0.1.454" version = "0.1.455"
description = "Library to easily interface with LLM API providers" description = "Library to easily interface with LLM API providers"
authors = ["BerriAI"] authors = ["BerriAI"]
license = "MIT License" license = "MIT License"