add import manager - make package lighter

ishaan-jaff 2023-08-08 10:37:41 -07:00
parent c891fac444
commit b6f601bb86
6 changed files with 43 additions and 24 deletions

View file

@@ -2,7 +2,6 @@
# On success, logs events to Helicone
import dotenv, os
import requests
from anthropic import HUMAN_PROMPT, AI_PROMPT
dotenv.load_dotenv() # Loading env variables using dotenv
import traceback
class HeliconeLogger:
@@ -14,6 +13,7 @@ class HeliconeLogger:
self.key = os.getenv('HELICONE_API_KEY')
def claude_mapping(self, model, messages, response_obj):
from anthropic import HUMAN_PROMPT, AI_PROMPT
prompt = f"{HUMAN_PROMPT}"
for message in messages:
if "role" in message:

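The Helicone hunks above move the anthropic import out of module scope and into claude_mapping, so importing the logger no longer requires the anthropic SDK to be installed. A minimal sketch of the method after the change, with indentation restored; only the deferred import, the prompt initialisation, and the role check come from the hunks, the loop body is an illustrative guess:

class HeliconeLogger:
    def claude_mapping(self, model, messages, response_obj):
        # deferred import: anthropic is only needed when a Claude call is logged
        from anthropic import HUMAN_PROMPT, AI_PROMPT
        prompt = f"{HUMAN_PROMPT}"
        for message in messages:
            if "role" in message:
                # hypothetical role-to-marker mapping; the real loop body is outside the hunk
                marker = HUMAN_PROMPT if message["role"] == "user" else AI_PROMPT
                prompt += f"{marker} {message['content']}"
        prompt += AI_PROMPT
        return prompt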
View file

@@ -1,6 +1,5 @@
import os, openai, cohere, replicate, sys
import os, openai, sys
from typing import Any
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
from functools import partial
import dotenv, traceback, random, asyncio, time
from copy import deepcopy
@@ -13,7 +12,7 @@ from tenacity import (
stop_after_attempt,
wait_random_exponential,
) # for exponential backoff
from litellm.utils import get_secret
from litellm.utils import get_secret, install_and_import
####### ENVIRONMENT VARIABLES ###################
dotenv.load_dotenv() # Loading env variables using dotenv
@@ -28,9 +27,7 @@ new_response = {
}
]
}
# TODO move this to utils.py
# TODO add translations
# TODO see if this worked - model_name == krrish
####### COMPLETION ENDPOINTS ################
#############################################
async def acompletion(*args, **kwargs):
@@ -68,6 +65,7 @@ def completion(
openai.api_type = "azure"
openai.api_base = litellm.api_base if litellm.api_base is not None else get_secret("AZURE_API_BASE")
openai.api_version = litellm.api_version if litellm.api_version is not None else get_secret("AZURE_API_VERSION")
# set key
if api_key:
openai.api_key = api_key
elif litellm.azure_key:
@@ -92,6 +90,7 @@ def completion(
)
elif model in litellm.open_ai_chat_completion_models:
openai.api_type = "openai"
# note: if a user sets a custom base - we should ensure this works
openai.api_base = litellm.api_base if litellm.api_base is not None else "https://api.openai.com/v1"
openai.api_version = None
if litellm.organization:
@@ -155,6 +154,8 @@ def completion(
model_response["usage"] = response["usage"]
response = model_response
elif "replicate" in model:
# import replicate/if it fails then pip install replicate
install_and_import("replicate")
# replicate defaults to os.environ.get("REPLICATE_API_TOKEN")
# checking in case user set it to REPLICATE_API_KEY instead
if not get_secret("REPLICATE_API_TOKEN") and get_secret("REPLICATE_API_KEY"):
@@ -194,6 +195,10 @@ def completion(
}
response = model_response
elif model in litellm.anthropic_models:
# import anthropic/if it fails then pip install anthropic
install_and_import("anthropic")
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
#anthropic defaults to os.environ.get("ANTHROPIC_API_KEY")
if api_key:
os.environ["ANTHROPIC_API_KEY"] = api_key
@@ -239,6 +244,8 @@ def completion(
}
response = model_response
elif model in litellm.cohere_models:
# import cohere/if it fails then pip install cohere
install_and_import("cohere")
if api_key:
cohere_key = api_key
elif litellm.cohere_key:

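Every provider branch in completion() now follows the same shape: call install_and_import() first, then import the SDK locally inside the branch, so openai remains the only SDK imported at module load. A self-contained sketch of the anthropic branch under that pattern; the prompt formatting, client call, and max_tokens_to_sample value are illustrative for anthropic 0.3.x (per pyproject.toml below), not copied from the diff:

import os
from litellm.utils import install_and_import  # same helper imported at the top of this file

def claude_completion(messages, model="claude-instant-1", api_key=None):
    # install/import the SDK on first use instead of at module load
    install_and_import("anthropic")
    from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
    if api_key:
        os.environ["ANTHROPIC_API_KEY"] = api_key  # mirrors the key handling in the hunk above
    # fold the chat messages into Anthropic's Human/Assistant prompt format
    prompt = "".join(
        f"{HUMAN_PROMPT} {m['content']}" if m.get("role") == "user" else f"{AI_PROMPT} {m['content']}"
        for m in messages
    ) + AI_PROMPT
    client = Anthropic()
    result = client.completions.create(model=model, prompt=prompt, max_tokens_to_sample=256)
    return result.completion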
View file

@@ -7,8 +7,10 @@ sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the
import pytest
import litellm
from litellm import embedding, completion
from infisical import InfisicalClient
# litellm.set_verbose = True
litellm.secret_manager_client = InfisicalClient(token=os.environ["INFISICAL_TOKEN"])
user_message = "Hello, whats the weather in San Francisco??"
messages = [{ "content": user_message,"role": "user"}]
@@ -16,6 +18,14 @@ messages = [{ "content": user_message,"role": "user"}]
def logger_fn(user_model_dict):
print(f"user_model_dict: {user_model_dict}")
def test_completion_claude():
try:
response = completion(model="claude-instant-1", messages=messages, logger_fn=logger_fn)
# Add any assertions here to check the response
print(response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
def test_completion_openai():
try:
response = completion(model="gpt-3.5-turbo", messages=messages)
@@ -84,14 +94,6 @@ def test_completion_azure():
except Exception as e:
pytest.fail(f"Error occurred: {e}")
def test_completion_claude():
try:
response = completion(model="claude-instant-1", messages=messages, logger_fn=logger_fn)
# Add any assertions here to check the response
print(response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
def test_completion_cohere():
try:
response = completion(model="command-nightly", messages=messages, max_tokens=500)

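The test file keeps calling the providers directly, so on a clean environment it now relies on install_and_import fetching the SDKs at call time (cohere, anthropic, and replicate are dropped from requirements.txt below). An alternative, not part of this commit, is to skip a provider test when its SDK is missing:

import pytest

def test_completion_claude():
    pytest.importorskip("anthropic")  # skip rather than pip-install during the test run
    try:
        response = completion(model="claude-instant-1", messages=messages, logger_fn=logger_fn)
        print(response)
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")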
View file

@@ -4,7 +4,6 @@ import subprocess, os
import litellm, openai
import random, uuid, requests
import datetime, time
from anthropic import Anthropic
import tiktoken
encoding = tiktoken.get_encoding("cl100k_base")
from .integrations.helicone import HeliconeLogger
@@ -34,6 +33,19 @@ def print_verbose(print_statement):
if random.random() <= 0.3:
print("Get help - https://discord.com/invite/wuPM9dRgDw")
####### Package Import Handler ###################
import importlib
import subprocess
def install_and_import(package):
try:
importlib.import_module(package)
except ImportError:
print(f"{package} is not installed. Installing...")
subprocess.call([sys.executable, '-m', 'pip', 'install', package])
finally:
globals()[package] = importlib.import_module(package)
##################################################
####### LOGGING ###################
#Logging function -> log the exact model details + what's being sent | Non-Blocking
def logging(model=None, input=None, azure=False, additional_args={}, logger_fn=None, exception=None):
@@ -329,6 +341,8 @@ def prompt_token_calculator(model, messages):
text = " ".join(message["content"] for message in messages)
num_tokens = 0
if "claude" in model:
install_and_import('anthropic')
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
anthropic = Anthropic()
num_tokens = anthropic.count_tokens(text)
else:

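The utils hunks add the import handler itself: probe with importlib, pip-install on ImportError, then bind the module into globals(), and reuse it in prompt_token_calculator before the local anthropic import. A small usage sketch, assuming the helper is imported the same way main.py does with `from litellm.utils import get_secret, install_and_import`:

from litellm.utils import install_and_import

# first call on a machine without the SDK prints "anthropic is not installed. Installing..."
# and pip-installs it; subsequent calls are plain imports
install_and_import("anthropic")
from anthropic import Anthropic

anthropic_client = Anthropic()
# same count_tokens call that prompt_token_calculator uses for claude models
print(anthropic_client.count_tokens("Hello, what's the weather in San Francisco?"))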
View file

@@ -8,12 +8,11 @@ readme = "README.md"
[tool.poetry.dependencies]
python = "^3.8"
openai = {extras = ["datalib"], version = "^0.27.8"}
cohere = "^4.18.0"
openai = "^0.27.8"
pytest = "^7.4.0"
pydantic = "^2.1.1"
anthropic = "^0.3.7"
replicate = "^0.10.0"
python-dotenv = "^1.0.0"
tenacity = "^8.0.1"
tiktoken = "^0.4.0"

View file

@@ -1,8 +1,5 @@
pydantic
# used by CI/CD testing
openai
cohere
anthropic
replicate
pytest
python-dotenv
openai[datalib]
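Net effect of the last two files: cohere, anthropic, and replicate are gone from both the poetry dependencies and the CI requirements, so a plain install of the package no longer pulls them in; they are fetched the first time a matching model is used, and pre-installing one of them simply makes install_and_import a no-op. A quick way to observe this, written as an assumption about the runtime flow rather than a documented guarantee:

import importlib.util

# right after installing the package, only openai (and the other core deps) are present
for optional in ("anthropic", "cohere", "replicate"):
    print(optional, "preinstalled:", importlib.util.find_spec(optional) is not None)

# a Claude call then installs anthropic on first use before completing the request
from litellm import completion
response = completion(model="claude-instant-1",
                      messages=[{"role": "user", "content": "Hey, how's it going?"}])
print(response)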