forked from phoenix/litellm-mirror

add import manager - make package lighter
commit b6f601bb86 (parent c891fac444)

6 changed files with 43 additions and 24 deletions
litellm/integrations/helicone.py
@@ -2,7 +2,6 @@
 # On success, logs events to Helicone
 import dotenv, os
 import requests
-from anthropic import HUMAN_PROMPT, AI_PROMPT
 dotenv.load_dotenv() # Loading env variables using dotenv
 import traceback
 class HeliconeLogger:
@@ -14,6 +13,7 @@ class HeliconeLogger:
         self.key = os.getenv('HELICONE_API_KEY')

     def claude_mapping(self, model, messages, response_obj):
+        from anthropic import HUMAN_PROMPT, AI_PROMPT
         prompt = f"{HUMAN_PROMPT}"
         for message in messages:
             if "role" in message:
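Net effect in this file: the Helicone integration no longer pulls in the anthropic SDK at module load. The import moves inside claude_mapping, so it only runs when a Claude response is actually being logged.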
litellm/main.py
@@ -1,6 +1,5 @@
-import os, openai, cohere, replicate, sys
+import os, openai, sys
 from typing import Any
-from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
 from functools import partial
 import dotenv, traceback, random, asyncio, time
 from copy import deepcopy
@@ -13,7 +12,7 @@ from tenacity import (
     stop_after_attempt,
     wait_random_exponential,
 ) # for exponential backoff
-from litellm.utils import get_secret
+from litellm.utils import get_secret, install_and_import
 ####### ENVIRONMENT VARIABLES ###################
 dotenv.load_dotenv() # Loading env variables using dotenv

@@ -28,9 +27,7 @@ new_response = {
         }
     ]
 }
-# TODO move this to utils.py
 # TODO add translations
-# TODO see if this worked - model_name == krrish
 ####### COMPLETION ENDPOINTS ################
 #############################################
 async def acompletion(*args, **kwargs):
@@ -68,6 +65,7 @@ def completion(
     openai.api_type = "azure"
     openai.api_base = litellm.api_base if litellm.api_base is not None else get_secret("AZURE_API_BASE")
     openai.api_version = litellm.api_version if litellm.api_version is not None else get_secret("AZURE_API_VERSION")
+    # set key
     if api_key:
         openai.api_key = api_key
     elif litellm.azure_key:
@@ -92,6 +90,7 @@ def completion(
     )
 elif model in litellm.open_ai_chat_completion_models:
     openai.api_type = "openai"
+    # note: if a user sets a custom base - we should ensure this works
     openai.api_base = litellm.api_base if litellm.api_base is not None else "https://api.openai.com/v1"
     openai.api_version = None
     if litellm.organization:
@@ -155,6 +154,8 @@ def completion(
     model_response["usage"] = response["usage"]
     response = model_response
 elif "replicate" in model:
+    # import replicate/if it fails then pip install replicate
+    install_and_import("replicate")
     # replicate defaults to os.environ.get("REPLICATE_API_TOKEN")
     # checking in case user set it to REPLICATE_API_KEY instead
     if not get_secret("REPLICATE_API_TOKEN") and get_secret("REPLICATE_API_KEY"):
@@ -194,6 +195,10 @@ def completion(
     }
     response = model_response
 elif model in litellm.anthropic_models:
+    # import anthropic/if it fails then pip install anthropic
+    install_and_import("anthropic")
+    from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
+
     #anthropic defaults to os.environ.get("ANTHROPIC_API_KEY")
     if api_key:
         os.environ["ANTHROPIC_API_KEY"] = api_key
@@ -239,6 +244,8 @@ def completion(
     }
     response = model_response
 elif model in litellm.cohere_models:
+    # import cohere/if it fails then pip install cohere
+    install_and_import("cohere")
     if api_key:
         cohere_key = api_key
     elif litellm.cohere_key:
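All three provider branches follow the same shape: call install_and_import first, then do a normal import of the SDK names the branch needs. A minimal standalone sketch of that shape — the call_claude wrapper is hypothetical, but the prompt constants and completions.create call match the anthropic 0.3.x SDK pinned elsewhere in this commit:

    from litellm.utils import install_and_import

    def call_claude(user_text):
        # pip-installs anthropic on first use, then imports it normally
        install_and_import("anthropic")
        from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
        client = Anthropic()  # reads ANTHROPIC_API_KEY from the environment
        return client.completions.create(
            model="claude-instant-1",
            prompt=f"{HUMAN_PROMPT} {user_text}{AI_PROMPT}",
            max_tokens_to_sample=256,
        )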
litellm/tests/test_completion.py
@@ -7,8 +7,10 @@ sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the
 import pytest
 import litellm
 from litellm import embedding, completion
+from infisical import InfisicalClient

 # litellm.set_verbose = True
+litellm.secret_manager_client = InfisicalClient(token=os.environ["INFISICAL_TOKEN"])

 user_message = "Hello, whats the weather in San Francisco??"
 messages = [{ "content": user_message,"role": "user"}]
@@ -16,6 +18,14 @@ messages = [{ "content": user_message,"role": "user"}]
 def logger_fn(user_model_dict):
     print(f"user_model_dict: {user_model_dict}")

+def test_completion_claude():
+    try:
+        response = completion(model="claude-instant-1", messages=messages, logger_fn=logger_fn)
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
 def test_completion_openai():
     try:
         response = completion(model="gpt-3.5-turbo", messages=messages)
@@ -84,14 +94,6 @@ def test_completion_azure():
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")

-def test_completion_claude():
-    try:
-        response = completion(model="claude-instant-1", messages=messages, logger_fn=logger_fn)
-        # Add any assertions here to check the response
-        print(response)
-    except Exception as e:
-        pytest.fail(f"Error occurred: {e}")
-
 def test_completion_cohere():
     try:
         response = completion(model="command-nightly", messages=messages, max_tokens=500)
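Note that test_completion_claude is not new code: it is added in the second hunk and deleted in the last, i.e. moved above test_completion_openai. And since anthropic, cohere, and replicate are no longer declared dependencies, running these tests in a clean environment also exercises the on-demand installer, e.g. pytest litellm/tests/test_completion.py -k claude (assuming the usual pytest invocation; the diff itself does not show how tests are run).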
litellm/utils.py
@@ -4,7 +4,6 @@ import subprocess, os
 import litellm, openai
 import random, uuid, requests
 import datetime, time
-from anthropic import Anthropic
 import tiktoken
 encoding = tiktoken.get_encoding("cl100k_base")
 from .integrations.helicone import HeliconeLogger
@@ -34,6 +33,19 @@ def print_verbose(print_statement):
     if random.random() <= 0.3:
         print("Get help - https://discord.com/invite/wuPM9dRgDw")

+####### Package Import Handler ###################
+import importlib
+import subprocess
+def install_and_import(package):
+    try:
+        importlib.import_module(package)
+    except ImportError:
+        print(f"{package} is not installed. Installing...")
+        subprocess.call([sys.executable, '-m', 'pip', 'install', package])
+    finally:
+        globals()[package] = importlib.import_module(package)
+##################################################
+
 ####### LOGGING ###################
 #Logging function -> log the exact model details + what's being sent | Non-Blocking
 def logging(model=None, input=None, azure=False, additional_args={}, logger_fn=None, exception=None):
@@ -329,6 +341,8 @@ def prompt_token_calculator(model, messages):
     text = " ".join(message["content"] for message in messages)
     num_tokens = 0
     if "claude" in model:
+        install_and_import('anthropic')
+        from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
         anthropic = Anthropic()
         num_tokens = anthropic.count_tokens(text)
     else:
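One subtlety in install_and_import: globals()[package] binds the freshly imported module into litellm.utils' own namespace, not the caller's. That is why every call site in this commit still follows the helper with its own from anthropic import ... line. A standalone sketch of the distinction, with hypothetical helper.py / caller.py names:

    # helper.py (hypothetical)
    import importlib

    def lazy_bind(package):
        # binds the module into *this* module's globals only
        globals()[package] = importlib.import_module(package)

    # caller.py (hypothetical)
    from helper import lazy_bind

    lazy_bind("json")
    # print(json.dumps({}))  # NameError: 'json' lives in helper.py's globals, not here
    import json               # the caller imports again for its own namespace
    print(json.dumps({"ok": True}))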
pyproject.toml
@@ -8,12 +8,11 @@ readme = "README.md"

 [tool.poetry.dependencies]
 python = "^3.8"
-openai = {extras = ["datalib"], version = "^0.27.8"}
+openai = "^0.27.8"
-cohere = "^4.18.0"
 pytest = "^7.4.0"
-pydantic = "^2.1.1"
-anthropic = "^0.3.7"
-replicate = "^0.10.0"
 python-dotenv = "^1.0.0"
 tenacity = "^8.0.1"
 tiktoken = "^0.4.0"
requirements.txt
@@ -1,8 +1,5 @@
-pydantic
+# used by CI/CD testing
 openai
-cohere
-anthropic
-replicate
 pytest
 python-dotenv
 openai[datalib]
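Taken together with the pyproject.toml hunk, this is the "make package lighter" half of the commit: anthropic, cohere, and replicate (plus pydantic) leave the declared dependencies, and install_and_import fetches a provider SDK the first time a model from that provider is used. Users who would rather avoid a pip install at runtime can preinstall the SDKs themselves, e.g. pip install anthropic cohere replicate — an inference from the diff, not something it states.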