diff --git a/__init__.py b/build/lib/litellm/__init__.py
similarity index 100%
rename from __init__.py
rename to build/lib/litellm/__init__.py
diff --git a/main.py b/build/lib/litellm/main.py
similarity index 100%
rename from main.py
rename to build/lib/litellm/main.py
diff --git a/dist/litellm-0.1.0-py3-none-any.whl b/dist/litellm-0.1.0-py3-none-any.whl
new file mode 100644
index 000000000..9bc973ed7
Binary files /dev/null and b/dist/litellm-0.1.0-py3-none-any.whl differ
diff --git a/dist/litellm-0.1.0.tar.gz b/dist/litellm-0.1.0.tar.gz
new file mode 100644
index 000000000..f0c18b9b7
Binary files /dev/null and b/dist/litellm-0.1.0.tar.gz differ
diff --git a/litellm.egg-info/PKG-INFO b/litellm.egg-info/PKG-INFO
new file mode 100644
index 000000000..96f770a0c
--- /dev/null
+++ b/litellm.egg-info/PKG-INFO
@@ -0,0 +1,12 @@
+Metadata-Version: 2.1
+Name: litellm
+Version: 0.1.0
+Summary: Library to easily interface with LLM API providers
+Home-page: UNKNOWN
+Author: Ishaan Jaffer
+License: UNKNOWN
+Platform: UNKNOWN
+License-File: LICENSE
+
+UNKNOWN
+
diff --git a/litellm.egg-info/SOURCES.txt b/litellm.egg-info/SOURCES.txt
new file mode 100644
index 000000000..2e2bbebec
--- /dev/null
+++ b/litellm.egg-info/SOURCES.txt
@@ -0,0 +1,10 @@
+LICENSE
+README.md
+setup.py
+litellm/__init__.py
+litellm/main.py
+litellm.egg-info/PKG-INFO
+litellm.egg-info/SOURCES.txt
+litellm.egg-info/dependency_links.txt
+litellm.egg-info/requires.txt
+litellm.egg-info/top_level.txt
\ No newline at end of file
diff --git a/litellm.egg-info/dependency_links.txt b/litellm.egg-info/dependency_links.txt
new file mode 100644
index 000000000..8b1378917
--- /dev/null
+++ b/litellm.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/litellm.egg-info/requires.txt b/litellm.egg-info/requires.txt
new file mode 100644
index 000000000..156122d04
--- /dev/null
+++ b/litellm.egg-info/requires.txt
@@ -0,0 +1,2 @@
+openai
+cohere
diff --git a/litellm.egg-info/top_level.txt 
b/litellm.egg-info/top_level.txt
new file mode 100644
index 000000000..8e637fbf5
--- /dev/null
+++ b/litellm.egg-info/top_level.txt
@@ -0,0 +1 @@
+litellm
diff --git a/litellm/__init__.py b/litellm/__init__.py
new file mode 100644
index 000000000..25925d83c
--- /dev/null
+++ b/litellm/__init__.py
@@ -0,0 +1 @@
+from .main import * # Import all the symbols from main.py
\ No newline at end of file
diff --git a/litellm/main.py b/litellm/main.py
new file mode 100644
index 000000000..cbefea5a9
--- /dev/null
+++ b/litellm/main.py
@@ -0,0 +1,108 @@
+import os, openai, cohere
+
+####### COMPLETION MODELS ###################
+open_ai_chat_completion_models = [
+  'gpt-3.5-turbo',
+  'gpt-4'
+]
+open_ai_text_completion_models = [
+  'text-davinci-003'
+]
+
+cohere_models = [
+  'command-nightly',
+]
+
+####### EMBEDDING MODELS ###################
+open_ai_embedding_models = [
+  'text-embedding-ada-002'
+]
+
+#############################################
+
+
+####### COMPLETION ENDPOINTS ################
+#############################################
+def completion(model, messages, azure=False):
+  """Route a chat completion to Azure, Cohere, or OpenAI based on `model`."""
+  if azure == True:
+    # azure configs
+    openai.api_type = "azure"
+    openai.api_base = os.environ.get("AZURE_API_BASE")
+    openai.api_version = os.environ.get("AZURE_API_VERSION")
+    openai.api_key = os.environ.get("AZURE_API_KEY")
+    response = openai.ChatCompletion.create(
+      engine=model,
+      messages = messages
+    )
+  elif model in cohere_models:
+    cohere_key = os.environ.get("COHERE_API_KEY")
+    co = cohere.Client(cohere_key)
+    prompt = " ".join([message["content"] for message in messages])
+    response = co.generate(
+      model=model,
+      prompt = prompt
+    )
+    # Reshape the cohere result into an OpenAI-style "choices" dict.
+    # NOTE(review): response[0] is a cohere Generation object, not plain text —
+    # confirm callers expect that, or consider response[0].text here.
+    new_response = {
+        "choices": [
+            {
+                "finish_reason": "stop",
+                "index": 0,
+                "message": {
+                    "content": response[0],
+                    "role": "assistant"
+                }
+            }
+        ],
+    }
+
+    response = new_response
+
+  elif model in open_ai_chat_completion_models:
+    openai.api_type = "openai"
+    openai.api_base = "https://api.openai.com/v1"
+    openai.api_version = None
+    openai.api_key = os.environ.get("OPENAI_API_KEY")
+    response = openai.ChatCompletion.create(
+      model=model,
+      messages = messages
+    )
+  elif model in open_ai_text_completion_models:
+    openai.api_type = "openai"
+    openai.api_base = "https://api.openai.com/v1"
+    openai.api_version = None
+    openai.api_key = os.environ.get("OPENAI_API_KEY")
+    prompt = " ".join([message["content"] for message in messages])
+    response = openai.Completion.create(
+      model=model,
+      prompt = prompt
+    )
+  return response
+
+
+
+### EMBEDDING ENDPOINTS ####################
+def embedding(model, input=[], azure=False):
+  """Return embeddings for `input` from Azure or OpenAI, chosen by `model`."""
+  if azure == True:
+    # azure configs
+    openai.api_type = "azure"
+    openai.api_base = os.environ.get("AZURE_API_BASE")
+    openai.api_version = os.environ.get("AZURE_API_VERSION")
+    openai.api_key = os.environ.get("AZURE_API_KEY")
+    response = openai.Embedding.create(input=input, engine=model)
+  elif model in open_ai_embedding_models:
+    openai.api_type = "openai"
+    openai.api_base = "https://api.openai.com/v1"
+    openai.api_version = None
+    openai.api_key = os.environ.get("OPENAI_API_KEY")
+    response = openai.Embedding.create(input=input, model=model)
+  return response
+
+
+#############################################
+#############################################
+
diff --git a/setup.py b/setup.py
new file mode 100644
index 000000000..f21baea2f
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,16 @@
+from setuptools import setup, find_packages
+
+setup(
+    name='litellm',
+    version='0.1.0',
+    description='Library to easily interface with LLM API providers',
+    author='Ishaan Jaffer',
+    packages=[
+        'litellm'
+    ],
+    install_requires=[
+        # NOTE: 'os' removed — it is a stdlib module, not a PyPI distribution
+        'openai',
+        'cohere'
+    ],
+)