diff --git a/build/lib/litellm/__init__.py b/build/lib/litellm/__init__.py
index 22e8bfa87..e3d4a8d84 100644
--- a/build/lib/litellm/__init__.py
+++ b/build/lib/litellm/__init__.py
@@ -2,6 +2,10 @@ success_callback = []
 failure_callback = []
 set_verbose=False
 telemetry=True
+
+####### PROXY PARAMS ################### configurable params if you use proxy models like Helicone
+api_base = None
+headers = None
 ####### COMPLETION MODELS ###################
 open_ai_chat_completion_models = [
   'gpt-3.5-turbo',
diff --git a/build/lib/litellm/main.py b/build/lib/litellm/main.py
index 55d560814..16faba2bd 100644
--- a/build/lib/litellm/main.py
+++ b/build/lib/litellm/main.py
@@ -75,43 +75,66 @@ def completion(
   if azure == True:
     # azure configs
     openai.api_type = "azure"
-    openai.api_base = os.environ.get("AZURE_API_BASE")
+    openai.api_base = litellm.api_base if litellm.api_base is not None else os.environ.get("AZURE_API_BASE")
     openai.api_version = os.environ.get("AZURE_API_VERSION")
     openai.api_key = api_key if api_key is not None else os.environ.get("AZURE_API_KEY")
     ## LOGGING
     logging(model=model, input=messages, azure=azure, logger_fn=logger_fn)
     ## COMPLETION CALL
-    response = openai.ChatCompletion.create(
-      engine=model,
-      messages = messages,
-      **optional_params
-    )
+    if litellm.headers:
+      response = openai.ChatCompletion.create(
+        engine=model,
+        messages = messages,
+        headers = litellm.headers,
+        **optional_params,
+      )
+    else:
+      response = openai.ChatCompletion.create(
+        engine=model,
+        messages = messages,
+        **optional_params
+      )
   elif model in litellm.open_ai_chat_completion_models:
     openai.api_type = "openai"
-    openai.api_base = "https://api.openai.com/v1"
+    openai.api_base = litellm.api_base if litellm.api_base is not None else "https://api.openai.com/v1"
     openai.api_version = None
     openai.api_key = api_key if api_key is not None else os.environ.get("OPENAI_API_KEY")
     ## LOGGING
     logging(model=model, input=messages, azure=azure, logger_fn=logger_fn)
     ## COMPLETION CALL
-    response = openai.ChatCompletion.create(
-      model=model,
-      messages = messages,
-      **optional_params
-    )
+    if litellm.headers:
+      response = openai.ChatCompletion.create(
+        model=model,
+        messages = messages,
+        headers = litellm.headers,
+        **optional_params
+      )
+    else:
+      response = openai.ChatCompletion.create(
+        model=model,
+        messages = messages,
+        **optional_params
+      )
   elif model in litellm.open_ai_text_completion_models:
     openai.api_type = "openai"
-    openai.api_base = "https://api.openai.com/v1"
+    openai.api_base = litellm.api_base if litellm.api_base is not None else "https://api.openai.com/v1"
     openai.api_version = None
     openai.api_key = api_key if api_key is not None else os.environ.get("OPENAI_API_KEY")
     prompt = " ".join([message["content"] for message in messages])
     ## LOGGING
     logging(model=model, input=prompt, azure=azure, logger_fn=logger_fn)
     ## COMPLETION CALL
-    response = openai.Completion.create(
+    if litellm.headers:
+      response = openai.Completion.create(
       model=model,
-      prompt = prompt
-    )
+        prompt = prompt,
+        headers = litellm.headers,
+      )
+    else:
+      response = openai.Completion.create(
+        model=model,
+        prompt = prompt
+      )
   elif "replicate" in model:
     # replicate defaults to os.environ.get("REPLICATE_API_TOKEN")
     # checking in case user set it to REPLICATE_API_KEY instead
@@ -171,10 +194,10 @@ def completion(
     logging(model=model, input=prompt, azure=azure, additional_args={"max_tokens": max_tokens}, logger_fn=logger_fn)
     ## COMPLETION CALL
     completion = anthropic.completions.create(
-        model=model,
-        prompt=prompt,
-        max_tokens_to_sample=max_tokens_to_sample
-    )
+      model=model,
+      prompt=prompt,
+      max_tokens_to_sample=max_tokens_to_sample
+    )
     new_response = {
       "choices": [
         {
diff --git a/dist/litellm-0.1.219-py3-none-any.whl b/dist/litellm-0.1.219-py3-none-any.whl
deleted file mode 100644
index 439f6b47a..000000000
Binary files a/dist/litellm-0.1.219-py3-none-any.whl and /dev/null differ
diff --git a/dist/litellm-0.1.219.tar.gz b/dist/litellm-0.1.219.tar.gz
deleted file mode 100644
index 116eed418..000000000
Binary files a/dist/litellm-0.1.219.tar.gz and /dev/null differ
diff --git a/dist/litellm-0.1.220-py3-none-any.whl b/dist/litellm-0.1.220-py3-none-any.whl
new file mode 100644
index 000000000..d12f0568c
Binary files /dev/null and b/dist/litellm-0.1.220-py3-none-any.whl differ
diff --git a/dist/litellm-0.1.220.tar.gz b/dist/litellm-0.1.220.tar.gz
new file mode 100644
index 000000000..63d50fdac
Binary files /dev/null and b/dist/litellm-0.1.220.tar.gz differ
diff --git a/litellm.egg-info/PKG-INFO b/litellm.egg-info/PKG-INFO
index f6f15a462..2e24f886b 100644
--- a/litellm.egg-info/PKG-INFO
+++ b/litellm.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: litellm
-Version: 0.1.219
+Version: 0.1.220
 Summary: Library to easily interface with LLM API providers
 Author: BerriAI
 License-File: LICENSE
diff --git a/litellm/__init__.py b/litellm/__init__.py
index 22e8bfa87..e3d4a8d84 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -2,6 +2,10 @@ success_callback = []
 failure_callback = []
 set_verbose=False
 telemetry=True
+
+####### PROXY PARAMS ################### configurable params if you use proxy models like Helicone
+api_base = None
+headers = None
 ####### COMPLETION MODELS ###################
 open_ai_chat_completion_models = [
   'gpt-3.5-turbo',
diff --git a/litellm/__pycache__/__init__.cpython-311.pyc b/litellm/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 000000000..19b37aa2f
Binary files /dev/null and b/litellm/__pycache__/__init__.cpython-311.pyc differ
diff --git a/litellm/__pycache__/main.cpython-311.pyc b/litellm/__pycache__/main.cpython-311.pyc
new file mode 100644
index 000000000..7b8de5bd0
Binary files /dev/null and b/litellm/__pycache__/main.cpython-311.pyc differ
diff --git a/litellm/__pycache__/timeout.cpython-311.pyc b/litellm/__pycache__/timeout.cpython-311.pyc
new file mode 100644
index 000000000..880e08abb
Binary files /dev/null and b/litellm/__pycache__/timeout.cpython-311.pyc differ
diff --git a/litellm/__pycache__/utils.cpython-311.pyc b/litellm/__pycache__/utils.cpython-311.pyc
new file mode 100644
index 000000000..61ebdb129
Binary files /dev/null and b/litellm/__pycache__/utils.cpython-311.pyc differ
diff --git a/litellm/main.py b/litellm/main.py
index 55d560814..16faba2bd 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -75,43 +75,66 @@ def completion(
   if azure == True:
     # azure configs
     openai.api_type = "azure"
-    openai.api_base = os.environ.get("AZURE_API_BASE")
+    openai.api_base = litellm.api_base if litellm.api_base is not None else os.environ.get("AZURE_API_BASE")
     openai.api_version = os.environ.get("AZURE_API_VERSION")
     openai.api_key = api_key if api_key is not None else os.environ.get("AZURE_API_KEY")
     ## LOGGING
     logging(model=model, input=messages, azure=azure, logger_fn=logger_fn)
     ## COMPLETION CALL
-    response = openai.ChatCompletion.create(
-      engine=model,
-      messages = messages,
-      **optional_params
-    )
+    if litellm.headers:
+      response = openai.ChatCompletion.create(
+        engine=model,
+        messages = messages,
+        headers = litellm.headers,
+        **optional_params,
+      )
+    else:
+      response = openai.ChatCompletion.create(
+        engine=model,
+        messages = messages,
+        **optional_params
+      )
   elif model in litellm.open_ai_chat_completion_models:
     openai.api_type = "openai"
-    openai.api_base = "https://api.openai.com/v1"
+    openai.api_base = litellm.api_base if litellm.api_base is not None else "https://api.openai.com/v1"
     openai.api_version = None
     openai.api_key = api_key if api_key is not None else os.environ.get("OPENAI_API_KEY")
     ## LOGGING
     logging(model=model, input=messages, azure=azure, logger_fn=logger_fn)
     ## COMPLETION CALL
-    response = openai.ChatCompletion.create(
-      model=model,
-      messages = messages,
-      **optional_params
-    )
+    if litellm.headers:
+      response = openai.ChatCompletion.create(
+        model=model,
+        messages = messages,
+        headers = litellm.headers,
+        **optional_params
+      )
+    else:
+      response = openai.ChatCompletion.create(
+        model=model,
+        messages = messages,
+        **optional_params
+      )
   elif model in litellm.open_ai_text_completion_models:
     openai.api_type = "openai"
-    openai.api_base = "https://api.openai.com/v1"
+    openai.api_base = litellm.api_base if litellm.api_base is not None else "https://api.openai.com/v1"
     openai.api_version = None
     openai.api_key = api_key if api_key is not None else os.environ.get("OPENAI_API_KEY")
     prompt = " ".join([message["content"] for message in messages])
     ## LOGGING
     logging(model=model, input=prompt, azure=azure, logger_fn=logger_fn)
     ## COMPLETION CALL
-    response = openai.Completion.create(
+    if litellm.headers:
+      response = openai.Completion.create(
       model=model,
-      prompt = prompt
-    )
+        prompt = prompt,
+        headers = litellm.headers,
+      )
+    else:
+      response = openai.Completion.create(
+        model=model,
+        prompt = prompt
+      )
   elif "replicate" in model:
     # replicate defaults to os.environ.get("REPLICATE_API_TOKEN")
     # checking in case user set it to REPLICATE_API_KEY instead
@@ -171,10 +194,10 @@ def completion(
     logging(model=model, input=prompt, azure=azure, additional_args={"max_tokens": max_tokens}, logger_fn=logger_fn)
     ## COMPLETION CALL
     completion = anthropic.completions.create(
-        model=model,
-        prompt=prompt,
-        max_tokens_to_sample=max_tokens_to_sample
-    )
+      model=model,
+      prompt=prompt,
+      max_tokens_to_sample=max_tokens_to_sample
+    )
     new_response = {
       "choices": [
         {
diff --git a/litellm/tests/test_proxy_api.py b/litellm/tests/test_proxy_api.py
new file mode 100644
index 000000000..348e83e88
--- /dev/null
+++ b/litellm/tests/test_proxy_api.py
@@ -0,0 +1,15 @@
+import sys, os
+import traceback
+sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
+import litellm
+from litellm import embedding, completion
+
+litellm.api_base = "https://oai.hconeai.com/v1"
+litellm.headers = {"Helicone-Auth": f"Bearer {os.getenv('HELICONE_API_KEY')}"}
+
+response = litellm.completion(
+  model="gpt-3.5-turbo",
+  messages=[{"role": "user", "content": "how does a court case get to the Supreme Court?"}]
+)
+
+print(response)
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 524f7ba0c..a33431fb0 100644
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
 setup(
     name='litellm',
-    version='0.1.219',
+    version='0.1.220',
     description='Library to easily interface with LLM API providers',
     author='BerriAI',
     packages=[
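
Usage: the patch above introduces two module-level proxy params, litellm.api_base and litellm.headers, and threads them through the Azure and OpenAI code paths, falling back to the old env-var/default behavior when they are unset. Below is a minimal sketch of how a caller exercises them, mirroring the new litellm/tests/test_proxy_api.py; the Helicone base URL and the HELICONE_API_KEY env var are taken from that test, and any other OpenAI-compatible proxy endpoint should work the same way.

import os
import litellm

# Route OpenAI-style calls through a proxy (here: Helicone's
# OpenAI-compatible endpoint) instead of https://api.openai.com/v1.
litellm.api_base = "https://oai.hconeai.com/v1"

# When set, these headers are forwarded on every completion call.
litellm.headers = {"Helicone-Auth": f"Bearer {os.getenv('HELICONE_API_KEY')}"}

response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "how does a court case get to the Supreme Court?"}],
)
print(response)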