support proxy openai urls

Krrish Dholakia 2023-08-02 19:50:08 -07:00
parent 3f2580c6ee
commit 6cd1960b82
15 changed files with 111 additions and 42 deletions

@@ -2,6 +2,10 @@ success_callback = []
 failure_callback = []
 set_verbose=False
 telemetry=True
+####### PROXY PARAMS ################### configurable params if you use proxy models like Helicone
+api_base = None
+headers = None
+
 ####### COMPLETION MODELS ###################
 open_ai_chat_completion_models = [
   'gpt-3.5-turbo',
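Both new module-level params default to None, so existing callers are unaffected; a user opts in by setting them before the first completion call, which is exactly what the new test file at the bottom of this diff does. A minimal usage sketch, assuming HELICONE_API_KEY is exported:

    import os
    import litellm

    # Route OpenAI-style traffic through Helicone's OpenAI-compatible proxy
    # (the same URL the new test file uses) and attach the auth header.
    litellm.api_base = "https://oai.hconeai.com/v1"
    litellm.headers = {"Helicone-Auth": f"Bearer {os.getenv('HELICONE_API_KEY')}"}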

@@ -75,43 +75,66 @@ def completion(
   if azure == True:
     # azure configs
     openai.api_type = "azure"
-    openai.api_base = os.environ.get("AZURE_API_BASE")
+    openai.api_base = litellm.api_base if litellm.api_base is not None else os.environ.get("AZURE_API_BASE")
     openai.api_version = os.environ.get("AZURE_API_VERSION")
     openai.api_key = api_key if api_key is not None else os.environ.get("AZURE_API_KEY")
     ## LOGGING
     logging(model=model, input=messages, azure=azure, logger_fn=logger_fn)
     ## COMPLETION CALL
-    response = openai.ChatCompletion.create(
-      engine=model,
-      messages = messages,
-      **optional_params
-    )
+    if litellm.headers:
+      response = openai.ChatCompletion.create(
+        engine=model,
+        messages = messages,
+        headers = litellm.headers,
+        **optional_params,
+      )
+    else:
+      response = openai.ChatCompletion.create(
+        engine=model,
+        messages = messages,
+        **optional_params
+      )
   elif model in litellm.open_ai_chat_completion_models:
     openai.api_type = "openai"
-    openai.api_base = "https://api.openai.com/v1"
+    openai.api_base = litellm.api_base if litellm.api_base is not None else "https://api.openai.com/v1"
     openai.api_version = None
     openai.api_key = api_key if api_key is not None else os.environ.get("OPENAI_API_KEY")
     ## LOGGING
     logging(model=model, input=messages, azure=azure, logger_fn=logger_fn)
     ## COMPLETION CALL
-    response = openai.ChatCompletion.create(
-      model=model,
-      messages = messages,
-      **optional_params
-    )
+    if litellm.headers:
+      response = openai.ChatCompletion.create(
+        model=model,
+        messages = messages,
+        headers = litellm.headers,
+        **optional_params
+      )
+    else:
+      response = openai.ChatCompletion.create(
+        model=model,
+        messages = messages,
+        **optional_params
+      )
   elif model in litellm.open_ai_text_completion_models:
     openai.api_type = "openai"
-    openai.api_base = "https://api.openai.com/v1"
+    openai.api_base = litellm.api_base if litellm.api_base is not None else "https://api.openai.com/v1"
     openai.api_version = None
     openai.api_key = api_key if api_key is not None else os.environ.get("OPENAI_API_KEY")
     prompt = " ".join([message["content"] for message in messages])
     ## LOGGING
     logging(model=model, input=prompt, azure=azure, logger_fn=logger_fn)
     ## COMPLETION CALL
-    response = openai.Completion.create(
-      model=model,
-      prompt = prompt
-    )
+    if litellm.headers:
+      response = openai.Completion.create(
+        model=model,
+        prompt = prompt,
+        headers = litellm.headers,
+      )
+    else:
+      response = openai.Completion.create(
+        model=model,
+        prompt = prompt
+      )
   elif "replicate" in model:
     # replicate defaults to os.environ.get("REPLICATE_API_TOKEN")
     # checking in case user set it to REPLICATE_API_KEY instead
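Each provider branch above now resolves the base URL the same way: an explicitly set litellm.api_base wins, otherwise the old default is used (AZURE_API_BASE from the environment, or https://api.openai.com/v1). A hedged restatement of that inline conditional as a standalone helper; resolve_api_base is hypothetical and not part of this commit:

    import os
    import litellm

    def resolve_api_base(env_var=None, default_url=None):
        # litellm.api_base, when set, overrides any provider default.
        if litellm.api_base is not None:
            return litellm.api_base
        if env_var is not None:
            return os.environ.get(env_var, default_url)
        return default_url

    # e.g. the Azure branch: resolve_api_base(env_var="AZURE_API_BASE")
    # e.g. the OpenAI branches: resolve_api_base(default_url="https://api.openai.com/v1")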
@@ -171,10 +194,10 @@ def completion(
     logging(model=model, input=prompt, azure=azure, additional_args={"max_tokens": max_tokens}, logger_fn=logger_fn)
     ## COMPLETION CALL
     completion = anthropic.completions.create(
-      model=model,
-      prompt=prompt,
-      max_tokens_to_sample=max_tokens_to_sample
-    )
+        model=model,
+        prompt=prompt,
+        max_tokens_to_sample=max_tokens_to_sample
+    )
     new_response = {
       "choices": [
         {
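The if litellm.headers / else duplication around each create() call exists only to add a single kwarg; openai-python 0.x accepts a per-request headers argument, which is what both branches rely on. A hypothetical consolidation, not in this commit, that builds the kwargs once:

    import openai
    import litellm

    def chat_completion_with_optional_headers(model, messages, **optional_params):
        # Hypothetical helper: same behavior as the duplicated branches above.
        kwargs = dict(model=model, messages=messages, **optional_params)
        if litellm.headers:
            kwargs["headers"] = litellm.headers  # forwarded to the HTTP request
        return openai.ChatCompletion.create(**kwargs)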

BIN dist/litellm-0.1.220-py3-none-any.whl vendored, new file (binary not shown)

BIN dist/litellm-0.1.220.tar.gz vendored, new file (binary not shown)

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: litellm
-Version: 0.1.219
+Version: 0.1.220
 Summary: Library to easily interface with LLM API providers
 Author: BerriAI
 License-File: LICENSE


@@ -0,0 +1,15 @@
+import sys, os
+import traceback
+sys.path.insert(0, os.path.abspath('../..'))  # Adds the parent directory to the system path
+import litellm
+from litellm import embedding, completion
+
+litellm.api_base = "https://oai.hconeai.com/v1"
+litellm.headers = {"Helicone-Auth": f"Bearer {os.getenv('HELICONE_API_KEY')}"}
+
+response = litellm.completion(
+  model="gpt-3.5-turbo",
+  messages=[{"role": "user", "content": "how does a court case get to the Supreme Court?"}]
+)
+
+print(response)
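The print(response) at the end dumps the raw OpenAI-format object returned through the proxy. A caller would typically pull out just the message text; a short sketch continuing the script above, assuming the standard openai 0.x ChatCompletion response shape:

    # The proxied response keeps the normal OpenAI schema.
    content = response["choices"][0]["message"]["content"]
    print(content)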

@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
 setup(
     name='litellm',
-    version='0.1.219',
+    version='0.1.220',
     description='Library to easily interface with LLM API providers',
     author='BerriAI',
     packages=[