support proxy openai urls
parent 3f2580c6ee
commit 6cd1960b82
15 changed files with 111 additions and 42 deletions
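In short: litellm gains two module-level settings, litellm.api_base and litellm.headers. When set, completion() sends OpenAI-style requests to the custom base URL and forwards the extra headers, which is what a proxy such as Helicone needs. Minimal usage, mirroring the new test file included below (assumes HELICONE_API_KEY and OPENAI_API_KEY are set in the environment):

import os
import litellm

# Point OpenAI-style calls at the proxy and attach its auth header.
litellm.api_base = "https://oai.hconeai.com/v1"
litellm.headers = {"Helicone-Auth": f"Bearer {os.getenv('HELICONE_API_KEY')}"}

# completion() now routes through the proxy instead of api.openai.com.
response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "how does a court case get to the Supreme Court?"}],
)
print(response)

The per-file changes follow.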
BIN dist/litellm-0.1.219-py3-none-any.whl (vendored, binary file not shown)
BIN dist/litellm-0.1.219.tar.gz (vendored, binary file not shown)
BIN dist/litellm-0.1.220-py3-none-any.whl (vendored, new file, binary file not shown)
BIN dist/litellm-0.1.220.tar.gz (vendored, new file, binary file not shown)
litellm.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: litellm
-Version: 0.1.219
+Version: 0.1.220
 Summary: Library to easily interface with LLM API providers
 Author: BerriAI
 License-File: LICENSE
litellm/__init__.py
@@ -2,6 +2,10 @@ success_callback = []
 failure_callback = []
 set_verbose=False
 telemetry=True
+
+####### PROXY PARAMS ################### configurable params if you use proxy models like Helicone
+api_base = None
+headers = None
 ####### COMPLETION MODELS ###################
 open_ai_chat_completion_models = [
   'gpt-3.5-turbo',
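Both new knobs default to None, so existing behavior is untouched unless a caller opts in; litellm/main.py then prefers the override wherever it previously hard-coded the endpoint, e.g.:

# Pattern used in main.py below: prefer the module-level override when set.
openai.api_base = litellm.api_base if litellm.api_base is not None else "https://api.openai.com/v1"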
BIN litellm/__pycache__/__init__.cpython-311.pyc (new file, binary file not shown)
BIN litellm/__pycache__/main.cpython-311.pyc (new file, binary file not shown)
BIN litellm/__pycache__/timeout.cpython-311.pyc (new file, binary file not shown)
BIN litellm/__pycache__/utils.cpython-311.pyc (new file, binary file not shown)
litellm/main.py
@@ -75,43 +75,66 @@ def completion(
   if azure == True:
     # azure configs
     openai.api_type = "azure"
-    openai.api_base = os.environ.get("AZURE_API_BASE")
+    openai.api_base = litellm.api_base if litellm.api_base is not None else os.environ.get("AZURE_API_BASE")
     openai.api_version = os.environ.get("AZURE_API_VERSION")
     openai.api_key = api_key if api_key is not None else os.environ.get("AZURE_API_KEY")
     ## LOGGING
     logging(model=model, input=messages, azure=azure, logger_fn=logger_fn)
     ## COMPLETION CALL
-    response = openai.ChatCompletion.create(
-      engine=model,
-      messages = messages,
-      **optional_params
-    )
+    if litellm.headers:
+      response = openai.ChatCompletion.create(
+        engine=model,
+        messages = messages,
+        headers = litellm.headers,
+        **optional_params,
+      )
+    else:
+      response = openai.ChatCompletion.create(
+        engine=model,
+        messages = messages,
+        **optional_params
+      )
   elif model in litellm.open_ai_chat_completion_models:
     openai.api_type = "openai"
-    openai.api_base = "https://api.openai.com/v1"
+    openai.api_base = litellm.api_base if litellm.api_base is not None else "https://api.openai.com/v1"
     openai.api_version = None
     openai.api_key = api_key if api_key is not None else os.environ.get("OPENAI_API_KEY")
     ## LOGGING
     logging(model=model, input=messages, azure=azure, logger_fn=logger_fn)
     ## COMPLETION CALL
-    response = openai.ChatCompletion.create(
-      model=model,
-      messages = messages,
-      **optional_params
-    )
+    if litellm.headers:
+      response = openai.ChatCompletion.create(
+        model=model,
+        messages = messages,
+        headers = litellm.headers,
+        **optional_params
+      )
+    else:
+      response = openai.ChatCompletion.create(
+        model=model,
+        messages = messages,
+        **optional_params
+      )
   elif model in litellm.open_ai_text_completion_models:
     openai.api_type = "openai"
-    openai.api_base = "https://api.openai.com/v1"
+    openai.api_base = litellm.api_base if litellm.api_base is not None else "https://api.openai.com/v1"
     openai.api_version = None
     openai.api_key = api_key if api_key is not None else os.environ.get("OPENAI_API_KEY")
     prompt = " ".join([message["content"] for message in messages])
     ## LOGGING
     logging(model=model, input=prompt, azure=azure, logger_fn=logger_fn)
     ## COMPLETION CALL
-    response = openai.Completion.create(
-      model=model,
-      prompt = prompt
-    )
+    if litellm.headers:
+      response = openai.Completion.create(
+        model=model,
+        prompt = prompt,
+        headers = litellm.headers,
+      )
+    else:
+      response = openai.Completion.create(
+        model=model,
+        prompt = prompt
+      )
   elif "replicate" in model:
     # replicate defaults to os.environ.get("REPLICATE_API_TOKEN")
     # checking in case user set it to REPLICATE_API_KEY instead
@@ -171,10 +194,10 @@ def completion(
     logging(model=model, input=prompt, azure=azure, additional_args={"max_tokens": max_tokens}, logger_fn=logger_fn)
     ## COMPLETION CALL
     completion = anthropic.completions.create(
       model=model,
       prompt=prompt,
       max_tokens_to_sample=max_tokens_to_sample
     )
     new_response = {
       "choices": [
         {
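Each provider branch repeats the same if/else: when litellm.headers is truthy, the dict is forwarded as a per-request headers argument to the create() call, so a proxy like Helicone sees its auth header; otherwise the original call runs unchanged. A hedged sketch (not code from this commit, same names as the diff above) of how the duplicated calls could be collapsed:

# Sketch only: build the kwargs once and attach headers conditionally
# instead of duplicating the create() call in each branch.
kwargs = dict(model=model, messages=messages, **optional_params)
if litellm.headers:
    kwargs["headers"] = litellm.headers
response = openai.ChatCompletion.create(**kwargs)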
litellm/tests/test_proxy_api.py (new file, 15 additions)
@@ -0,0 +1,15 @@
+import sys, os
+import traceback
+sys.path.insert(0, os.path.abspath('../..'))  # Adds the parent directory to the system path
+import litellm
+from litellm import embedding, completion
+
+litellm.api_base = "https://oai.hconeai.com/v1"
+litellm.headers = {"Helicone-Auth": f"Bearer {os.getenv('HELICONE_API_KEY')}"}
+
+response = litellm.completion(
+    model="gpt-3.5-turbo",
+    messages=[{"role": "user", "content": "how does a court case get to the Supreme Court?"}]
+)
+
+print(response)
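The test drives the new path end to end: a custom base URL plus a forwarded Helicone-Auth header. Running it requires HELICONE_API_KEY, and OPENAI_API_KEY for the upstream key that main.py still reads from the environment.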
setup.py (2 changes)
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
 
 setup(
     name='litellm',
-    version='0.1.219',
+    version='0.1.220',
     description='Library to easily interface with LLM API providers',
     author='BerriAI',
     packages=[