forked from phoenix/litellm-mirror

pass in api key via params

commit 548fcfdff9 (parent acb6781317)

9 changed files with 46 additions and 67 deletions
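In effect, completion() gains a keyword-only api_key parameter that, when set, takes precedence over the provider's environment variable (OPENAI_API_KEY, AZURE_API_KEY, COHERE_API_KEY, and so on). A minimal usage sketch; the key value is a placeholder:

    from litellm import completion

    messages = [{"content": "Hello, how are you?", "role": "user"}]

    # the key travels with the call instead of being read from the environment;
    # "sk-placeholder" stands in for a real provider key
    response = completion(model="gpt-3.5-turbo", messages=messages, api_key="sk-placeholder")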
@@ -63,7 +63,7 @@ def completion(
     temperature=1, top_p=1, n=1, stream=False, stop=None, max_tokens=float('inf'),
     presence_penalty=0, frequency_penalty=0, logit_bias={}, user="",
     # Optional liteLLM function params
-    *, force_timeout=60, azure=False, logger_fn=None, verbose=False
+    *, api_key=None, force_timeout=60, azure=False, logger_fn=None, verbose=False
   ):
   try:
     # check if user passed in any of the OpenAI optional params
@@ -77,7 +77,7 @@ def completion(
       openai.api_type = "azure"
       openai.api_base = os.environ.get("AZURE_API_BASE")
       openai.api_version = os.environ.get("AZURE_API_VERSION")
-      openai.api_key = os.environ.get("AZURE_API_KEY")
+      openai.api_key = api_key if api_key is not None else os.environ.get("AZURE_API_KEY")
       ## LOGGING
       logging(model=model, input=messages, azure=azure, logger_fn=logger_fn)
       ## COMPLETION CALL
@@ -90,10 +90,9 @@ def completion(
       openai.api_type = "openai"
       openai.api_base = "https://api.openai.com/v1"
       openai.api_version = None
-      openai.api_key = os.environ.get("OPENAI_API_KEY")
+      openai.api_key = api_key if api_key is not None else os.environ.get("OPENAI_API_KEY")
       ## LOGGING
       logging(model=model, input=messages, azure=azure, logger_fn=logger_fn)
-
       ## COMPLETION CALL
       response = openai.ChatCompletion.create(
         model=model,
@@ -104,7 +103,7 @@ def completion(
       openai.api_type = "openai"
       openai.api_base = "https://api.openai.com/v1"
       openai.api_version = None
-      openai.api_key = os.environ.get("OPENAI_API_KEY")
+      openai.api_key = api_key if api_key is not None else os.environ.get("OPENAI_API_KEY")
       prompt = " ".join([message["content"] for message in messages])
       ## LOGGING
       logging(model=model, input=prompt, azure=azure, logger_fn=logger_fn)
@@ -119,6 +118,8 @@ def completion(
       if not os.environ.get("REPLICATE_API_TOKEN") and os.environ.get("REPLICATE_API_KEY"):
         replicate_api_token = os.environ.get("REPLICATE_API_KEY")
         os.environ["REPLICATE_API_TOKEN"] = replicate_api_token
+      elif api_key:
+        os.environ["REPLICATE_API_TOKEN"] = api_key
       prompt = " ".join([message["content"] for message in messages])
       input = {"prompt": prompt}
       if max_tokens != float('inf'):
@@ -148,6 +149,8 @@ def completion(
       response = new_response
     elif model in litellm.anthropic_models:
       #anthropic defaults to os.environ.get("ANTHROPIC_API_KEY")
+      if api_key:
+        os.environ["ANTHROPIC_API_KEY"] = api_key
       prompt = f"{HUMAN_PROMPT}"
       for message in messages:
         if "role" in message:
@@ -187,7 +190,7 @@ def completion(
       print_verbose(f"new response: {new_response}")
       response = new_response
     elif model in litellm.cohere_models:
-      cohere_key = os.environ.get("COHERE_API_KEY")
+      cohere_key = api_key if api_key is not None else os.environ.get("COHERE_API_KEY")
       co = cohere.Client(cohere_key)
       prompt = " ".join([message["content"] for message in messages])
       ## LOGGING
@@ -210,32 +213,6 @@ def completion(
         ],
       }
       response = new_response
-
-    elif model in litellm.open_ai_chat_completion_models:
-      openai.api_type = "openai"
-      openai.api_base = "https://api.openai.com/v1"
-      openai.api_version = None
-      openai.api_key = os.environ.get("OPENAI_API_KEY")
-      ## LOGGING
-      logging(model=model, input=messages, azure=azure, logger_fn=logger_fn)
-      ## COMPLETION CALL
-      response = openai.ChatCompletion.create(
-        model=model,
-        messages = messages
-      )
-    elif model in litellm.open_ai_text_completion_models:
-      openai.api_type = "openai"
-      openai.api_base = "https://api.openai.com/v1"
-      openai.api_version = None
-      openai.api_key = os.environ.get("OPENAI_API_KEY")
-      prompt = " ".join([message["content"] for message in messages])
-      ## LOGGING
-      logging(model=model, input=prompt, azure=azure, logger_fn=logger_fn)
-      ## COMPLETION CALL
-      response = openai.Completion.create(
-        model=model,
-        prompt = prompt
-      )
     else:
       logging(model=model, input=messages, azure=azure, logger_fn=logger_fn)
       args = locals()
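For OpenAI, Azure, and Cohere, the wiring above is one precedence rule repeated per branch: use the explicit argument if given, otherwise fall back to the environment. A minimal sketch of that rule in isolation (the helper name is hypothetical, not part of the diff):

    import os

    def resolve_key(api_key, env_var):
        # an explicitly passed key wins; the environment is the fallback
        return api_key if api_key is not None else os.environ.get(env_var)

    openai_key = resolve_key(None, "OPENAI_API_KEY")  # falls back to the env var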
BIN dist/litellm-0.1.217.tar.gz (vendored)
Binary file not shown.
BIN dist/litellm-0.1.218.tar.gz (vendored, new file)
Binary file not shown.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: litellm
-Version: 0.1.217
+Version: 0.1.218
 Summary: Library to easily interface with LLM API providers
 Author: BerriAI
 License-File: LICENSE
@@ -63,7 +63,7 @@ def completion(
     temperature=1, top_p=1, n=1, stream=False, stop=None, max_tokens=float('inf'),
     presence_penalty=0, frequency_penalty=0, logit_bias={}, user="",
     # Optional liteLLM function params
-    *, force_timeout=60, azure=False, logger_fn=None, verbose=False
+    *, api_key=None, force_timeout=60, azure=False, logger_fn=None, verbose=False
   ):
   try:
     # check if user passed in any of the OpenAI optional params
@@ -77,7 +77,7 @@ def completion(
       openai.api_type = "azure"
       openai.api_base = os.environ.get("AZURE_API_BASE")
       openai.api_version = os.environ.get("AZURE_API_VERSION")
-      openai.api_key = os.environ.get("AZURE_API_KEY")
+      openai.api_key = api_key if api_key is not None else os.environ.get("AZURE_API_KEY")
       ## LOGGING
       logging(model=model, input=messages, azure=azure, logger_fn=logger_fn)
       ## COMPLETION CALL
@@ -90,10 +90,9 @@ def completion(
       openai.api_type = "openai"
       openai.api_base = "https://api.openai.com/v1"
       openai.api_version = None
-      openai.api_key = os.environ.get("OPENAI_API_KEY")
+      openai.api_key = api_key if api_key is not None else os.environ.get("OPENAI_API_KEY")
       ## LOGGING
       logging(model=model, input=messages, azure=azure, logger_fn=logger_fn)
-
       ## COMPLETION CALL
       response = openai.ChatCompletion.create(
         model=model,
@@ -104,7 +103,7 @@ def completion(
       openai.api_type = "openai"
       openai.api_base = "https://api.openai.com/v1"
       openai.api_version = None
-      openai.api_key = os.environ.get("OPENAI_API_KEY")
+      openai.api_key = api_key if api_key is not None else os.environ.get("OPENAI_API_KEY")
       prompt = " ".join([message["content"] for message in messages])
       ## LOGGING
       logging(model=model, input=prompt, azure=azure, logger_fn=logger_fn)
@@ -119,6 +118,8 @@ def completion(
       if not os.environ.get("REPLICATE_API_TOKEN") and os.environ.get("REPLICATE_API_KEY"):
         replicate_api_token = os.environ.get("REPLICATE_API_KEY")
         os.environ["REPLICATE_API_TOKEN"] = replicate_api_token
+      elif api_key:
+        os.environ["REPLICATE_API_TOKEN"] = api_key
       prompt = " ".join([message["content"] for message in messages])
       input = {"prompt": prompt}
       if max_tokens != float('inf'):
@@ -148,6 +149,8 @@ def completion(
       response = new_response
     elif model in litellm.anthropic_models:
       #anthropic defaults to os.environ.get("ANTHROPIC_API_KEY")
+      if api_key:
+        os.environ["ANTHROPIC_API_KEY"] = api_key
       prompt = f"{HUMAN_PROMPT}"
       for message in messages:
         if "role" in message:
@@ -187,7 +190,7 @@ def completion(
       print_verbose(f"new response: {new_response}")
       response = new_response
     elif model in litellm.cohere_models:
-      cohere_key = os.environ.get("COHERE_API_KEY")
+      cohere_key = api_key if api_key is not None else os.environ.get("COHERE_API_KEY")
       co = cohere.Client(cohere_key)
       prompt = " ".join([message["content"] for message in messages])
       ## LOGGING
@@ -210,32 +213,6 @@ def completion(
         ],
       }
       response = new_response
-
-    elif model in litellm.open_ai_chat_completion_models:
-      openai.api_type = "openai"
-      openai.api_base = "https://api.openai.com/v1"
-      openai.api_version = None
-      openai.api_key = os.environ.get("OPENAI_API_KEY")
-      ## LOGGING
-      logging(model=model, input=messages, azure=azure, logger_fn=logger_fn)
-      ## COMPLETION CALL
-      response = openai.ChatCompletion.create(
-        model=model,
-        messages = messages
-      )
-    elif model in litellm.open_ai_text_completion_models:
-      openai.api_type = "openai"
-      openai.api_base = "https://api.openai.com/v1"
-      openai.api_version = None
-      openai.api_key = os.environ.get("OPENAI_API_KEY")
-      prompt = " ".join([message["content"] for message in messages])
-      ## LOGGING
-      logging(model=model, input=prompt, azure=azure, logger_fn=logger_fn)
-      ## COMPLETION CALL
-      response = openai.Completion.create(
-        model=model,
-        prompt = prompt
-      )
     else:
       logging(model=model, input=messages, azure=azure, logger_fn=logger_fn)
       args = locals()
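Replicate and Anthropic are wired differently in the diff above: their client libraries read credentials from the environment, so the parameter is written into os.environ instead of being handed to a client constructor. A sketch of that pattern (helper name hypothetical); note the assignment persists for later calls in the same process:

    import os

    def export_provider_key(env_var, api_key):
        # mirrors the REPLICATE_API_TOKEN / ANTHROPIC_API_KEY branches:
        # mutate the process environment so the provider SDK finds the key
        if api_key:
            os.environ[env_var] = api_key

    export_provider_key("ANTHROPIC_API_KEY", "placeholder-key")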
litellm/tests/test_api_key_param.py (new file, 25 lines)
@@ -0,0 +1,25 @@
+#### What this tests ####
+# This tests the ability to set api key's via the params instead of as environment variables
+
+import sys, os
+import traceback
+sys.path.insert(0, os.path.abspath('../..'))  # Adds the parent directory to the system path
+import litellm
+from litellm import embedding, completion
+
+litellm.set_verbose = False
+
+def logger_fn(model_call_object: dict):
+    print(f"model call details: {model_call_object}")
+
+user_message = "Hello, how are you?"
+messages = [{ "content": user_message,"role": "user"}]
+
+temp_key = os.environ.get("OPENAI_API_KEY")
+os.environ["OPENAI_API_KEY"] = "bad-key"
+# test on openai completion call
+try:
+    response = completion(model="gpt-3.5-turbo", messages=messages, logger_fn=logger_fn, api_key=temp_key)
+except:
+    print(f"error occurred: {traceback.format_exc()}")
+    pass
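The test works by poisoning OPENAI_API_KEY with a bad value and handing the real key back through the new parameter, so the call can only succeed if the parameter takes precedence. One caveat: the bad value is left in the environment when the script exits. A variant that restores the original key, sketched here but not part of the commit:

    import os
    from litellm import completion

    messages = [{"content": "Hello, how are you?", "role": "user"}]

    temp_key = os.environ.get("OPENAI_API_KEY")
    os.environ["OPENAI_API_KEY"] = "bad-key"
    try:
        response = completion(model="gpt-3.5-turbo", messages=messages, api_key=temp_key)
    finally:
        if temp_key is not None:
            os.environ["OPENAI_API_KEY"] = temp_key  # put the real key back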
@@ -19,7 +19,7 @@ messages = [{ "content": user_message,"role": "user"}]
 
 # test on openai completion call
 try:
-    response = completion(model="gpt-3.5-turbo", messages=messages)
+    response = completion(model="gpt-3.5-turbo", messages=messages, logger_fn=logger_fn)
     score +=1
 except:
     print(f"error occurred: {traceback.format_exc()}")
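For context, logger_fn is litellm's per-call logging hook: completion() passes it a dict describing each model call (the logging(model=..., input=..., ...) lines in the diffs above feed it). A minimal logger, matching the one the new test defines:

    def logger_fn(model_call_object: dict):
        # invoked by litellm with the details of each model call
        print(f"model call details: {model_call_object}")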
setup.py (2 lines changed)
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
 
 setup(
     name='litellm',
-    version='0.1.217',
+    version='0.1.218',
     description='Library to easily interface with LLM API providers',
     author='BerriAI',
     packages=[