Merge branch 'main' into main

commit 60e3e42fba
Author: Ishaan Jaff
Date: 2023-09-12 11:43:03 -07:00 (committed by GitHub)
11 changed files with 246 additions and 18 deletions


@@ -0,0 +1,116 @@
# Setting API Keys, Base, Version
LiteLLM allows you to specify the following:
* API Key
* API Base
* API Version
* API Type
You can set the API configs using:
* Environment Variables
* litellm variables `litellm.api_key`
* Passing args to `completion()`
## Environment Variables
### Setting API Keys
Set the LiteLLM API key or a provider-specific key:
```python
import os
# Set OpenAI API key
os.environ["OPENAI_API_KEY"] = "Your API Key"
os.environ["ANTHROPIC_API_KEY"] = "Your API Key"
os.environ["REPLICATE_API_KEY"] = "Your API Key"
os.environ["TOGETHERAI_API_KEY"] = "Your API Key"
```
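Once a key is in the environment, `completion()` picks it up automatically. A minimal sketch:
```python
from litellm import completion

messages = [{"content": "Hello, how are you?", "role": "user"}]
# uses OPENAI_API_KEY from the environment
response = completion(model="gpt-3.5-turbo", messages=messages)
```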
### Setting API Base, API Version, API Type
```python
# for azure openai
os.environ['AZURE_API_BASE'] = "https://openai-gpt-4-test2-v-12.openai.azure.com/"
os.environ['AZURE_API_VERSION'] = "2023-05-15"
os.environ['AZURE_API_TYPE'] = "your-custom-type"
# for openai (defaults to https://api.openai.com/v1; override it to route through a proxy)
os.environ['OPENAI_API_BASE'] = "https://api.openai.com/v1"
```
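With these variables set, an Azure call can omit the explicit key/base/version arguments. A minimal sketch (the deployment name is a placeholder, and it assumes `AZURE_API_KEY` is set in the environment like the provider keys above):
```python
import os
from litellm import completion

os.environ["AZURE_API_KEY"] = "Your API Key"  # assumed env var, set like the provider keys above

messages = [{"content": "Hello, how are you?", "role": "user"}]
# key, base, and version are read from AZURE_API_KEY, AZURE_API_BASE, AZURE_API_VERSION
response = completion(model="azure/my-deployment-name", messages=messages)
```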
## litellm variables
### litellm.api_key
This variable is checked for all providers
```python
import litellm

messages = [{"content": "Hello, how are you?", "role": "user"}]

# openai call
litellm.api_key = "sk-OpenAIKey"
response = litellm.completion(messages=messages, model="gpt-3.5-turbo")

# anthropic call
litellm.api_key = "sk-AnthropicKey"
response = litellm.completion(messages=messages, model="claude-2")
```
### litellm.provider_key (e.g. `litellm.openai_key`)
```python
import litellm
messages = [{"content": "Hello, how are you?", "role": "user"}]

# openai call
litellm.openai_key = "sk-OpenAIKey"
response = litellm.completion(messages=messages, model="gpt-3.5-turbo")

# anthropic call
litellm.anthropic_key = "sk-AnthropicKey"
response = litellm.completion(messages=messages, model="claude-2")
```
### litellm.api_base
```python
import litellm
messages = [{"content": "Hello, how are you?", "role": "user"}]

litellm.api_base = "https://hosted-llm-api.co"
response = litellm.completion(messages=messages, model="gpt-3.5-turbo")
```
### litellm.organization
```python
import litellm
messages = [{"content": "Hello, how are you?", "role": "user"}]

litellm.organization = "LiteLlmOrg"
response = litellm.completion(messages=messages, model="gpt-3.5-turbo")
```
## Passing Args to completion()
You can pass the API key, base, and version directly in the `completion()` call:
### api_key
```python
from litellm import completion
messages = [{ "content": "Hello, how are you?","role": "user"}]
response = completion("command-nightly", messages, api_key="Your-Api-Key")
```
### api_base
```python
from litellm import completion
messages = [{ "content": "Hello, how are you?","role": "user"}]
response = completion("command-nightly", messages, api_base="https://hosted-llm-api.co")
```
### api_version
```python
from litellm import completion
messages = [{ "content": "Hello, how are you?","role": "user"}]
response = completion("command-nightly", messages, api_version="2023-02-15")
```
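These arguments can also be combined in a single call. A short sketch of an Azure request passing the key, base, and version together (the deployment name and endpoint are placeholders):
```python
from litellm import completion

messages = [{"content": "Hello, how are you?", "role": "user"}]
response = completion(
    model="azure/my-deployment-name",                   # placeholder deployment
    messages=messages,
    api_key="Your-Azure-Api-Key",
    api_base="https://my-endpoint.openai.azure.com/",   # placeholder endpoint
    api_version="2023-05-15",
)
```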


@@ -73,6 +73,7 @@ const sidebars = {
"providers/custom_openai_proxy",
]
},
"set_keys",
"token_usage",
"exception_mapping",
'debugging/local_debugging',


@@ -238,6 +238,7 @@ from .utils import (
register_prompt_template,
validate_environment,
check_valid_key,
get_llm_provider
)
from .main import * # type: ignore
from .integrations import *


@@ -18,6 +18,7 @@ from litellm.utils import (
read_config_args,
completion_with_fallbacks,
verify_access_key,
get_llm_provider
)
from .llms import anthropic
from .llms import together_ai
@@ -169,6 +170,7 @@ def completion(
completion_call_id=id
)
logging.update_environment_variables(model=model, user=user, optional_params=optional_params, litellm_params=litellm_params)
get_llm_provider(model=model, custom_llm_provider=custom_llm_provider)
if custom_llm_provider == "azure":
# azure configs
openai.api_type = get_secret("AZURE_API_TYPE") or "azure"
@@ -179,10 +181,10 @@ def completion(
or get_secret("AZURE_API_BASE")
)
openai.api_version = (
litellm.api_version
if litellm.api_version is not None
else get_secret("AZURE_API_VERSION")
api_version = (
api_version or
litellm.api_version or
get_secret("AZURE_API_VERSION")
)
api_key = (
@@ -195,11 +197,11 @@ def completion(
## LOGGING
logging.pre_call(
input=messages,
api_key=openai.api_key,
api_key=api_key,
additional_args={
"headers": litellm.headers,
"api_version": openai.api_version,
"api_base": openai.api_base,
"api_version": api_version,
"api_base": api_base,
},
)
## COMPLETION CALL
@@ -209,6 +211,7 @@ def completion(
headers=litellm.headers,
api_key=api_key,
api_base=api_base,
api_version=api_version,
**optional_params,
)
if "stream" in optional_params and optional_params["stream"] == True:
@@ -217,12 +220,12 @@ def completion(
## LOGGING
logging.post_call(
input=messages,
api_key=openai.api_key,
api_key=api_key,
original_response=response,
additional_args={
"headers": litellm.headers,
"api_version": openai.api_version,
"api_base": openai.api_base,
"api_version": api_version,
"api_base": api_base,
},
)
elif (


@@ -32,6 +32,16 @@ def test_completion_with_empty_model():
pass
def test_completion_with_no_provider():
# test a model whose provider cannot be inferred
try:
model = "cerebras/btlm-3b-8k-base"
response = completion(model=model, messages=messages)
except Exception as e:
print(f"error occurred: {e}")
pass
test_completion_with_no_provider()
# # bad key
# temp_key = os.environ.get("OPENAI_API_KEY")
# os.environ["OPENAI_API_KEY"] = "bad-key"


@@ -10,7 +10,7 @@ sys.path.insert(
) # Adds the parent directory to the system path
import pytest
import litellm
from litellm.utils import trim_messages, get_token_count
from litellm.utils import trim_messages, get_token_count, get_valid_models
# Assuming your trim_messages, shorten_message_to_fit_limit, and get_token_count functions are all in a module named 'message_utils'
@@ -22,7 +22,7 @@ def test_basic_trimming():
print(trimmed_messages)
# print(get_token_count(messages=trimmed_messages, model="claude-2"))
assert (get_token_count(messages=trimmed_messages, model="claude-2")) <= 8
test_basic_trimming()
# test_basic_trimming()
def test_basic_trimming_no_max_tokens_specified():
messages = [{"role": "user", "content": "This is a long message that is definitely under the token limit."}]
@@ -31,7 +31,7 @@ def test_basic_trimming_no_max_tokens_specified():
print(trimmed_messages)
# print(get_token_count(messages=trimmed_messages, model="claude-2"))
assert (get_token_count(messages=trimmed_messages, model="gpt-4")) <= litellm.model_cost['gpt-4']['max_tokens']
test_basic_trimming_no_max_tokens_specified()
# test_basic_trimming_no_max_tokens_specified()
def test_multiple_messages_trimming():
messages = [
@@ -43,7 +43,7 @@ def test_multiple_messages_trimming():
print(trimmed_messages)
# print(get_token_count(messages=trimmed_messages, model="gpt-3.5-turbo"))
assert(get_token_count(messages=trimmed_messages, model="gpt-3.5-turbo")) <= 20
test_multiple_messages_trimming()
# test_multiple_messages_trimming()
def test_multiple_messages_no_trimming():
messages = [
@@ -55,7 +55,7 @@ def test_multiple_messages_no_trimming():
print(trimmed_messages)
assert(messages==trimmed_messages)
test_multiple_messages_no_trimming()
# test_multiple_messages_no_trimming()
def test_large_trimming():
@@ -64,4 +64,21 @@ def test_large_trimming():
print("trimmed messages")
print(trimmed_messages)
assert(get_token_count(messages=trimmed_messages, model="random")) <= 20
test_large_trimming()
# test_large_trimming()
def test_get_valid_models():
old_environ = os.environ
os.environ = {'OPENAI_API_KEY': 'temp'} # mock set only openai key in environ
valid_models = get_valid_models()
print(valid_models)
# list of openai supported llms on litellm
expected_models = litellm.open_ai_chat_completion_models + litellm.open_ai_text_completion_models
assert(valid_models == expected_models)
# restore the original environment
os.environ = old_environ
# test_get_valid_models()


@@ -931,6 +931,55 @@ def get_optional_params( # use the openai defaults
return optional_params
return optional_params
def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None):
try:
# check if llm provider provided
if custom_llm_provider:
return model, custom_llm_provider
# check if llm provider part of model name
if model.split("/",1)[0] in litellm.provider_list:
custom_llm_provider = model.split("/", 1)[0]
model = model.split("/", 1)[1]
return model, custom_llm_provider
# check if model in known model provider list
## openai - chatcompletion + text completion
if model in litellm.open_ai_chat_completion_models or model in litellm.open_ai_text_completion_models:
custom_llm_provider = "openai"
## cohere
elif model in litellm.cohere_models:
custom_llm_provider = "cohere"
## replicate
elif model in litellm.replicate_models:
custom_llm_provider = "replicate"
## openrouter
elif model in litellm.openrouter_models:
custom_llm_provider = "openrouter"
## vertex - text + chat models
elif model in litellm.vertex_chat_models or model in litellm.vertex_text_models:
custom_llm_provider = "vertex_ai"
## huggingface
elif model in litellm.huggingface_models:
custom_llm_provider = "huggingface"
## ai21
elif model in litellm.ai21_models:
custom_llm_provider = "ai21"
## together_ai
elif model in litellm.together_ai_models:
custom_llm_provider = "together_ai"
## aleph_alpha
elif model in litellm.aleph_alpha_models:
custom_llm_provider = "aleph_alpha"
## baseten
elif model in litellm.baseten_models:
custom_llm_provider = "baseten"
if custom_llm_provider is None or custom_llm_provider=="":
raise ValueError(f"LLM Provider NOT provided. Pass in the LLM provider you are trying to call. E.g. For 'Huggingface' inference endpoints pass in `completion(model='huggingface/{model}',..)` Learn more: https://docs.litellm.ai/docs/providers")
return model, custom_llm_provider
except Exception as e:
raise e
def get_max_tokens(model: str):
try:
@@ -2555,6 +2604,7 @@ def trim_messages(
return messages
# Verify that the user has passed in a valid and active api key
def verify_access_key(access_key:str):
openai.api_key = access_key
@@ -2569,3 +2619,33 @@ def verify_access_key(access_key:str):
return True
except:
return False
# this helper reads the .env and returns a list of supported llms for the user
def get_valid_models():
try:
# get keys set in .env
environ_keys = os.environ.keys()
valid_providers = []
# for all valid providers, make a list of supported llms
valid_models = []
for provider in litellm.provider_list:
# edge case: litellm lists the provider as together_ai, but the expected env key uses TOGETHERAI
provider = provider.replace("_", "")
# litellm standardizes expected provider keys to
# PROVIDER_API_KEY. Example: OPENAI_API_KEY, COHERE_API_KEY
expected_provider_key = f"{provider.upper()}_API_KEY"
if expected_provider_key in environ_keys:
# key is set
valid_providers.append(provider)
for provider in valid_providers:
if provider == "azure":
valid_models.append("Azure-LLM")
else:
models_for_provider = litellm.models_by_provider.get(provider, [])
valid_models.extend(models_for_provider)
return valid_models
except:
return [] # NON-Blocking
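A rough sketch of how these two new helpers behave, assuming litellm's provider model lists are populated as usual (the model names and placeholder key below are illustrative):
```python
import os
from litellm.utils import get_llm_provider, get_valid_models

# a provider prefix is split off the model name
model, provider = get_llm_provider("huggingface/bigcode/starcoder")
print(model, provider)  # -> "bigcode/starcoder" "huggingface"

# bare model names are resolved through litellm's per-provider model lists
model, provider = get_llm_provider("gpt-3.5-turbo")
print(provider)  # -> "openai"

# with only OPENAI_API_KEY set, get_valid_models() should return the
# OpenAI chat + text completion model lists
os.environ["OPENAI_API_KEY"] = "sk-placeholder"
print(get_valid_models())
```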


@@ -1,6 +1,6 @@
[tool.poetry]
name = "litellm"
version = "0.1.601"
version = "0.1.603"
description = "Library to easily interface with LLM API providers"
authors = ["BerriAI"]
license = "MIT License"