This commit is contained in:
Krrish Dholakia 2023-09-29 11:33:58 -07:00
parent bb50729a18
commit f04d50d119
15 changed files with 84 additions and 70 deletions

View file

@ -45,8 +45,13 @@ from litellm import completion
import os
## set ENV variables
os.environ["OPENAI_API_KEY"] = "your-api-key"
os.environ["COHERE_API_KEY"] = "your-api-key"
messages = [{ "content": "Hello, how are you?","role": "user"}]

View file

@ -17,8 +17,8 @@ config = {
}
# set env var
os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your anthropic key
os.environ["OPENAI_API_KEY"] = "your-api-key"
os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
sample_text = "how does a court case get to the Supreme Court?" * 1000

View file

@ -57,7 +57,7 @@ You can also access information like latency.
```python
import litellm
os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-5b46387675a944d2"
os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
messages=[{"role": "user", "content": "Hey!"}]

View file

@ -12,8 +12,8 @@ By default we provide a free $10 community-key to try all providers supported on
from litellm import completion
## set ENV variables
os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
os.environ["COHERE_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your cohere key
os.environ["OPENAI_API_KEY"] = "your-api-key"
os.environ["COHERE_API_KEY"] = "your-api-key"
messages = [{ "content": "Hello, how are you?","role": "user"}]

View file

@ -23,7 +23,7 @@ from litellm import completion
import os
## set ENV variables
os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
os.environ["OPENAI_API_KEY"] = "your-api-key"
response = completion(
model="gpt-3.5-turbo",
@ -39,7 +39,7 @@ from litellm import completion
import os
## set ENV variables
os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
response = completion(
model="claude-2",
@ -133,7 +133,7 @@ from litellm import completion
import os
## set ENV variables
os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
os.environ["OPENAI_API_KEY"] = "your-api-key"
response = completion(
model="gpt-3.5-turbo",
@ -150,7 +150,7 @@ from litellm import completion
import os
## set ENV variables
os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
response = completion(
model="claude-2",
@ -262,7 +262,7 @@ Pass the completion response to `litellm.completion_cost(completion_response=res
```python
from litellm import completion, completion_cost
import os
os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
os.environ["OPENAI_API_KEY"] = "your-api-key"
response = completion(
model="gpt-3.5-turbo",
@ -289,7 +289,7 @@ import os
from litellm import completion
# use the LiteLLM API Key
os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
messages = [{ "content": "Hello, how are you?","role": "user"}]
@ -304,7 +304,7 @@ import os
from litellm import completion
# use the LiteLLM API Key
os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
os.environ["OPENAI_API_KEY"] = "your-api-key"
messages = [{ "content": "Hello, how are you?","role": "user"}]
@ -319,7 +319,7 @@ import os
from litellm import completion
# use the LiteLLM API Key
os.environ["TOGETHERAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
os.environ["TOGETHERAI_API_KEY"] = "your-api-key"
messages = [{ "content": "Hello, how are you?","role": "user"}]
@ -335,7 +335,7 @@ import os
from litellm import completion
# use the LiteLLM API Key
os.environ["TOGETHERAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
os.environ["TOGETHERAI_API_KEY"] = "your-api-key"
messages = [{ "content": "Hello, how are you?","role": "user"}]

View file

@ -5,24 +5,18 @@ LiteLLM supports j2-light, j2-mid and j2-ultra from [AI21](https://www.ai21.com/
They're available to use without a waitlist.
### API KEYS
We provide a free $10 community-key for testing all providers on LiteLLM. You can replace this with your own key.
```python
import os
os.environ["AI21_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your ai21 key
os.environ["AI21_API_KEY"] = "your-api-key"
```
**Need a dedicated key?**
Email us @ krrish@berri.ai
[**See all supported models by the litellm api key**](../proxy_api.md#supported-models-for-litellm-key)
### Sample Usage
```python
from litellm import completion
# set env variable - [OPTIONAL] replace with your ai21 key
os.environ["AI21_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
# set env variable
os.environ["AI21_API_KEY"] = "your-api-key"
messages = [{"role": "user", "content": "Write me a poem about the blue sky"}]

View file

@ -2,18 +2,12 @@
LiteLLM supports Claude-1, 1.2 and Claude-2.
## API Keys
We provide a free $10 community-key for testing all providers on LiteLLM. You can replace this with your own key.
```python
import os
os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your anthropic key
os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
```
**Need a dedicated key?**
Email us @ krrish@berri.ai
## Supported Models for LiteLLM Key
These are the models that currently work with the "sk-litellm-.." keys.
## Sample Usage
@ -22,7 +16,7 @@ import os
from litellm import completion
# set env - [OPTIONAL] replace with your anthropic key
os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
messages = [{"role": "user", "content": "Hey! how's it going?"}]
response = completion(model="claude-instant-1", messages=messages)
@ -36,8 +30,8 @@ Just set `stream=True` when calling completion.
import os
from litellm import completion
# set env - [OPTIONAL] replace with your anthropic key
os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
# set env
os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
messages = [{"role": "user", "content": "Hey! how's it going?"}]
response = completion(model="claude-instant-1", messages=messages, stream=True)

View file

@ -3,17 +3,12 @@
LiteLLM supports all LLMs on NLP Cloud.
## API Keys
We provide a free $10 community-key for testing all providers on LiteLLM. You can replace this with your own key.
```python
import os
os.environ["NLP_CLOUD_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your nlp cloud key
os.environ["NLP_CLOUD_API_KEY"] = "your-api-key"
```
**Need a dedicated key?**
Email us @ krrish@berri.ai
[**See all supported models by the litellm api key**](../proxy_api.md#supported-models-for-litellm-key)
## Sample Usage
@ -21,8 +16,8 @@ Email us @ krrish@berri.ai
import os
from litellm import completion
# set env - [OPTIONAL] replace with your nlp cloud key
os.environ["NLP_CLOUD_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
# set env
os.environ["NLP_CLOUD_API_KEY"] = "your-api-key"
messages = [{"role": "user", "content": "Hey! how's it going?"}]
response = completion(model="dolphin", messages=messages)
@ -36,8 +31,8 @@ Just set `stream=True` when calling completion.
import os
from litellm import completion
# set env - [OPTIONAL] replace with your nlp cloud key
os.environ["NLP_CLOUD_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
# set env
os.environ["NLP_CLOUD_API_KEY"] = "your-api-key"
messages = [{"role": "user", "content": "Hey! how's it going?"}]
response = completion(model="dolphin", messages=messages, stream=True)
@ -57,7 +52,7 @@ import os
from litellm import completion
# set env - [OPTIONAL] replace with your nlp cloud key
os.environ["NLP_CLOUD_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
os.environ["NLP_CLOUD_API_KEY"] = "your-api-key"
messages = [{"role": "user", "content": "Hey! how's it going?"}]

View file

@ -2,12 +2,11 @@
LiteLLM supports OpenAI Chat + Text completion and embedding calls.
### API Keys
We provide a free $10 community-key for testing all providers on LiteLLM. You can replace this with your own key.
```python
import os
os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
os.environ["OPENAI_API_KEY"] = "your-api-key"
```
**Need a dedicated key?**
Email us @ krrish@berri.ai
@ -19,7 +18,7 @@ Email us @ krrish@berri.ai
import os
from litellm import completion
os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
os.environ["OPENAI_API_KEY"] = "your-api-key"
messages = [{ "content": "Hello, how are you?","role": "user"}]

View file

@ -2,26 +2,17 @@
LiteLLM supports all models on Together AI.
## API Keys
We provide a free $10 community-key for testing all providers on LiteLLM. You can replace this with your own key.
```python
import os
os.environ["TOGETHERAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your together ai key
os.environ["TOGETHERAI_API_KEY"] = "your-api-key"
```
**Need a dedicated key?**
Email us @ krrish@berri.ai
[**See all supported models by the litellm api key**](../proxy_api.md#supported-models-for-litellm-key)
## Sample Usage
```python
from litellm import completion
# set env variable - [OPTIONAL] replace with your together ai key
os.environ["TOGETHERAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
os.environ["TOGETHERAI_API_KEY"] = "your-api-key"
messages = [{"role": "user", "content": "Write me a poem about the blue sky"}]

View file

@ -9,8 +9,8 @@ import os
from litellm import completion
## set ENV variables
os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
os.environ["COHERE_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your cohere key
os.environ["OPENAI_API_KEY"] = "your-api-key"
os.environ["COHERE_API_KEY"] = "your-api-key"
messages = [{ "content": "Hello, how are you?","role": "user"}]

View file

@ -437,7 +437,6 @@ def test_completion_openai_litellm_key():
# test_completion_openai_litellm_key()
# commented out for now, as openrouter is quite flaky - causing our deployments to fail. Please run this before pushing changes.
def test_completion_openrouter1():
try:
response = completion(
@ -453,7 +452,7 @@ def test_completion_openrouter1():
def test_completion_openrouter2():
try:
response = completion(
model="google/palm-2-chat-bison",
model="openrouter/openai/gpt-4-32k",
messages=messages,
max_tokens=5,
)
@ -464,6 +463,47 @@ def test_completion_openrouter2():
# test_completion_openrouter()
def test_completion_hf_model_no_provider():
    """A bare Hugging Face model id (no ``huggingface/`` prefix) has no fixed
    provider, so ``completion()`` must raise instead of guessing one.

    The original version called ``pytest.fail(f"Error occurred: {e}")`` inside
    the ``try`` block: ``e`` is undefined at that point, and the resulting
    ``NameError`` was swallowed by ``except Exception: pass`` — so the test
    passed even when ``completion()`` wrongly succeeded. ``pytest.raises``
    expresses the intended expect-an-exception contract directly.
    """
    with pytest.raises(Exception):
        response = completion(
            model="WizardLM/WizardLM-70B-V1.0",
            messages=messages,
            max_tokens=5,
        )
        # Only reached if completion() wrongly succeeds; pytest.raises then
        # reports the missing exception as a failure.
        print(response)
test_completion_hf_model_no_provider()
def test_completion_hf_model_no_provider_2():
    """A second bare Hugging Face model id without a provider prefix must also
    make ``completion()`` raise.

    The original placed ``pytest.fail(f"Error occurred: {e}")`` inside the
    ``try`` where ``e`` is undefined; the ``NameError`` it raised was caught by
    ``except Exception: pass``, silently hiding the failure. ``pytest.raises``
    states the expected-exception contract without that trap.
    """
    with pytest.raises(Exception):
        completion(
            model="meta-llama/Llama-2-70b-chat-hf",
            messages=messages,
            max_tokens=5,
        )
test_completion_hf_model_no_provider_2()
def test_completion_openrouter2():
    """Smoke-test an OpenRouter-prefixed model: the call should succeed.

    NOTE(review): a function with this exact name is defined earlier in the
    file; this later definition shadows it, so pytest collects only this one.
    Consider renaming one of them.
    """
    try:
        result = completion(
            model="openrouter/openai/gpt-4-32k",
            messages=messages,
            max_tokens=5,
        )
        # Echo the response so failures are easier to diagnose in CI logs.
        print(result)
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")
def test_completion_openai_with_more_optional_params():
try:
response = completion(
@ -865,7 +905,6 @@ def test_completion_with_fallbacks():
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# def test_completion_with_fallbacks_multiple_keys():
# print(f"backup key 1: {os.getenv('BACKUP_OPENAI_API_KEY_1')}")
# print(f"backup key 2: {os.getenv('BACKUP_OPENAI_API_KEY_2')}")

View file

@ -1187,7 +1187,7 @@ def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None):
model = model.split("/", 1)[1]
return model, custom_llm_provider
# check if model in known model provider list
# check if model in known model provider list -> for huggingface models, raise exception as they don't have a fixed provider (can be togetherai, anyscale, baseten, runpod, et.)
## openai - chatcompletion + text completion
if model in litellm.open_ai_chat_completion_models:
custom_llm_provider = "openai"
@ -1208,15 +1208,9 @@ def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None):
## vertex - text + chat models
elif model in litellm.vertex_chat_models or model in litellm.vertex_text_models:
custom_llm_provider = "vertex_ai"
## huggingface
elif model in litellm.huggingface_models:
custom_llm_provider = "huggingface"
## ai21
elif model in litellm.ai21_models:
custom_llm_provider = "ai21"
## together_ai
elif model in litellm.together_ai_models:
custom_llm_provider = "together_ai"
## aleph_alpha
elif model in litellm.aleph_alpha_models:
custom_llm_provider = "aleph_alpha"
@ -1231,6 +1225,9 @@ def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None):
custom_llm_provider = "petals"
if custom_llm_provider is None or custom_llm_provider=="":
print()
print("\033[1;31mProvider List: https://docs.litellm.ai/docs/providers\033[0m")
print()
raise ValueError(f"LLM Provider NOT provided. Pass in the LLM provider you are trying to call. E.g. For 'Huggingface' inference endpoints pass in `completion(model='huggingface/{model}',..)` Learn more: https://docs.litellm.ai/docs/providers")
return model, custom_llm_provider
except Exception as e: