forked from phoenix/litellm-mirror
updates
This commit is contained in:
parent
bb50729a18
commit
f04d50d119
15 changed files with 84 additions and 70 deletions
@@ -45,8 +45,13 @@ from litellm import completion
 import os
 
 ## set ENV variables
+<<<<<<< HEAD
 os.environ["OPENAI_API_KEY"] = "your-openai-key"
 os.environ["COHERE_API_KEY"] = "your-cohere-key"
+=======
+os.environ["OPENAI_API_KEY"] = "your-api-key"
+os.environ["COHERE_API_KEY"] = "your-api-key"
+>>>>>>> 6a9d754 (remove api key from docs)
 
 messages = [{ "content": "Hello, how are you?","role": "user"}]
 
@@ -17,8 +17,8 @@ config = {
 }
 
 # set env var
-os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
-os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your anthropic key
+os.environ["OPENAI_API_KEY"] = "your-api-key"
+os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
 
 
 sample_text = "how does a court case get to the Supreme Court?" * 1000
@@ -57,7 +57,7 @@ You can also access information like latency.
 
 ```python
 import litellm
-os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-5b46387675a944d2"
+os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
 
 messages=[{"role": "user", "content": "Hey!"}]
 
@@ -12,8 +12,8 @@ By default we provide a free $10 community-key to try all providers supported on
 from litellm import completion
 
 ## set ENV variables
-os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
-os.environ["COHERE_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your cohere key
+os.environ["OPENAI_API_KEY"] = "your-api-key"
+os.environ["COHERE_API_KEY"] = "your-api-key"
 
 messages = [{ "content": "Hello, how are you?","role": "user"}]
 
@@ -23,7 +23,7 @@ from litellm import completion
 import os
 
 ## set ENV variables
-os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
+os.environ["OPENAI_API_KEY"] = "your-api-key"
 
 response = completion(
   model="gpt-3.5-turbo",
@@ -39,7 +39,7 @@ from litellm import completion
 import os
 
 ## set ENV variables
-os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
+os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
 
 response = completion(
   model="claude-2",
@@ -133,7 +133,7 @@ from litellm import completion
 import os
 
 ## set ENV variables
-os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
+os.environ["OPENAI_API_KEY"] = "your-api-key"
 
 response = completion(
   model="gpt-3.5-turbo",
@@ -150,7 +150,7 @@ from litellm import completion
 import os
 
 ## set ENV variables
-os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
+os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
 
 response = completion(
   model="claude-2",
@@ -262,7 +262,7 @@ Pass the completion response to `litellm.completion_cost(completion_response=res
 ```python
 from litellm import completion, completion_cost
 import os
-os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
+os.environ["OPENAI_API_KEY"] = "your-api-key"
 
 response = completion(
   model="gpt-3.5-turbo",
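
For context, a minimal self-contained sketch of the cost-tracking pattern the hunk above edits. It assumes only what the hunk itself shows — `completion_cost(completion_response=...)` — plus the same placeholder key the docs now use:

```python
from litellm import completion, completion_cost
import os

os.environ["OPENAI_API_KEY"] = "your-api-key"  # placeholder, as in the doc

response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello, how are you?"}],
)

# completion_cost reads the model and token usage off the completion response
cost = completion_cost(completion_response=response)
print(f"cost: ${cost:.6f}")
```
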
@@ -289,7 +289,7 @@ import os
 from litellm import completion
 
 # use the LiteLLM API Key
-os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
+os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
 
 messages = [{ "content": "Hello, how are you?","role": "user"}]
 
@@ -304,7 +304,7 @@ import os
 from litellm import completion
 
 # use the LiteLLM API Key
-os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
+os.environ["OPENAI_API_KEY"] = "your-api-key"
 
 messages = [{ "content": "Hello, how are you?","role": "user"}]
 
@@ -319,7 +319,7 @@ import os
 from litellm import completion
 
 # use the LiteLLM API Key
-os.environ["TOGETHERAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
+os.environ["TOGETHERAI_API_KEY"] = "your-api-key"
 
 messages = [{ "content": "Hello, how are you?","role": "user"}]
 
@@ -335,7 +335,7 @@ import os
 from litellm import completion
 
 # use the LiteLLM API Key
-os.environ["TOGETHERAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
+os.environ["TOGETHERAI_API_KEY"] = "your-api-key"
 
 messages = [{ "content": "Hello, how are you?","role": "user"}]
 
@@ -5,24 +5,18 @@ LiteLLM supports j2-light, j2-mid and j2-ultra from [AI21](https://www.ai21.com/
 They're available to use without a waitlist.
 
 ### API KEYS
-We provide a free $10 community-key for testing all providers on LiteLLM. You can replace this with your own key.
-
 ```python
 import os
-os.environ["AI21_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your ai21 key
+os.environ["AI21_API_KEY"] = "your-api-key"
 ```
-**Need a dedicated key?**
-Email us @ krrish@berri.ai
-
-[**See all supported models by the litellm api key**](../proxy_api.md#supported-models-for-litellm-key)
 
 ### Sample Usage
 
 ```python
 from litellm import completion
 
-# set env variable - [OPTIONAL] replace with your ai21 key
-os.environ["AI21_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
+# set env variable
+os.environ["AI21_API_KEY"] = "your-api-key"
 
 messages = [{"role": "user", "content": "Write me a poem about the blue sky"}]
 
@@ -2,18 +2,12 @@
 LiteLLM supports Claude-1, 1.2 and Claude-2.
 
 ## API Keys
-We provide a free $10 community-key for testing all providers on LiteLLM. You can replace this with your own key.
 
 ```python
 import os
 
-os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your anthropic key
+os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
 ```
-**Need a dedicated key?**
-Email us @ krrish@berri.ai
-
-## Supported Models for LiteLLM Key
-These are the models that currently work with the "sk-litellm-.." keys.
 
 ## Sample Usage
 
@@ -22,7 +16,7 @@ import os
 from litellm import completion
 
 # set env - [OPTIONAL] replace with your anthropic key
-os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
+os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
 
 messages = [{"role": "user", "content": "Hey! how's it going?"}]
 response = completion(model="claude-instant-1", messages=messages)
@@ -36,8 +30,8 @@ Just set `stream=True` when calling completion.
 import os
 from litellm import completion
 
-# set env - [OPTIONAL] replace with your anthropic key
-os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
+# set env
+os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
 
 messages = [{"role": "user", "content": "Hey! how's it going?"}]
 response = completion(model="claude-instant-1", messages=messages, stream=True)
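
As a usage note on the streaming hunk above: with `stream=True`, `completion` yields chunks rather than a single response. A sketch of consuming the stream follows; the OpenAI-style `choices[0]["delta"]` chunk layout is an assumption (it is not shown in this diff), so adjust for your litellm version:

```python
import os
from litellm import completion

os.environ["ANTHROPIC_API_KEY"] = "your-api-key"  # placeholder, as in the doc

messages = [{"role": "user", "content": "Hey! how's it going?"}]
response = completion(model="claude-instant-1", messages=messages, stream=True)

for chunk in response:
    # assumed OpenAI-style delta format that litellm normalizes chunks to
    delta = chunk["choices"][0]["delta"]
    print(delta.get("content", ""), end="")
```
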
@@ -3,17 +3,12 @@
 LiteLLM supports all LLMs on NLP Cloud.
 
 ## API Keys
-We provide a free $10 community-key for testing all providers on LiteLLM. You can replace this with your own key.
 
 ```python
 import os
 
-os.environ["NLP_CLOUD_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your nlp cloud key
+os.environ["NLP_CLOUD_API_KEY"] = "your-api-key"
 ```
-**Need a dedicated key?**
-Email us @ krrish@berri.ai
-
-[**See all supported models by the litellm api key**](../proxy_api.md#supported-models-for-litellm-key)
 
 ## Sample Usage
 
@@ -21,8 +16,8 @@ Email us @ krrish@berri.ai
 import os
 from litellm import completion
 
-# set env - [OPTIONAL] replace with your nlp cloud key
-os.environ["NLP_CLOUD_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
+# set env
+os.environ["NLP_CLOUD_API_KEY"] = "your-api-key"
 
 messages = [{"role": "user", "content": "Hey! how's it going?"}]
 response = completion(model="dolphin", messages=messages)
@@ -36,8 +31,8 @@ Just set `stream=True` when calling completion.
 import os
 from litellm import completion
 
-# set env - [OPTIONAL] replace with your nlp cloud key
-os.environ["NLP_CLOUD_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
+# set env
+os.environ["NLP_CLOUD_API_KEY"] = "your-api-key"
 
 messages = [{"role": "user", "content": "Hey! how's it going?"}]
 response = completion(model="dolphin", messages=messages, stream=True)
@@ -57,7 +52,7 @@ import os
 from litellm import completion
 
 # set env - [OPTIONAL] replace with your nlp cloud key
-os.environ["NLP_CLOUD_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
+os.environ["NLP_CLOUD_API_KEY"] = "your-api-key"
 
 messages = [{"role": "user", "content": "Hey! how's it going?"}]
 
@@ -2,12 +2,11 @@
 LiteLLM supports OpenAI Chat + Text completion and embedding calls.
 
 ### API Keys
-We provide a free $10 community-key for testing all providers on LiteLLM. You can replace this with your own key.
 
 ```python
 import os
 
-os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
+os.environ["OPENAI_API_KEY"] = "your-api-key"
 ```
 **Need a dedicated key?**
 Email us @ krrish@berri.ai
@@ -19,7 +18,7 @@ Email us @ krrish@berri.ai
 import os
 from litellm import completion
 
-os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
+os.environ["OPENAI_API_KEY"] = "your-api-key"
 
 
 messages = [{ "content": "Hello, how are you?","role": "user"}]
@@ -2,26 +2,17 @@
 LiteLLM supports all models on Together AI.
 
 ## API Keys
-We provide a free $10 community-key for testing all providers on LiteLLM. You can replace this with your own key.
-
 ```python
 import os
-os.environ["TOGETHERAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your together ai key
+os.environ["TOGETHERAI_API_KEY"] = "your-api-key"
 ```
 
-**Need a dedicated key?**
-Email us @ krrish@berri.ai
-
-[**See all supported models by the litellm api key**](../proxy_api.md#supported-models-for-litellm-key)
-
-
 ## Sample Usage
 
 ```python
 from litellm import completion
 
-# set env variable - [OPTIONAL] replace with your together ai key
-os.environ["TOGETHERAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
+os.environ["TOGETHERAI_API_KEY"] = "your-api-key"
 
 messages = [{"role": "user", "content": "Write me a poem about the blue sky"}]
 
@@ -9,8 +9,8 @@ import os
 from litellm import completion
 
 ## set ENV variables
-os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
-os.environ["COHERE_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your cohere key
+os.environ["OPENAI_API_KEY"] = "your-api-key"
+os.environ["COHERE_API_KEY"] = "your-api-key"
 
 messages = [{ "content": "Hello, how are you?","role": "user"}]
 
Binary file not shown.
Binary file not shown.
@@ -437,7 +437,6 @@ def test_completion_openai_litellm_key():
 
 # test_completion_openai_litellm_key()
 
-# commented out for now, as openrouter is quite flaky - causing our deployments to fail. Please run this before pushing changes.
 def test_completion_openrouter1():
     try:
         response = completion(
@@ -453,7 +452,7 @@ def test_completion_openrouter1():
 def test_completion_openrouter2():
     try:
         response = completion(
-            model="google/palm-2-chat-bison",
+            model="openrouter/openai/gpt-4-32k",
             messages=messages,
             max_tokens=5,
         )
@@ -464,6 +463,47 @@ def test_completion_openrouter2():
 
 # test_completion_openrouter()
 
+def test_completion_hf_model_no_provider():
+    try:
+        response = completion(
+            model="WizardLM/WizardLM-70B-V1.0",
+            messages=messages,
+            max_tokens=5,
+        )
+        # Add any assertions here to check the response
+        print(response)
+        pytest.fail(f"Error occurred: {e}")
+    except Exception as e:
+        pass
+
+test_completion_hf_model_no_provider()
+
+def test_completion_hf_model_no_provider_2():
+    try:
+        response = completion(
+            model="meta-llama/Llama-2-70b-chat-hf",
+            messages=messages,
+            max_tokens=5,
+        )
+        # Add any assertions here to check the response
+        pytest.fail(f"Error occurred: {e}")
+    except Exception as e:
+        pass
+
+test_completion_hf_model_no_provider_2()
+
+def test_completion_openrouter2():
+    try:
+        response = completion(
+            model="openrouter/openai/gpt-4-32k",
+            messages=messages,
+            max_tokens=5,
+        )
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
 def test_completion_openai_with_more_optional_params():
     try:
         response = completion(
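
The two `test_completion_hf_model_no_provider*` tests added above encode the expectation that calling `completion()` with a bare Hugging Face repo id (no `provider/` prefix) fails. A tighter sketch of the same expectation using `pytest.raises`; the exact exception type propagating out of `completion` is an assumption, so the broad `Exception` is used here:

```python
import pytest
from litellm import completion

def test_bare_hf_model_id_raises():
    # no provider prefix -> get_llm_provider cannot infer one and should raise
    with pytest.raises(Exception):
        completion(
            model="meta-llama/Llama-2-70b-chat-hf",  # bare repo id, as in the diff
            messages=[{"role": "user", "content": "Hey!"}],
            max_tokens=5,
        )
```
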
@@ -865,7 +905,6 @@ def test_completion_with_fallbacks():
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
 
-
 # def test_completion_with_fallbacks_multiple_keys():
 #     print(f"backup key 1: {os.getenv('BACKUP_OPENAI_API_KEY_1')}")
 #     print(f"backup key 2: {os.getenv('BACKUP_OPENAI_API_KEY_2')}")
@@ -1187,7 +1187,7 @@ def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None):
             model = model.split("/", 1)[1]
             return model, custom_llm_provider
 
-        # check if model in known model provider list
+        # check if model in known model provider list -> for huggingface models, raise exception as they don't have a fixed provider (can be togetherai, anyscale, baseten, runpod, et.)
         ## openai - chatcompletion + text completion
         if model in litellm.open_ai_chat_completion_models:
             custom_llm_provider = "openai"
@@ -1208,15 +1208,9 @@ def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None):
         ## vertex - text + chat models
         elif model in litellm.vertex_chat_models or model in litellm.vertex_text_models:
             custom_llm_provider = "vertex_ai"
-        ## huggingface
-        elif model in litellm.huggingface_models:
-            custom_llm_provider = "huggingface"
         ## ai21
         elif model in litellm.ai21_models:
             custom_llm_provider = "ai21"
-        ## together_ai
-        elif model in litellm.together_ai_models:
-            custom_llm_provider = "together_ai"
         ## aleph_alpha
         elif model in litellm.aleph_alpha_models:
             custom_llm_provider = "aleph_alpha"
@@ -1231,6 +1225,9 @@ def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None):
             custom_llm_provider = "petals"
 
         if custom_llm_provider is None or custom_llm_provider=="":
+            print()
+            print("\033[1;31mProvider List: https://docs.litellm.ai/docs/providers\033[0m")
+            print()
             raise ValueError(f"LLM Provider NOT provided. Pass in the LLM provider you are trying to call. E.g. For 'Huggingface' inference endpoints pass in `completion(model='huggingface/{model}',..)` Learn more: https://docs.litellm.ai/docs/providers")
         return model, custom_llm_provider
     except Exception as e:
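
The new `ValueError` message above tells callers how to disambiguate a provider-less model. A minimal sketch of the prefixed call it asks for, reusing a repo id that appears in the tests earlier in this diff:

```python
from litellm import completion

# explicit provider prefix, as the error message instructs
response = completion(
    model="huggingface/meta-llama/Llama-2-70b-chat-hf",
    messages=[{"role": "user", "content": "Hello, how are you?"}],
)
print(response)
```
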