fix(router.py): fix linting issues
This commit is contained in: parent 65917815d5, commit 78fb8cf941
3 changed files with 100 additions and 102 deletions
@@ -1,5 +1,10 @@

import Image from '@theme/IdealImage';

# Reliability - Fallbacks, Azure Deployments, etc.

<Image img={require('../img/multiple_deployment.png')} alt="HF_Dashboard" style={{ maxWidth: '100%', height: 'auto' }}/>

# Reliability

LiteLLM helps prevent failed requests in 3 ways:
@@ -14,6 +19,99 @@ LiteLLM supports the following functions for reliability:

* `completion()` with fallbacks: switch between models/keys/api bases in case of errors.
* `router()`: An abstraction on top of completion + embeddings to route the request to a deployment with capacity (available tpm/rpm).

## Manage Multiple Deployments

Use this if you're trying to load-balance across multiple deployments (e.g. Azure/OpenAI).

`Router` prevents failed requests by picking the deployment that is below its rate limit and has used the fewest tokens.

In production, [Router connects to a Redis Cache](#redis-queue) to track usage across multiple deployments.

### Quick Start

```python
import os
from litellm import Router

model_list = [{ # list of model deployments
    "model_name": "gpt-3.5-turbo", # openai model name
    "litellm_params": { # params for litellm completion/embedding call
        "model": "azure/chatgpt-v-2",
        "api_key": os.getenv("AZURE_API_KEY"),
        "api_version": os.getenv("AZURE_API_VERSION"),
        "api_base": os.getenv("AZURE_API_BASE")
    }
}, {
    "model_name": "gpt-3.5-turbo", # openai model name
    "litellm_params": { # params for litellm completion/embedding call
        "model": "azure/chatgpt-functioncalling",
        "api_key": os.getenv("AZURE_API_KEY"),
        "api_version": os.getenv("AZURE_API_VERSION"),
        "api_base": os.getenv("AZURE_API_BASE")
    }
}, {
    "model_name": "gpt-3.5-turbo", # openai model name
    "litellm_params": { # params for litellm completion/embedding call
        "model": "gpt-3.5-turbo",
        "api_key": os.getenv("OPENAI_API_KEY"),
    }
}]

router = Router(model_list=model_list)

# openai.ChatCompletion.create replacement
response = router.completion(model="gpt-3.5-turbo",
                             messages=[{"role": "user", "content": "Hey, how's it going?"}])

print(response)
```
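
The same router can serve embedding calls too — a minimal sketch, assuming you add an embedding-capable deployment to the `model_list` above (the `text-embedding-ada-002` entry here is hypothetical):

```python
# Hypothetical embedding deployment assumed to be present in model_list
embedding_response = router.embedding(
    model="text-embedding-ada-002",
    input=["good morning from litellm"]
)
print(embedding_response)
```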

### Redis Queue

In production, we use Redis to track usage across multiple Azure deployments.

```python
router = Router(model_list=model_list,
                redis_host=os.getenv("REDIS_HOST"),
                redis_password=os.getenv("REDIS_PASSWORD"),
                redis_port=os.getenv("REDIS_PORT"))
```

### Deploy Router

1. Clone repo

```shell
git clone https://github.com/BerriAI/litellm
```

2. Create + Modify router_config.yaml (save your azure/openai/etc. deployment info)

```shell
cp ./router_config_template.yaml ./router_config.yaml
```

3. Build + Run docker image

```shell
docker build -t litellm-proxy . --build-arg CONFIG_FILE=./router_config.yaml
```

```shell
docker run --name litellm-proxy -e PORT=8000 -p 8000:8000 litellm-proxy
```

### Test

```shell
curl 'http://0.0.0.0:8000/router/completions' \
--header 'Content-Type: application/json' \
--data '{
  "model": "gpt-3.5-turbo",
  "messages": [{"role": "user", "content": "Hey"}]
}'
```
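
The curl call above can also be reproduced from Python — a minimal sketch, assuming the proxy container from the previous step is listening on port 8000 and the `requests` package is installed:

```python
import requests

# Send the same test payload to the router's completions endpoint
response = requests.post(
    "http://0.0.0.0:8000/router/completions",
    headers={"Content-Type": "application/json"},
    json={
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "Hey"}],
    },
)
print(response.json())
```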

## Retry failed requests

Call it in `completion()` like this: `completion(..., num_retries=2)`.
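
For example, a minimal sketch of a retried call (assuming `OPENAI_API_KEY` is set in your environment):

```python
from litellm import completion

# Retry up to 2 times if the provider returns a transient error
response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hey, how's it going?"}],
    num_retries=2
)
print(response)
```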

@@ -73,106 +171,6 @@ response = completion(model="azure/gpt-4", messages=messages, api_key=api_key,

[Check out this section for implementation details](#fallbacks-1)
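
As a rough sketch of that fallbacks flow, assuming `fallbacks` accepts a list of backup models to try in order when the primary call errors (the model names below are illustrative):

```python
import os
from litellm import completion

messages = [{"role": "user", "content": "Hey, how's it going?"}]

# If the azure/gpt-4 call fails, retry the request against the fallback models in order
response = completion(
    model="azure/gpt-4",
    messages=messages,
    api_key=os.getenv("AZURE_API_KEY"),
    fallbacks=["gpt-3.5-turbo", "command-nightly"]
)
print(response)
```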

## Manage Multiple Deployments

Use this if you're trying to load-balance across multiple deployments (e.g. Azure/OpenAI).

`Router` prevents failed requests by picking the deployment that is below its rate limit and has used the fewest tokens.

In production, [Router connects to a Redis Cache](#redis-queue) to track usage across multiple deployments.

### Quick Start

```python
import os
from litellm import Router

model_list = [{ # list of model deployments
    "model_name": "gpt-3.5-turbo", # openai model name
    "litellm_params": { # params for litellm completion/embedding call
        "model": "azure/chatgpt-v-2",
        "api_key": os.getenv("AZURE_API_KEY"),
        "api_version": os.getenv("AZURE_API_VERSION"),
        "api_base": os.getenv("AZURE_API_BASE")
    },
    "tpm": 240000,
    "rpm": 1800
}, {
    "model_name": "gpt-3.5-turbo", # openai model name
    "litellm_params": { # params for litellm completion/embedding call
        "model": "azure/chatgpt-functioncalling",
        "api_key": os.getenv("AZURE_API_KEY"),
        "api_version": os.getenv("AZURE_API_VERSION"),
        "api_base": os.getenv("AZURE_API_BASE")
    },
    "tpm": 240000,
    "rpm": 1800
}, {
    "model_name": "gpt-3.5-turbo", # openai model name
    "litellm_params": { # params for litellm completion/embedding call
        "model": "gpt-3.5-turbo",
        "api_key": os.getenv("OPENAI_API_KEY"),
    },
    "tpm": 1000000,
    "rpm": 9000
}]

router = Router(model_list=model_list)

# openai.ChatCompletion.create replacement
response = router.completion(model="gpt-3.5-turbo",
                             messages=[{"role": "user", "content": "Hey, how's it going?"}])

print(response)
```

### Redis Queue

In production, we use Redis to track usage across multiple Azure deployments.

```python
router = Router(model_list=model_list,
                redis_host=os.getenv("REDIS_HOST"),
                redis_password=os.getenv("REDIS_PASSWORD"),
                redis_port=os.getenv("REDIS_PORT"))
```

### Deploy Router

1. Clone repo

```shell
git clone https://github.com/BerriAI/litellm
```

2. Create + Modify router_config.yaml (save your azure/openai/etc. deployment info)

```shell
cp ./router_config_template.yaml ./router_config.yaml
```

3. Build + Run docker image

```shell
docker build -t litellm-proxy . --build-arg CONFIG_FILE=./router_config.yaml
```

```shell
docker run --name litellm-proxy -e PORT=8000 -p 8000:8000 litellm-proxy
```

### Test

```shell
curl 'http://0.0.0.0:8000/router/completions' \
--header 'Content-Type: application/json' \
--data '{
  "model": "gpt-3.5-turbo",
  "messages": [{"role": "user", "content": "Hey"}]
}'
```

## Implementation Details

### Fallbacks

BIN docs/my-website/img/multiple_deployment.png (new file)
Binary file not shown. After: 135 KiB
@@ -32,7 +32,7 @@ class Router:

                 cache_responses: bool = False) -> None:
        if model_list:
            self.set_model_list(model_list)
-           self.healthy_deployments = []
+           self.healthy_deployments: List = []
        ### HEALTH CHECK THREAD ### - commenting out as further testing required
        self._start_health_check_thread()
@@ -168,7 +168,7 @@ class Router:

        data = deployment["litellm_params"]
        # call via litellm.completion()
-       return litellm.text_completion(**{**data, "prompt": prompt, "caching": self.cache_responses, **kwargs})
+       return litellm.text_completion(**{**data, "prompt": prompt, "caching": self.cache_responses, **kwargs}) # type: ignore

    def embedding(self,
                  model: str,