docs(routing.md): add queueing to docs
parent a2681e353f, commit 9d97082eed
9 changed files with 244 additions and 261 deletions
@ -3,11 +3,13 @@ import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

# Router - Load Balancing, Queueing

LiteLLM manages:
- Load-balancing across multiple deployments (e.g. Azure/OpenAI): pick the deployment that is below its rate limit and has used the fewest tokens
- Prioritizing important requests to ensure they don't fail (i.e. Queueing)

## Load Balancing
(s/o [@paulpierre](https://www.linkedin.com/in/paulpierre/) for his contribution to this implementation)

[**See Code**](https://github.com/BerriAI/litellm/blob/main/litellm/router.py)
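For a concrete picture of what load balancing looks like, here is a minimal sketch in the spirit of the quick start: two deployments share the same `model_name`, and the router picks one per call. The deployment names, env vars, and prompt are illustrative placeholders, not values from this commit.

```python
import os
from litellm import Router

# two deployments that serve the same model group name
model_list = [
    {
        "model_name": "gpt-3.5-turbo",  # model group / alias callers use
        "litellm_params": {             # params passed to litellm.completion()
            "model": "azure/chatgpt-v-2",  # hypothetical Azure deployment
            "api_key": os.getenv("AZURE_API_KEY"),
            "api_base": os.getenv("AZURE_API_BASE"),
            "api_version": os.getenv("AZURE_API_VERSION"),
        },
    },
    {
        "model_name": "gpt-3.5-turbo",
        "litellm_params": {
            "model": "gpt-3.5-turbo",      # OpenAI deployment
            "api_key": os.getenv("OPENAI_API_KEY"),
        },
    },
]

router = Router(model_list=model_list)

# the router picks one of the two deployments for each call
response = router.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hey, how's it going?"}],
)
print(response)
```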

@ -56,8 +58,8 @@ print(response)
- `router.aembeddings()` - async embeddings endpoint
- `router.text_completion()` - completion calls in the old OpenAI `/v1/completions` endpoint format
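To illustrate the async endpoints listed above, here is a hedged sketch that reuses the `router` built in the quick start; it assumes `router.acompletion()` mirrors `router.completion()`, that `router.aembeddings()` mirrors `litellm.embedding(model=..., input=[...])`, and that an embedding deployment is present in your `model_list`.

```python
import asyncio

async def main():
    # async chat completion (assumed to mirror router.completion())
    response = await router.acompletion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "ping"}],
    )
    print(response)

    # async embeddings (assumed signature: model + input list)
    embeddings = await router.aembeddings(
        model="text-embedding-ada-002",  # hypothetical embedding deployment
        input=["hello from litellm"],
    )
    print(embeddings)

asyncio.run(main())
```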

### Advanced
#### Routing Strategies - Shuffle, Rate Limit Aware

Router provides 2 strategies for routing your calls across multiple deployments:
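As a rough sketch of how you might pick a strategy when constructing the Router: the `routing_strategy` argument and the `"simple-shuffle"` / `"usage-based-routing"` values below are assumptions inferred from the two strategies this section describes, so check the tabs below (or `router.py`) for the exact names.

```python
from litellm import Router

# Strategy 1 - Shuffle: randomly distribute calls across deployments (assumed default)
router = Router(model_list=model_list, routing_strategy="simple-shuffle")

# Strategy 2 - Rate limit aware: prefer the deployment that is under its limits
# and has used the fewest tokens (strategy name is an assumption)
router = Router(model_list=model_list, routing_strategy="usage-based-routing")
```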

@ -164,7 +166,7 @@ print(response)
</TabItem>
</Tabs>

#### Caching + Request Timeouts

In production, we recommend using a Redis cache. For quickly testing things locally, we also support simple in-memory caching.
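For context, here is a sketch of wiring the Router to Redis plus a request timeout; the `redis_host` / `redis_port` / `redis_password`, `cache_responses`, and `timeout` arguments reflect our reading of the Router constructor and should be verified against the full example (elided in this diff).

```python
import os
from litellm import Router

router = Router(
    model_list=model_list,
    redis_host=os.getenv("REDIS_HOST"),         # production: shared Redis cache
    redis_port=os.getenv("REDIS_PORT"),
    redis_password=os.getenv("REDIS_PASSWORD"),
    cache_responses=True,                       # cache responses across instances
    timeout=30,                                 # seconds before a request is given up on
)
```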

@ -190,7 +192,7 @@ router = Router(model_list=model_list,
print(response)

#### Retry failed requests

For both async + sync functions, we support retrying failed requests.
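A short sketch of the retry knob; passing `num_retries` when constructing the Router is what the elided example above suggests, so treat the exact placement as an assumption.

```python
from litellm import Router

# retry failed calls up to 3 times before surfacing the error
router = Router(model_list=model_list, num_retries=3)

response = router.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hey, how's it going?"}],
)
print(f"response: {response}")
```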

@ -219,7 +221,7 @@ response = router.completion(model="gpt-3.5-turbo", messages=messages)
print(f"response: {response}")

#### Default litellm.completion/embedding params

You can also set default params for litellm completion/embedding calls. Here's how to do that:
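As an illustration, a sketch that sets default params via a `default_litellm_params` dict on the Router; the argument name and the nested `context_window_fallback_dict` key are assumptions based on how the elided example reads, so verify them against `router.py`.

```python
from litellm import Router

fallback_dict = {"gpt-3.5-turbo": "gpt-3.5-turbo-16k"}

router = Router(
    model_list=model_list,
    # applied to every completion/embedding call made through this router (assumed)
    default_litellm_params={"context_window_fallback_dict": fallback_dict},
)

response = router.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello, whats the weather in San Francisco??"}],
)
print(f"response: {response}")
```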

@ -241,14 +243,51 @@ print(f"response: {response}")

#### Deploy Router

If you want a server to just route requests to different LLM APIs, use our [OpenAI Proxy Server](./simple_proxy.md#multiple-instances-of-1-model)

## Queuing

### Quick Start

This requires a [Redis DB](https://redis.com/) to work.

Our implementation uses LiteLLM's proxy server + Celery workers to process up to 100 requests/second.

[**See Code**](https://github.com/BerriAI/litellm/blob/fbf9cab5b9e35df524e2c9953180c58d92e4cd97/litellm/proxy/proxy_server.py#L589)

1. Add Redis credentials in a .env file

```bash
REDIS_HOST="my-redis-endpoint"
REDIS_PORT="my-redis-port"
REDIS_PASSWORD="my-redis-password" # [OPTIONAL] if self-hosted
REDIS_USERNAME="default" # [OPTIONAL] if self-hosted
```

2. Start the litellm server with your model config

```bash
$ litellm --config /path/to/config.yaml --use_queue
```

3. Test it (in another window) → sends 100 simultaneous requests to the queue

```bash
$ litellm --test_async --num_requests 100
```

### Available Endpoints
- `/queue/request` - Queues a /chat/completions request. Returns a job id.
- `/queue/response/{id}` - Returns the status of a job. If completed, returns the response as well. Potential statuses are: `queued` and `finished`.
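To make the two endpoints concrete, here is a rough polling sketch against a locally running proxy; the base URL, request payload, and response field names (`id`, `status`) are assumptions for illustration rather than a documented schema.

```python
import time
import requests

BASE_URL = "http://0.0.0.0:8000"  # assumed local proxy address

# 1. queue a /chat/completions request and grab the job id
job = requests.post(
    f"{BASE_URL}/queue/request",
    json={
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "write a short poem"}],
    },
).json()
job_id = job["id"]  # field name assumed

# 2. poll until the job is finished
while True:
    status = requests.get(f"{BASE_URL}/queue/response/{job_id}").json()
    if status.get("status") == "finished":
        print(status)  # completed response is returned alongside the status
        break
    time.sleep(1)
```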

## Hosted Router + Request Queueing (api.litellm.ai)

Queue your LLM API requests to ensure you're under your rate limits
- Step 1: Make a POST request to `/queue/request` (this follows the same input format as an openai `/chat/completions` call, and returns a job id).
- Step 2: Make a GET request to `/queue/response/{id}` to check if it's completed

## Step 1: Add a config to the proxy, generate a temp key

@ -339,205 +378,4 @@ while True:
        print("got exception in polling", e)
        break


<!-- ## litellm.completion()

If you're calling litellm.completion(), here are the different reliability options you can enable.

## Retry failed requests

Call it in completion like this `completion(.., num_retries=2)`.

Here's a quick look at how you can use it:

```python
from litellm import completion

user_message = "Hello, whats the weather in San Francisco??"
messages = [{"content": user_message, "role": "user"}]

# normal call
response = completion(
    model="gpt-3.5-turbo",
    messages=messages,
    num_retries=2
)
```

## Fallbacks

## Helper utils
LiteLLM supports the following functions for reliability:
* `litellm.longer_context_model_fallback_dict`: Dictionary which has a mapping for those models which have larger equivalents
* `num_retries`: use tenacity retries
* `completion()` with fallbacks: switch between models/keys/api bases in case of errors.

### Context Window Fallbacks
```python
from litellm import completion

fallback_dict = {"gpt-3.5-turbo": "gpt-3.5-turbo-16k"}
messages = [{"content": "how does a court case get to the Supreme Court?" * 500, "role": "user"}]

completion(model="gpt-3.5-turbo", messages=messages, context_window_fallback_dict=fallback_dict)
```

### Fallbacks - Switch Models/API Keys/API Bases

LLM APIs can be unstable; completion() with fallbacks ensures you'll always get a response from your calls.

#### Usage
To use fallback models with `completion()`, specify a list of models in the `fallbacks` parameter.

The `fallbacks` list should include the primary model you want to use, followed by additional models that can be used as backups in case the primary model fails to provide a response.

#### Switch models
```python
response = completion(model="bad-model", messages=messages,
                      fallbacks=["gpt-3.5-turbo", "command-nightly"])
```

#### Switch API keys/bases (e.g. Azure deployment)
Switch between different keys for the same Azure deployment, or use another deployment as well.

```python
api_key = "bad-key"
response = completion(model="azure/gpt-4", messages=messages, api_key=api_key,
                      fallbacks=[{"api_key": "good-key-1"}, {"api_key": "good-key-2", "api_base": "good-api-base-2"}])
```

[Check out this section for implementation details](#fallbacks-1)

## Implementation Details

### Fallbacks
#### Output from calls
```
Completion with 'bad-model': got exception Unable to map your input to a model. Check your input - {'model': 'bad-model'


completion call gpt-3.5-turbo
{
  "id": "chatcmpl-7qTmVRuO3m3gIBg4aTmAumV1TmQhB",
  "object": "chat.completion",
  "created": 1692741891,
  "model": "gpt-3.5-turbo-0613",
  "choices": [
    {
      "index": 0,
      "message": {
        "role": "assistant",
        "content": "I apologize, but as an AI, I do not have the capability to provide real-time weather updates. However, you can easily check the current weather in San Francisco by using a search engine or checking a weather website or app."
      },
      "finish_reason": "stop"
    }
  ],
  "usage": {
    "prompt_tokens": 16,
    "completion_tokens": 46,
    "total_tokens": 62
  }
}

```

#### How do fallbacks work

When you pass `fallbacks` to `completion`, it makes the first `completion` call using the primary model specified as `model` in `completion(model=model)`. If the primary model fails or encounters an error, it automatically tries the `fallbacks` models in the specified order. This ensures a response even if the primary model is unavailable.


#### Key components of the Model Fallbacks implementation:
* Looping through `fallbacks`
* Cool-Downs for rate-limited models

#### Looping through `fallbacks`
Allow `45 seconds` for each request. Within that window, the function first tries the primary model set as `model`; if it fails, it loops through the backup `fallbacks` models and attempts to get a response within the allocated `45s`, as set here:
```python
while response == None and time.time() - start_time < 45:
    for model in fallbacks:
```

#### Cool-Downs for rate-limited models
If a model API call leads to an error, allow that model to cool down for `60s`:
```python
except Exception as e:
    print(f"got exception {e} for model {model}")
    rate_limited_models.add(model)
    model_expiration_times[model] = (
        time.time() + 60
    )  # cool down this selected model
    pass
```

Before making an LLM API call, we check if the selected model is in `rate_limited_models`; if so, we skip making the API call:
```python
if (
    model in rate_limited_models
):  # check if model is currently cooling down
    if (
        model_expiration_times.get(model)
        and time.time() >= model_expiration_times[model]
    ):
        rate_limited_models.remove(
            model
        )  # check if it's been 60s of cool down and remove model
    else:
        continue  # skip model
```

#### Full code of completion with fallbacks()
```python
response = None
rate_limited_models = set()
model_expiration_times = {}
start_time = time.time()
fallbacks = [kwargs["model"]] + kwargs["fallbacks"]
del kwargs["fallbacks"]  # remove fallbacks so it's not recursive

while response == None and time.time() - start_time < 45:
    for model in fallbacks:
        # loop thru all models
        try:
            if (
                model in rate_limited_models
            ):  # check if model is currently cooling down
                if (
                    model_expiration_times.get(model)
                    and time.time() >= model_expiration_times[model]
                ):
                    rate_limited_models.remove(
                        model
                    )  # check if it's been 60s of cool down and remove model
                else:
                    continue  # skip model

            # delete model from kwargs if it exists
            if kwargs.get("model"):
                del kwargs["model"]

            print("making completion call", model)
            response = litellm.completion(**kwargs, model=model)

            if response != None:
                return response

        except Exception as e:
            print(f"got exception {e} for model {model}")
            rate_limited_models.add(model)
            model_expiration_times[model] = (
                time.time() + 60
            )  # cool down this selected model
            pass
return response
``` -->

@ -38,6 +38,7 @@ const sidebars = {
        "completion/model_alias",
        "completion/batching",
        "completion/mock_requests",
        "completion/reliable_completions",
      ],
    },
    {