Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 18:54:30 +00:00
make rate limit hadler a class 2
This commit is contained in:
parent
68006ff584
commit
34dc176440
1 changed file with 225 additions and 299 deletions
litellm/utils.py

@@ -17,8 +17,14 @@ import datetime, time
 import tiktoken
 import uuid
 import aiohttp
+import logging
+import asyncio
 from tokenizers import Tokenizer
 import pkg_resources
+from dataclasses import (
+    dataclass,
+    field,
+) # for storing API inputs, outputs, and metadata
 encoding = tiktoken.get_encoding("cl100k_base")
 import importlib.metadata
 from .integrations.traceloop import TraceloopLogger
@@ -3716,100 +3722,105 @@ def get_valid_models():
 
 
 ############################# BATCH COMPLETION with Rate Limit Throttling #######################
-"""
-API REQUEST PARALLEL PROCESSOR
-
-Using the OpenAI API to process lots of text quickly takes some care.
-If you trickle in a million API requests one by one, they'll take days to complete.
-If you flood a million API requests in parallel, they'll exceed the rate limits and fail with errors.
-To maximize throughput, parallel requests need to be throttled to stay under rate limits.
-
-This script parallelizes requests to the OpenAI API while throttling to stay under rate limits.
-
-Features:
-- Streams requests from file, to avoid running out of memory for giant jobs
-- Makes requests concurrently, to maximize throughput
-- Throttles request and token usage, to stay under rate limits
-- Retries failed requests up to {max_attempts} times, to avoid missing data
-- Logs errors, to diagnose problems with requests
-
-```
-
-Inputs:
-- requests_filepath : str
-    - path to the file containing the requests to be processed
-    - file should be a jsonl file, where each line is a json object with API parameters and an optional metadata field
-    - e.g., {"model": "text-embedding-ada-002", "input": "embed me", "metadata": {"row_id": 1}}
-    - as with all jsonl files, take care that newlines in the content are properly escaped (json.dumps does this automatically)
-    - an example file is provided at examples/data/example_requests_to_parallel_process.jsonl
-    - the code to generate the example file is appended to the bottom of this script
-- save_filepath : str, optional
-    - path to the file where the results will be saved
-    - file will be a jsonl file, where each line is an array with the original request plus the API response
-    - e.g., [{"model": "text-embedding-ada-002", "input": "embed me"}, {...}]
-    - if omitted, results will be saved to {requests_filename}_results.jsonl
-- api_key : str, optional
-    - API key to use
-    - if omitted, the script will attempt to read it from an environment variable {os.getenv("OPENAI_API_KEY")}
-- max_requests_per_minute : float, optional
-    - target number of requests to make per minute (will make less if limited by tokens)
-    - leave headroom by setting this to 50% or 75% of your limit
-    - if requests are limiting you, try batching multiple embeddings or completions into one request
-    - if omitted, will default to 1,500
-- max_tokens_per_minute : float, optional
-    - target number of tokens to use per minute (will use less if limited by requests)
-    - leave headroom by setting this to 50% or 75% of your limit
-    - if omitted, will default to 125,000
-- token_encoding_name : str, optional
-    - name of the token encoding used, as defined in the `tiktoken` package
-    - if omitted, will default to "cl100k_base" (used by `text-embedding-ada-002`)
-- max_attempts : int, optional
-    - number of times to retry a failed request before giving up
-    - if omitted, will default to 5
-- logging_level : int, optional
-    - level of logging to use; higher numbers will log fewer messages
-    - 40 = ERROR; will log only when requests fail after all retries
-    - 30 = WARNING; will log when requests hit rate limits or other errors
-    - 20 = INFO; will log when requests start and the status at finish
-    - 10 = DEBUG; will log various things as the loop runs to see when they occur
-    - if omitted, will default to 20 (INFO).
-
-The script is structured as follows:
-    - Imports
-    - Define main()
-        - Initialize things
-        - In main loop:
-            - Get next request if one is not already waiting for capacity
-            - Update available token & request capacity
-            - If enough capacity available, call API
-            - The loop pauses if a rate limit error is hit
-            - The loop breaks when no tasks remain
-    - Define dataclasses
-        - StatusTracker (stores script metadata counters; only one instance is created)
-        - APIRequest (stores API inputs, outputs, metadata; one method to call API)
-    - Define functions
-        - append_to_jsonl (writes to results file)
-        - num_tokens_consumed_from_request (bigger function to infer token usage from request)
-        - task_id_generator_function (yields 1, 2, 3, ...)
-    - Run main()
-"""
-
-
-# imports
-import asyncio # for running API calls concurrently
-import json # for saving results to a jsonl file
-import logging # for logging rate limit warnings and other messages
-import os # for reading API key
-import re # for matching endpoint from request URL
-import tiktoken # for counting tokens
-import time # for sleeping after rate limit is hit
-from dataclasses import (
-    dataclass,
-    field,
-) # for storing API inputs, outputs, and metadata
-
-
-async def batch_completion_rate_limits(
+@dataclass
+class StatusTracker:
+    """Stores metadata about the script's progress. Only one instance is created."""
+
+    num_tasks_started: int = 0
+    num_tasks_in_progress: int = 0 # script ends when this reaches 0
+    num_tasks_succeeded: int = 0
+    num_tasks_failed: int = 0
+    num_rate_limit_errors: int = 0
+    num_api_errors: int = 0 # excluding rate limit errors, counted above
+    num_other_errors: int = 0
+    time_of_last_rate_limit_error: int = 0 # used to cool off after hitting rate limits
+
+
+@dataclass
+class APIRequest:
+    """Stores an API request's inputs, outputs, and other metadata. Contains a method to make an API call."""
+
+    task_id: int
+    request_json: dict
+    token_consumption: int
+    attempts_left: int
+    metadata: dict
+    result: list = field(default_factory=list)
+
+    async def call_api(
+        self,
+        request_header: dict,
+        retry_queue: asyncio.Queue,
+        save_filepath: str,
+        status_tracker: StatusTracker,
+    ):
+        """Calls the OpenAI API and saves results."""
+        logging.info(f"Making API Call for request #{self.task_id}")
+        error = None
+        try:
+            response = await litellm.acompletion(
+                **self.request_json
+            )
+            print(response)
+            logging.info(f"Completed request #{self.task_id}")
+        except Exception as e:
+            logging.warning(
+                f"Request {self.task_id} failed with error {e}"
+            )
+            status_tracker.num_api_errors += 1
+            error = e
+            print(f"got exception {e}")
+            if "Rate limit" in str(e):
+                status_tracker.time_of_last_rate_limit_error = time.time()
+                status_tracker.num_rate_limit_errors += 1
+                status_tracker.num_api_errors -= (
+                    1 # rate limit errors are counted separately
+                )
+
+        if error:
+            self.result.append(error)
+            if self.attempts_left:
+                retry_queue.put_nowait(self)
+            else:
+                logging.error(
+                    f"Request {self.request_json} failed after all attempts. Saving errors: {self.result}"
+                )
+                data = (
+                    [self.request_json, [str(e) for e in self.result], self.metadata]
+                    if self.metadata
+                    else [self.request_json, [str(e) for e in self.result]]
+                )
+                self.append_to_jsonl(data, save_filepath)
+                status_tracker.num_tasks_in_progress -= 1
+                status_tracker.num_tasks_failed += 1
+        else:
+            data = (
+                [self.request_json, response, self.metadata]
+                if self.metadata
+                else [self.request_json, response]
+            )
+            self.append_to_jsonl(data, save_filepath)
+            status_tracker.num_tasks_in_progress -= 1
+            status_tracker.num_tasks_succeeded += 1
+            logging.debug(f"Request {self.task_id} saved to {save_filepath}")
+
+    def append_to_jsonl(self, data, filename: str) -> None:
+        """Append a json payload to the end of a jsonl file."""
+        json_string = json.dumps(data)
+        with open(filename, "a") as f:
+            f.write(json_string + "\n")
+
+
+class RateLimitHandler():
+    def __init__(self, max_tokens_per_minute, max_requests_per_minute):
+        self.max_tokens_per_minute = max_tokens_per_minute
+        self.max_requests_per_minute = max_requests_per_minute
+        print("init rate limit handler")
+
+    async def batch_completion(
+        self,
     requests_filepath: str = "",
     jobs: list = [],
     save_filepath: str = None,
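Taken together, this hunk replaces the module-level `batch_completion_rate_limits` function with a `RateLimitHandler` class that owns the rate limits and exposes `batch_completion` as a method. A minimal usage sketch, assuming `RateLimitHandler` is importable from `litellm.utils` (where this diff defines it); only the constructor signature and the `jobs`/`save_filepath` parameters are taken from the diff above, the limit values are illustrative:

import asyncio

from litellm.utils import RateLimitHandler  # import path assumed from this diff

# Rate limits now live on the handler instead of being passed per call.
handler = RateLimitHandler(max_tokens_per_minute=60_000, max_requests_per_minute=500)

jobs = [
    {"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "what is 1+1?"}]},
    {"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "what is 2+2?"}]},
]

# Results are appended to save_filepath, one JSON array per line.
asyncio.run(handler.batch_completion(jobs=jobs, save_filepath="litellm_results.jsonl"))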
@@ -3823,6 +3834,7 @@ async def batch_completion_rate_limits(
 
     if save_filepath == None:
         save_filepath = "litellm_results.jsonl"
+    print("running batch completion")
 
     # constants
     seconds_to_pause_after_rate_limit_error = 15
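The default output file is `litellm_results.jsonl`. Per the removed docstring, each line holds `[request_json, response]` (plus `metadata` when provided), so results can be read back line by line. A small sketch assuming the default filename:

import json

with open("litellm_results.jsonl") as f:
    for line in f:
        record = json.loads(line)  # [request_json, response] or [request_json, response, metadata]
        request_json, response = record[0], record[1]
        metadata = record[2] if len(record) > 2 else None
        print(request_json.get("model"), metadata)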
@@ -3841,7 +3853,7 @@ async def batch_completion_rate_limits(
     # initialize trackers
     queue_of_requests_to_retry = asyncio.Queue()
     task_id_generator = (
-        task_id_generator_function()
+        self.task_id_generator_function()
     ) # generates integer IDs of 1, 2, 3, ...
     status_tracker = (
         StatusTracker()
@@ -3877,7 +3889,7 @@ async def batch_completion_rate_limits(
                 next_request = APIRequest(
                     task_id=next(task_id_generator),
                     request_json=request_json,
-                    token_consumption=num_tokens_consumed_from_request(
+                    token_consumption=self.num_tokens_consumed_from_request(
                         request_json, token_encoding_name
                     ),
                     attempts_left=max_attempts,
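The capacity bookkeeping that decides when `next_request` may be dispatched falls outside these hunks, but the removed docstring credits OpenAI's cookbook parallel processor, which continuously refills a request budget and a token budget and dispatches only when both cover the request. A sketch of that general pattern (names and limits are illustrative, not necessarily litellm's):

import time

max_requests_per_minute = 500  # illustrative limits
max_tokens_per_minute = 60_000

available_request_capacity = max_requests_per_minute
available_token_capacity = max_tokens_per_minute
last_update_time = time.time()

def has_capacity(token_consumption: int) -> bool:
    """Refill both budgets for the elapsed time, capped at one minute's allowance, then check fit."""
    global available_request_capacity, available_token_capacity, last_update_time
    seconds_since_update = time.time() - last_update_time
    available_request_capacity = min(
        available_request_capacity + max_requests_per_minute * seconds_since_update / 60.0,
        max_requests_per_minute,
    )
    available_token_capacity = min(
        available_token_capacity + max_tokens_per_minute * seconds_since_update / 60.0,
        max_tokens_per_minute,
    )
    last_update_time = time.time()
    return available_request_capacity >= 1 and available_token_capacity >= token_consumption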
@@ -3976,103 +3988,17 @@ async def batch_completion_rate_limits(
         )
 
 
 # dataclasses
 
 
-@dataclass
-class StatusTracker:
-    """Stores metadata about the script's progress. Only one instance is created."""
-
-    num_tasks_started: int = 0
-    num_tasks_in_progress: int = 0 # script ends when this reaches 0
-    num_tasks_succeeded: int = 0
-    num_tasks_failed: int = 0
-    num_rate_limit_errors: int = 0
-    num_api_errors: int = 0 # excluding rate limit errors, counted above
-    num_other_errors: int = 0
-    time_of_last_rate_limit_error: int = 0 # used to cool off after hitting rate limits
-
-
-@dataclass
-class APIRequest:
-    """Stores an API request's inputs, outputs, and other metadata. Contains a method to make an API call."""
-
-    task_id: int
-    request_json: dict
-    token_consumption: int
-    attempts_left: int
-    metadata: dict
-    result: list = field(default_factory=list)
-
-    async def call_api(
-        self,
-        request_header: dict,
-        retry_queue: asyncio.Queue,
-        save_filepath: str,
-        status_tracker: StatusTracker,
-    ):
-        """Calls the OpenAI API and saves results."""
-        logging.info(f"Starting request #{self.task_id}")
-        error = None
-        try:
-            response = await litellm.acompletion(
-                **self.request_json
-            )
-            # print("got response", response)
-            logging.info(f"Completed request #{self.task_id}")
-        except Exception as e:
-            logging.warning(
-                f"Request {self.task_id} failed with error {e}"
-            )
-            status_tracker.num_api_errors += 1
-            error = e
-            print(f"got exception {e}")
-            if "Rate limit" in str(e):
-                status_tracker.time_of_last_rate_limit_error = time.time()
-                status_tracker.num_rate_limit_errors += 1
-                status_tracker.num_api_errors -= (
-                    1 # rate limit errors are counted separately
-                )
-
-        if error:
-            self.result.append(error)
-            if self.attempts_left:
-                retry_queue.put_nowait(self)
-            else:
-                logging.error(
-                    f"Request {self.request_json} failed after all attempts. Saving errors: {self.result}"
-                )
-                data = (
-                    [self.request_json, [str(e) for e in self.result], self.metadata]
-                    if self.metadata
-                    else [self.request_json, [str(e) for e in self.result]]
-                )
-                append_to_jsonl(data, save_filepath)
-                status_tracker.num_tasks_in_progress -= 1
-                status_tracker.num_tasks_failed += 1
-        else:
-            data = (
-                [self.request_json, response, self.metadata]
-                if self.metadata
-                else [self.request_json, response]
-            )
-            append_to_jsonl(data, save_filepath)
-            status_tracker.num_tasks_in_progress -= 1
-            status_tracker.num_tasks_succeeded += 1
-            logging.debug(f"Request {self.task_id} saved to {save_filepath}")
-
-
-def append_to_jsonl(data, filename: str) -> None:
-    """Append a json payload to the end of a jsonl file."""
-    json_string = json.dumps(data)
-    with open(filename, "a") as f:
-        f.write(json_string + "\n")
-
-
-def num_tokens_consumed_from_request(
+    def num_tokens_consumed_from_request(
+        self,
     request_json: dict,
     token_encoding_name: str,
 ):
     """Count the number of tokens in the request. Only supports completion and embedding requests."""
     encoding = tiktoken.get_encoding(token_encoding_name)
     # if completions request, tokens = prompt + n * max_tokens
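Per the comment above, a completion request consumes roughly its prompt tokens plus `n * max_tokens` reserved for the reply. A standalone sketch of that estimate for chat requests; the per-message overhead of 4 tokens and the fallback `max_tokens` of 15 follow the cookbook script this code is derived from, and the exact overhead varies by model:

import tiktoken

def estimate_chat_request_tokens(request_json: dict, token_encoding_name: str = "cl100k_base") -> int:
    """Estimate tokens consumed by a chat completion request: prompt + n * max_tokens."""
    encoding = tiktoken.get_encoding(token_encoding_name)
    completion_tokens = request_json.get("n", 1) * request_json.get("max_tokens", 15)
    num_tokens = 0
    for message in request_json["messages"]:
        num_tokens += 4  # every message follows <im_start>{role/name}\n{content}<im_end>\n
        for value in message.values():
            num_tokens += len(encoding.encode(value))
    num_tokens += 2  # every reply is primed with <im_start>assistant
    return num_tokens + completion_tokens

print(estimate_chat_request_tokens(
    {"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "embed me"}], "max_tokens": 10}
))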
@@ -4092,7 +4018,7 @@ def num_tokens_consumed_from_request(
             num_tokens += 2 # every reply is primed with <im_start>assistant
         return num_tokens + completion_tokens
 
-def task_id_generator_function():
+    def task_id_generator_function(self):
     """Generate integers 0, 1, 2, and so on."""
     task_id = 0
     while True:
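The hunk ends mid-body; in the cookbook function this code is based on, the loop simply yields the current id and increments it. A sketch of the complete generator and its use:

def task_id_generator_function():
    """Generate integers 0, 1, 2, and so on."""
    task_id = 0
    while True:
        yield task_id
        task_id += 1

task_ids = task_id_generator_function()
assert next(task_ids) == 0
assert next(task_ids) == 1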