fix(router.py): introducing usage-based-routing

Krrish Dholakia 2023-11-17 17:56:09 -08:00
parent 259c1c7616
commit cf0a9f591c
3 changed files with 133 additions and 130 deletions
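In short: the Router gains a third routing_strategy, "usage-based-routing", which tracks per-deployment TPM/RPM in a shared cache and routes each call to the least-used deployment. A minimal sketch of how the new strategy is enabled, mirroring the updated test further down; the Redis settings, deployment values, and prompt here are placeholders, and OPENAI_API_KEY is assumed to be set in the environment:

    from litellm import Router

    model_list = [
        {
            "model_name": "gpt-3.5-turbo",                 # model group callers ask for
            "litellm_params": {"model": "gpt-3.5-turbo"},  # actual deployment to call
            "tpm": 100000,                                 # optional per-minute limits the
            "rpm": 10000,                                  # usage-based strategy respects
        },
    ]

    router = Router(
        model_list=model_list,
        redis_host="localhost",    # placeholder; usage counters are kept in Redis
        redis_password="",
        redis_port="6379",
        routing_strategy="usage-based-routing",
    )

    response = router.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "What is the weather like in Boston?"}],
    )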

View file

@@ -4,6 +4,7 @@ import random, threading, time
import litellm, openai
import logging, asyncio
import inspect
from openai import AsyncOpenAI
[the AsyncOpenAI import is newly added]

class Router:
    """
@ -36,7 +37,7 @@ class Router:
num_retries: int = 0, num_retries: int = 0,
timeout: float = 600, timeout: float = 600,
default_litellm_params = {}, # default params for Router.chat.completion.create default_litellm_params = {}, # default params for Router.chat.completion.create
routing_strategy: Literal["simple-shuffle", "least-busy"] = "simple-shuffle") -> None: routing_strategy: Literal["simple-shuffle", "least-busy", "usage-based-routing"] = "simple-shuffle") -> None:
if model_list: if model_list:
self.set_model_list(model_list) self.set_model_list(model_list)
@@ -69,8 +70,9 @@ class Router:
        if cache_responses:
            litellm.cache = litellm.Cache(**cache_config)  # use Redis for caching completion requests
            self.cache_responses = cache_responses
        self.cache = litellm.Cache(cache_config)  # use Redis for tracking load balancing
        ## USAGE TRACKING ##
        litellm.success_callback = [self.deployment_callback]
[the self.cache, usage-tracking comment, and success_callback lines are newly added]

    def _start_health_check_thread(self):
        """
@@ -138,6 +140,8 @@
                    potential_deployments.append(item)
            item = random.choice(potential_deployments)
            return item or item[0]
        elif self.routing_strategy == "usage-based-routing":
            return self.get_usage_based_available_deployment(model=model, messages=messages, input=input)
[the usage-based-routing branch is new]

        raise ValueError("No models available.")
@@ -242,6 +246,9 @@
                    if k not in data:  # prioritize model-specific params > default router params
                        data[k] = v
            response = await litellm.acompletion(**{**data, "messages": messages, "caching": self.cache_responses, **kwargs})
            # client = AsyncOpenAI()
            # print(f"MAKING OPENAI CALL")
            # response = await client.chat.completions.create(model=model, messages=messages)
            return response
[the three commented-out debug lines are new]
        except Exception as e:
            if self.num_retries > 0:
@@ -301,119 +308,117 @@
                        data[k] = v
            return await litellm.aembedding(**{**data, "input": input, "caching": self.cache_responses, **kwargs})

[the commented-out usage-tracking stubs that previously followed are removed and re-added as working methods; the old get_available_deployment stub is renamed get_usage_based_available_deployment]

    def deployment_callback(
        self,
        kwargs,                 # kwargs to completion
        completion_response,    # response from completion
        start_time, end_time    # start/end time
    ):
        """
        Function LiteLLM submits a callback to after a successful
        completion. Purpose of this is to update TPM/RPM usage per model
        """
        model_name = kwargs.get('model', None)  # i.e. gpt35turbo
        custom_llm_provider = kwargs.get("litellm_params", {}).get('custom_llm_provider', None)  # i.e. azure
        if custom_llm_provider:
            model_name = f"{custom_llm_provider}/{model_name}"
        total_tokens = completion_response['usage']['total_tokens']
        self._set_deployment_usage(model_name, total_tokens)

    def get_usage_based_available_deployment(self,
                                             model: str,
                                             messages: Optional[List[Dict[str, str]]] = None,
                                             input: Optional[Union[str, List]] = None):
        """
        Returns a deployment with the lowest TPM/RPM usage.
        """
        # get list of potential deployments
        potential_deployments = []
        for item in self.model_list:
            if item["model_name"] == model:
                potential_deployments.append(item)

        # get current call usage
        token_count = 0
        if messages is not None:
            token_count = litellm.token_counter(model=model, messages=messages)
        elif input is not None:
            if isinstance(input, List):
                input_text = "".join(text for text in input)
            else:
                input_text = input
            token_count = litellm.token_counter(model=model, text=input_text)

        # -----------------------
        # Find lowest used model
        # ----------------------
        lowest_tpm = float("inf")
        deployment = None

        # return deployment with lowest tpm usage
        for item in potential_deployments:
            item_tpm, item_rpm = self._get_deployment_usage(deployment_name=item["litellm_params"]["model"])

            if item_tpm == 0:
                return item
            elif ("tpm" in item and item_tpm + token_count > item["tpm"]
                  or "rpm" in item and item_rpm + 1 >= item["rpm"]):  # if user passed in tpm / rpm in the model_list
                continue
            elif item_tpm < lowest_tpm:
                lowest_tpm = item_tpm
                deployment = item

        # if none, raise exception
        if deployment is None:
            raise ValueError("No models available.")

        # return model
        return deployment

    def _get_deployment_usage(
        self,
        deployment_name: str
    ):
        # ------------
        # Setup values
        # ------------
        current_minute = datetime.now().strftime("%H-%M")
        tpm_key = f'{deployment_name}:tpm:{current_minute}'
        rpm_key = f'{deployment_name}:rpm:{current_minute}'

        # ------------
        # Return usage
        # ------------
        tpm = self.cache.get_cache(cache_key=tpm_key) or 0
        rpm = self.cache.get_cache(cache_key=rpm_key) or 0

        return int(tpm), int(rpm)

    def increment(self, key: str, increment_value: int):
        # get value
        cached_value = self.cache.get_cache(cache_key=key)
        # update value
        try:
            cached_value = cached_value + increment_value
        except:
            cached_value = increment_value
        # save updated value
        self.cache.add_cache(result=cached_value, cache_key=key, ttl=self.default_cache_time_seconds)

    def _set_deployment_usage(
        self,
        model_name: str,
        total_tokens: int
    ):
        # ------------
        # Setup values
        # ------------
        current_minute = datetime.now().strftime("%H-%M")
        tpm_key = f'{model_name}:tpm:{current_minute}'
        rpm_key = f'{model_name}:rpm:{current_minute}'

        # ------------
        # Update usage
        # ------------
        self.increment(tpm_key, total_tokens)
        self.increment(rpm_key, 1)
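To make the bookkeeping above concrete: _set_deployment_usage and _get_deployment_usage meet on per-minute cache keys of the form "<deployment>:tpm:<HH-MM>" and "<deployment>:rpm:<HH-MM>", so the counters naturally roll over each minute. A self-contained sketch of the same key scheme, with a plain dict standing in for the Redis-backed litellm.Cache, and a hypothetical deployment name and token count:

    from datetime import datetime

    cache = {}  # stand-in for self.cache (litellm.Cache backed by Redis)

    def set_deployment_usage(model_name: str, total_tokens: int):
        current_minute = datetime.now().strftime("%H-%M")
        tpm_key = f"{model_name}:tpm:{current_minute}"
        rpm_key = f"{model_name}:rpm:{current_minute}"
        cache[tpm_key] = cache.get(tpm_key, 0) + total_tokens  # increment token counter
        cache[rpm_key] = cache.get(rpm_key, 0) + 1              # increment request counter

    def get_deployment_usage(deployment_name: str):
        current_minute = datetime.now().strftime("%H-%M")
        tpm = cache.get(f"{deployment_name}:tpm:{current_minute}") or 0
        rpm = cache.get(f"{deployment_name}:rpm:{current_minute}") or 0
        return int(tpm), int(rpm)

    # e.g. a completion finishing at 17:56 against "azure/chatgpt-v-2" using 250 total tokens
    set_deployment_usage("azure/chatgpt-v-2", 250)
    print(get_deployment_usage("azure/chatgpt-v-2"))  # -> (250, 1) until the minute rolls over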

View file

@@ -239,14 +239,31 @@ def test_acompletion_on_router():
[the second Azure deployment, the start_time line, the Boston prompt (previously "What is the weather like in SF?"), timeout=30 (previously 10), and routing_strategy="usage-based-routing" are new]
                "tpm": 100000,
                "rpm": 10000,
            },
            {
                "model_name": "gpt-3.5-turbo",
                "litellm_params": {
                    "model": "chatgpt-v-2",
                    "api_key": os.getenv("AZURE_API_KEY"),
                    "api_base": os.getenv("AZURE_API_BASE"),
                    "api_version": os.getenv("AZURE_API_VERSION")
                },
                "tpm": 100000,
                "rpm": 10000,
            }
        ]
        messages = [
            {"role": "user", "content": "What is the weather like in Boston?"}
        ]
        start_time = time.time()

        async def get_response():
            router = Router(model_list=model_list,
                            redis_host=os.environ["REDIS_HOST"],
                            redis_password=os.environ["REDIS_PASSWORD"],
                            redis_port=os.environ["REDIS_PORT"],
                            cache_responses=True,
                            timeout=30,
                            routing_strategy="usage-based-routing")
            response1 = await router.acompletion(model="gpt-3.5-turbo", messages=messages)
            print(f"response1: {response1}")
            response2 = await router.acompletion(model="gpt-3.5-turbo", messages=messages)
@@ -254,6 +271,8 @@ def test_acompletion_on_router():
            assert response1["choices"][0]["message"]["content"] == response2["choices"][0]["message"]["content"]
        asyncio.run(get_response())
    except litellm.Timeout as e:
        end_time = time.time()
        print(f"timeout error occurred: {end_time - start_time}")
[the two timing lines in the Timeout handler are new]
        pass
    except Exception as e:
        traceback.print_exc()
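Because the updated test registers two deployments under the same "gpt-3.5-turbo" group, the selection step can also be inspected directly. A rough sketch reusing the router built in the test above; the printed value depends on whatever usage the current minute has accumulated:

    deployment = router.get_available_deployment(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "What is the weather like in Boston?"}],
    )
    # with routing_strategy="usage-based-routing" this delegates to
    # get_usage_based_available_deployment: it returns the first deployment whose
    # current-minute TPM is 0, otherwise the lowest-TPM deployment still under its
    # tpm/rpm limits, and raises ValueError("No models available.") if none qualify
    print(deployment["litellm_params"]["model"])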
@@ -304,17 +323,6 @@ def test_function_calling_on_router():
        ]
        response = router.completion(model="gpt-3.5-turbo", messages=messages, functions=function1)
        print(f"final returned response: {response}")
[removed: an 11-line commented-out async get_response() variant of the same call]
        assert isinstance(response["choices"][0]["message"]["function_call"], dict)
    except Exception as e:
        print(f"An exception occurred: {e}")

View file

@@ -1069,16 +1069,6 @@ def client(original_function):
        try:
            global callback_list, add_breadcrumb, user_logger_fn, Logging
            function_id = kwargs["id"] if "id" in kwargs else None
[removed: the unconditional setup of litellm.client_session / litellm.aclient_session as httpx.Client / httpx.AsyncClient with max_connections=100, max_keepalive_connections=20, timeout=None]
            if litellm.use_client or ("use_client" in kwargs and kwargs["use_client"] == True):
                print_verbose(f"litedebugger initialized")
                if "lite_debugger" not in litellm.input_callback: