forked from phoenix/litellm-mirror
feat(router.py): add 'get_model_info' helper function to get the model info for a specific model, based on its id
This commit is contained in:
parent
fdc4fdb91a
commit
91971fa9e0
2 changed files with 15 additions and 8 deletions
|
@ -2590,6 +2590,16 @@ class Router:
|
|||
return model
|
||||
return None
|
||||
|
||||
def get_model_info(self, id: str) -> Optional[dict]:
    """
    Look up a deployment in ``self.model_list`` by its model-info id.

    Args:
        id: the value stored under ``model["model_info"]["id"]``.

    Returns:
        The matching deployment dict, or ``None`` when no deployment
        carries that id (or lacks a ``model_info``/``id`` entry).
    """
    for deployment in self.model_list:
        # Guard clauses: skip deployments without the keys we need,
        # exactly as the membership checks in the original did.
        if "model_info" not in deployment:
            continue
        info = deployment["model_info"]
        if "id" not in info:
            continue
        if info["id"] == id:
            return deployment
    return None
|
||||
|
||||
def get_model_ids(self):
|
||||
ids = []
|
||||
for model in self.model_list:
|
||||
|
@ -2904,15 +2914,10 @@ class Router:
|
|||
m for m in self.model_list if m["litellm_params"]["model"] == model
|
||||
]
|
||||
|
||||
verbose_router_logger.debug(
|
||||
f"initial list of deployments: {healthy_deployments}"
|
||||
)
|
||||
litellm.print_verbose(f"initial list of deployments: {healthy_deployments}")
|
||||
|
||||
verbose_router_logger.debug(
|
||||
f"healthy deployments: length {len(healthy_deployments)} {healthy_deployments}"
|
||||
)
|
||||
if len(healthy_deployments) == 0:
|
||||
raise ValueError(f"No healthy deployment available, passed model={model}")
|
||||
raise ValueError(f"No healthy deployment available, passed model={model}. ")
|
||||
if litellm.model_alias_map and model in litellm.model_alias_map:
|
||||
model = litellm.model_alias_map[
|
||||
model
|
||||
|
|
|
@ -79,10 +79,12 @@ class LowestTPMLoggingHandler_v2(CustomLogger):
|
|||
model=deployment.get("litellm_params", {}).get("model"),
|
||||
response=httpx.Response(
|
||||
status_code=429,
|
||||
content="{} rpm limit={}. current usage={}".format(
|
||||
content="{} rpm limit={}. current usage={}. id={}, model_group={}. Get the model info by calling 'router.get_model_info(id)'".format(
|
||||
RouterErrors.user_defined_ratelimit_error.value,
|
||||
deployment_rpm,
|
||||
local_result,
|
||||
model_id,
|
||||
deployment.get("model_name", ""),
|
||||
),
|
||||
request=httpx.Request(method="tpm_rpm_limits", url="https://github.com/BerriAI/litellm"), # type: ignore
|
||||
),
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue