forked from phoenix/litellm-mirror
fix - show correct base_model in slack alerts
This commit is contained in:
parent f1a482f358
commit be273b3c3b

3 changed files with 16 additions and 3 deletions
@@ -671,11 +671,19 @@ class SlackAlerting(CustomLogger):
             )
             await _cache.async_set_cache(key=message, value="SENT", ttl=2419200)
             return

         return

-    async def model_added_alert(self, model_name: str, litellm_model_name: str):
-        model_info = litellm.model_cost.get(litellm_model_name, {})
+    async def model_added_alert(
+        self, model_name: str, litellm_model_name: str, passed_model_info: Any
+    ):
+        base_model_from_user = getattr(passed_model_info, "base_model", None)
+        model_info = {}
+        base_model = ""
+        if base_model_from_user is not None:
+            model_info = litellm.model_cost.get(base_model_from_user, {})
+            base_model = f"Base Model: `{base_model_from_user}`\n"
+        else:
+            model_info = litellm.model_cost.get(litellm_model_name, {})
         model_info_str = ""
         for k, v in model_info.items():
             if k == "input_cost_per_token" or k == "output_cost_per_token":
@@ -687,6 +695,7 @@ class SlackAlerting(CustomLogger):
         message = f"""
 *🚅 New Model Added*
 Model Name: `{model_name}`
+{base_model}

 Usage OpenAI Python SDK:
 ```
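For reference, a minimal self-contained sketch of the resolution logic added above: when the caller passes a model_info object that carries a base_model, cost data is looked up under that base model and the alert gains a "Base Model:" line; otherwise it falls back to the raw litellm model name. The stub class and the tiny model_cost table below are illustrative stand-ins, not litellm's real objects.

from typing import Any, Optional

# Illustrative stand-in for litellm.model_cost (the real table is much larger,
# and these per-token costs are placeholders).
model_cost = {
    "azure/gpt-35-turbo": {"input_cost_per_token": 5e-07, "output_cost_per_token": 1.5e-06},
}


class _StubModelInfo:
    """Minimal stand-in for the ModelInfo object the proxy passes in."""

    def __init__(self, base_model: Optional[str] = None):
        self.base_model = base_model


def resolve_base_model(litellm_model_name: str, passed_model_info: Any):
    """Mirrors the base_model resolution added to model_added_alert above."""
    base_model_from_user = getattr(passed_model_info, "base_model", None)
    base_model = ""
    if base_model_from_user is not None:
        # User told us the underlying model: use it for the cost lookup and the alert text.
        model_info = model_cost.get(base_model_from_user, {})
        base_model = f"Base Model: `{base_model_from_user}`\n"
    else:
        # No base_model supplied: fall back to the raw litellm model name.
        model_info = model_cost.get(litellm_model_name, {})
    return base_model, model_info


# With base_model set, costs come from the base-model entry:
print(resolve_base_model("my-azure-deployment", _StubModelInfo("azure/gpt-35-turbo")))
# Without it, the deployment name is used directly (here: no cost data found):
print(resolve_base_model("my-azure-deployment", _StubModelInfo()))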
@@ -8088,6 +8088,7 @@ async def add_new_model(
             await proxy_logging_obj.slack_alerting_instance.model_added_alert(
                 model_name=model_params.model_name,
                 litellm_model_name=_orignal_litellm_model_name,
+                passed_model_info=model_params.model_info,
             )
         except:
             pass
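A hedged usage sketch of the call path: with add_new_model now forwarding model_params.model_info, registering a model through the proxy's /model/new endpoint with base_model set under model_info should surface that base model in the Slack alert. The request body shape, port, and API key below are assumptions for illustration, not a verbatim schema.

import httpx

# Assumed request body; adjust to the proxy's actual /model/new schema.
payload = {
    "model_name": "my-azure-deployment",
    "litellm_params": {"model": "azure/my-deployment", "api_key": "<azure-key>"},
    "model_info": {"base_model": "azure/gpt-35-turbo"},  # picked up by the alert above
}

resp = httpx.post(
    "http://localhost:4000/model/new",       # assumed proxy URL
    json=payload,
    headers={"Authorization": "Bearer sk-1234"},  # placeholder proxy master key
)
print(resp.status_code, resp.text)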
@@ -76,6 +76,9 @@ class ModelInfo(BaseModel):
     db_model: bool = (
         False  # used for proxy - to separate models which are stored in the db vs. config.
     )
+    base_model: Optional[str] = (
+        None  # specify if the base model is azure/gpt-3.5-turbo etc for accurate cost tracking
+    )

     def __init__(self, id: Optional[Union[str, int]] = None, **params):
         if id is None:
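Finally, a small sketch of the new field in isolation, assuming a pydantic model shaped like the snippet above (only base_model is shown; id handling and db_model are omitted): setting base_model lets the getattr(..., "base_model", None) lookup in the alert code find a pricing-table key even when the deployment uses a custom name.

from typing import Optional
from pydantic import BaseModel


class ModelInfo(BaseModel):
    """Trimmed illustration of the proxy's ModelInfo; only the new field is shown."""

    base_model: Optional[str] = (
        None  # e.g. "azure/gpt-3.5-turbo", so cost tracking uses known pricing
    )


info = ModelInfo(base_model="azure/gpt-3.5-turbo")
print(getattr(info, "base_model", None))  # -> "azure/gpt-3.5-turbo"

info_without = ModelInfo()
print(getattr(info_without, "base_model", None))  # -> None; alert falls back to the litellm model name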