Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 10:44:24 +00:00)
feat - use api to get prometheus api metrics
This commit is contained in:
parent 35de40e6eb
commit 28b930c0b7
1 changed file with 30 additions and 17 deletions
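For context on the change below: the query becomes a Prometheus range-vector expression ("{metric_name}[24h]") evaluated at "now" against the /api/v1/query endpoint, which returns a matrix result where each series carries its labels plus a list of [timestamp, "value"] samples. The sketch below shows the response shape the updated parsing code assumes, based on Prometheus' standard query API; the label values, timestamps, and counts are illustrative, not taken from a real deployment.

# Illustrative sketch (not from a live server): shape of the Prometheus
# /api/v1/query response that the updated code parses. A range-vector query such
# as "llm_deployment_successful_fallbacks_total[24h]" yields resultType "matrix";
# each series carries its labels under "metric" and [timestamp, "value"] samples
# under "values". Label values and numbers below are made up.
example_query_response = {
    "status": "success",
    "data": {
        "resultType": "matrix",
        "result": [
            {
                "metric": {
                    "__name__": "llm_deployment_successful_fallbacks_total",
                    "primary_model": "gpt-4o",
                    "fallback_model": "gpt-3.5-turbo",
                },
                "values": [[1721952000, "3"], [1721955600, "4"]],
            }
        ],
    },
}

# get_metric_from_prometheus returns response.json()["data"]["result"], so each
# item exposes result["metric"] (labels) and result["values"] (samples); the
# fallback helper reads values[0] and the primary_model / fallback_model labels.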
@@ -7,6 +7,7 @@ import os
 import time
 
 import litellm
+from litellm._logging import verbose_logger
 from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler
 
 PROMETHEUS_URL = litellm.get_secret("PROMETHEUS_URL")
@@ -23,31 +24,43 @@
             "PROMETHEUS_URL not set please set 'PROMETHEUS_URL=<>' in .env"
         )
 
-    start_of_day = int(time.time()) - (int(time.time()) % 86400)
-    query = metric_name
+    query = f"{metric_name}[24h]"
+    now = int(time.time())
     response = await async_http_handler.get(
-        f"{PROMETHEUS_URL}/api/v1/query", params={"query": query}
+        f"{PROMETHEUS_URL}/api/v1/query", params={"query": query, "time": now}
     )  # End of the day
+    _json_response = response.json()
+    verbose_logger.debug("json response from prometheus /query api %s", _json_response)
     results = response.json()["data"]["result"]
     return results
 
 
 async def get_fallback_metric_from_prometheus():
-    response_json = await get_metric_from_prometheus(
-        metric_name="llm_deployment_successful_fallbacks_total"
-    )
-
-    if not response_json:
-        return "No fallback data available."
-
+    """
+    Gets fallback metrics from prometheus for the last 24 hours
+    """
     response_message = ""
-    for result in response_json:
-        result = response_json
-        metric = result["metric"]
-        value = int(float(result["value"][1]))  # Convert value to integer
+    relevant_metrics = [
+        "llm_deployment_successful_fallbacks_total",
+        "llm_deployment_failed_fallbacks_total",
+    ]
+    for metric in relevant_metrics:
+        response_json = await get_metric_from_prometheus(
+            metric_name=metric,
+        )
 
-        primary_model = metric.get("primary_model", "Unknown")
-        fallback_model = metric.get("fallback_model", "Unknown")
-        response_message += f"`{value} successful fallback requests` with `primary model={primary_model} -> fallback model={fallback_model}`"
+        if response_json:
+            verbose_logger.debug("response json %s", response_json)
+            for result in response_json:
+                verbose_logger.debug("result= %s", result)
+                metric = result["metric"]
+                metric_values = result["values"]
+                most_recent_value = metric_values[0]
+
+                value = int(float(most_recent_value[1]))  # Convert value to integer
+                primary_model = metric.get("primary_model", "Unknown")
+                fallback_model = metric.get("fallback_model", "Unknown")
+                response_message += f"`{value} successful fallback requests` with primary model=`{primary_model}` -> fallback model=`{fallback_model}`"
                 response_message += "\n"
+    verbose_logger.debug("response message %s", response_message)
     return response_message
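A minimal usage sketch for the two coroutines touched by this commit. The import path is an assumption (adjust it to wherever this file lives in your checkout), and PROMETHEUS_URL must point at a reachable Prometheus server.

import asyncio

# Assumed import path for the module changed in this commit; adjust if the file
# lives elsewhere in your checkout.
from litellm.integrations.prometheus_helpers.prometheus_api import (
    get_fallback_metric_from_prometheus,
    get_metric_from_prometheus,
)


async def main():
    # PROMETHEUS_URL is read via litellm.get_secret at import time, so set it
    # (e.g. in .env) before importing this module; otherwise
    # get_metric_from_prometheus raises the "PROMETHEUS_URL not set" error.
    raw = await get_metric_from_prometheus(
        metric_name="llm_deployment_successful_fallbacks_total"
    )
    print(raw)  # list of series dicts, i.e. data.result from the query API

    # Formatted 24h summary of successful and failed fallbacks.
    print(await get_fallback_metric_from_prometheus())


if __name__ == "__main__":
    asyncio.run(main())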