Merge pull request #3789 from BerriAI/litellm_ttft_ui
feat(schema.prisma): store model id + model group as part of spend logs; allows precise model metrics
Commit 5a3aca10ce
7 changed files with 1335 additions and 801 deletions
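At its core, the change computes time-to-first-token (TTFT) from two timestamps each spend-log row already records: `startTime` (request start) and `completionStartTime` (first streamed chunk). A minimal sketch of that computation in plain Python, with a hypothetical row standing in for the `LiteLLM_SpendLogs` table:

```python
from datetime import datetime

# Hypothetical spend-log row; real rows live in the "LiteLLM_SpendLogs"
# Postgres table queried in the diff below.
row = {
    "startTime": datetime(2024, 5, 23, 10, 0, 0),
    "completionStartTime": datetime(2024, 5, 23, 10, 0, 0, 450_000),
}

# Same quantity the SQL computes with
# EXTRACT(epoch FROM ("completionStartTime" - "startTime"))
ttft_seconds = (row["completionStartTime"] - row["startTime"]).total_seconds()
print(f"time_to_first_token: {ttft_seconds:.3f}s")  # -> 0.450s
```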
@@ -8606,6 +8606,102 @@ async def model_info_v2(
     return {"data": all_models}
 
 
+@router.get(
+    "/model/streaming_metrics",
+    description="View time to first token for models in spend logs",
+    tags=["model management"],
+    include_in_schema=False,
+    dependencies=[Depends(user_api_key_auth)],
+)
+async def model_streaming_metrics(
+    user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
+    _selected_model_group: Optional[str] = None,
+    startTime: Optional[datetime] = None,
+    endTime: Optional[datetime] = None,
+):
+    global prisma_client, llm_router
+    if prisma_client is None:
+        raise ProxyException(
+            message=CommonProxyErrors.db_not_connected_error.value,
+            type="internal_error",
+            param="None",
+            code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+        )
+
+    startTime = startTime or datetime.now() - timedelta(days=7)  # show over past week
+    endTime = endTime or datetime.now()
+
+    sql_query = """
+        SELECT
+            api_base,
+            model_group,
+            model,
+            DATE_TRUNC('day', "startTime")::DATE AS day,
+            AVG(EXTRACT(epoch FROM ("completionStartTime" - "startTime"))) AS time_to_first_token
+        FROM
+            "LiteLLM_SpendLogs"
+        WHERE
+            "startTime" BETWEEN $2::timestamp AND $3::timestamp
+            AND "model_group" = $1 AND "cache_hit" != 'True'
+            AND "completionStartTime" IS NOT NULL
+            AND "completionStartTime" != "endTime"
+        GROUP BY
+            api_base,
+            model_group,
+            model,
+            day
+        ORDER BY
+            time_to_first_token DESC;
+    """
+
+    _all_api_bases = set()
+    db_response = await prisma_client.db.query_raw(
+        sql_query, _selected_model_group, startTime, endTime
+    )
+    _daily_entries: dict = {}  # {"Jun 23": {"model1": 0.002, "model2": 0.003}}
+    if db_response is not None:
+        for model_data in db_response:
+            _api_base = model_data["api_base"]
+            _model = model_data["model"]
+            _day = model_data["day"]
+            time_to_first_token = model_data["time_to_first_token"]
+            if _day not in _daily_entries:
+                _daily_entries[_day] = {}
+            _combined_model_name = str(_model)
+            if "https://" in _api_base:
+                _combined_model_name = str(_api_base)
+            if "/openai/" in _combined_model_name:
+                _combined_model_name = _combined_model_name.split("/openai/")[0]
+
+            _all_api_bases.add(_combined_model_name)
+            _daily_entries[_day][_combined_model_name] = time_to_first_token
+
+    """
+    each entry needs to be like this:
+    {
+        date: 'Jun 23',
+        'gpt-4-https://api.openai.com/v1/': 0.002,
+        'gpt-43-https://api.openai.com-12/v1/': 0.002,
+    }
+    """
+    # convert daily entries to list of dicts
+
+    response: List[dict] = []
+
+    # sort daily entries by date
+    _daily_entries = dict(sorted(_daily_entries.items(), key=lambda item: item[0]))
+    for day in _daily_entries:
+        entry = {"date": str(day)}
+        for model_key, latency in _daily_entries[day].items():
+            entry[model_key] = latency
+        response.append(entry)
+
+    return {
+        "data": response,
+        "all_api_bases": list(_all_api_bases),
+    }
+
+
 @router.get(
     "/model/metrics",
     description="View number of requests & avg latency per model on config.yaml",
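For reference, a sketch of calling the new endpoint from outside the proxy. The path and the `_selected_model_group`, `startTime`, and `endTime` query parameters come from the handler above; the base URL and API key are placeholders:

```python
import requests

PROXY_BASE = "http://localhost:4000"  # hypothetical proxy address

resp = requests.get(
    f"{PROXY_BASE}/model/streaming_metrics",
    # startTime/endTime are optional and default to the past week
    params={"_selected_model_group": "gpt-4"},
    headers={"Authorization": "Bearer sk-..."},  # any valid proxy key
)
resp.raise_for_status()
payload = resp.json()
# payload["data"]          -> [{"date": "...", "<model or api_base>": <ttft seconds>, ...}, ...]
# payload["all_api_bases"] -> chart categories, one per deployment
```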
@@ -8633,6 +8729,7 @@ async def model_metrics(
     sql_query = """
         SELECT
             api_base,
+            model_group,
             model,
             DATE_TRUNC('day', "startTime")::DATE AS day,
             AVG(EXTRACT(epoch FROM ("endTime" - "startTime"))) / SUM(total_tokens) AS avg_latency_per_token

@@ -8640,9 +8737,10 @@ async def model_metrics(
             "LiteLLM_SpendLogs"
         WHERE
             "startTime" BETWEEN $2::timestamp AND $3::timestamp
-            AND "model" = $1 AND "cache_hit" != 'True'
+            AND "model_group" = $1 AND "cache_hit" != 'True'
         GROUP BY
             api_base,
+            model_group,
             model,
             day
         HAVING

@@ -8655,6 +8753,7 @@ async def model_metrics(
         sql_query, _selected_model_group, startTime, endTime
     )
     _daily_entries: dict = {}  # {"Jun 23": {"model1": 0.002, "model2": 0.003}}
+
     if db_response is not None:
         for model_data in db_response:
             _api_base = model_data["api_base"]

@@ -8738,7 +8837,7 @@ SELECT
 FROM
     "LiteLLM_SpendLogs"
 WHERE
-    "model" = $2
+    "model_group" = $2
     AND "cache_hit" != 'True'
    AND "startTime" >= $3::timestamp
    AND "startTime" <= $4::timestamp
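The `/model/metrics` hunks above swap the filter from the deployment-specific `model` column to the new `model_group` column while still grouping by `api_base` and `model`. The effect, sketched with hypothetical rows: one public model name can be selected, and its deployments still show up as separate series:

```python
# Hypothetical aggregated rows; names and latencies are illustrative only.
rows = [
    {"model_group": "gpt-4", "model": "azure/gpt-4-eu",
     "api_base": "https://eu.example.azure.com", "avg_latency_per_token": 0.0021},
    {"model_group": "gpt-4", "model": "azure/gpt-4-us",
     "api_base": "https://us.example.azure.com", "avg_latency_per_token": 0.0034},
]

# WHERE "model_group" = $1 selects both deployments of the public name...
selected = [r for r in rows if r["model_group"] == "gpt-4"]

# ...while GROUP BY api_base, model_group, model keeps them distinct.
for r in selected:
    print(r["api_base"], r["model"], r["avg_latency_per_token"])
```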
@@ -177,6 +177,8 @@ model LiteLLM_SpendLogs {
   endTime             DateTime  // Assuming end_time is a DateTime field
   completionStartTime DateTime? // Assuming completionStartTime is a DateTime field
   model               String    @default("")
+  model_id            String?   @default("") // the model id stored in proxy model db
+  model_group         String?   @default("") // public model_name / model_group
   api_base            String    @default("")
   user                String    @default("")
   metadata            Json      @default("{}")
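With `model_id` and `model_group` persisted per row, a spend log can be traced back to the exact proxy deployment that served it. A sketch of reading the new columns through the same `query_raw` helper the endpoints above use (an initialized `prisma_client` is assumed and not shown):

```python
async def recent_deployments(prisma_client):
    # Table and column names come from the schema change above.
    sql_query = """
        SELECT model_id, model_group, model, api_base
        FROM "LiteLLM_SpendLogs"
        ORDER BY "startTime" DESC
        LIMIT 10;
    """
    return await prisma_client.db.query_raw(sql_query)
```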
@@ -1874,6 +1874,9 @@ def get_logging_payload(
         # hash the api_key
         api_key = hash_token(api_key)
 
+    _model_id = metadata.get("model_info", {}).get("id", "")
+    _model_group = metadata.get("model_group", "")
+
     # clean up litellm metadata
     if isinstance(metadata, dict):
         clean_metadata = {}

@@ -1928,6 +1931,8 @@ def get_logging_payload(
         "request_tags": metadata.get("tags", []),
         "end_user": end_user_id or "",
         "api_base": litellm_params.get("api_base", ""),
+        "model_group": _model_group,
+        "model_id": _model_id,
     }
 
     verbose_proxy_logger.debug("SpendTable: created payload - payload: %s\n\n", payload)
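The two lookups added to `get_logging_payload` read from the request metadata that the router attaches. A sketch of the assumed metadata shape (the id value is hypothetical); both lookups fall back to `""` when a key is absent, exactly as in the diff above:

```python
metadata = {
    "model_group": "gpt-4",                # public model_name the client requested
    "model_info": {"id": "3d6fbb02-..."},  # hypothetical proxy model-db id
}

_model_id = metadata.get("model_info", {}).get("id", "")
_model_group = metadata.get("model_group", "")
```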
@@ -177,6 +177,8 @@ model LiteLLM_SpendLogs {
   endTime             DateTime  // Assuming end_time is a DateTime field
   completionStartTime DateTime? // Assuming completionStartTime is a DateTime field
   model               String    @default("")
+  model_id            String?   @default("") // the model id stored in proxy model db
+  model_group         String?   @default("") // public model_name / model_group
   api_base            String    @default("")
   user                String    @default("")
   metadata            Json      @default("{}")
File diff suppressed because it is too large.
@@ -0,0 +1,29 @@
+import React from "react";
+import { LineChart } from "@tremor/react";
+
+interface TimeToFirstTokenProps {
+  modelMetrics: any[];
+  modelMetricsCategories: string[];
+  customTooltip: any;
+}
+
+const TimeToFirstToken: React.FC<TimeToFirstTokenProps> = ({
+  modelMetrics,
+  modelMetricsCategories,
+  customTooltip,
+}) => {
+  return (
+    <LineChart
+      title="Time to First token (s)"
+      className="h-72"
+      data={modelMetrics}
+      index="date"
+      showLegend={false}
+      categories={modelMetricsCategories}
+      colors={["indigo", "rose"]}
+      connectNulls={true}
+      customTooltip={customTooltip}
+    />
+  );
+};
+
+export default TimeToFirstToken;
@@ -473,6 +473,45 @@ export const modelMetricsCall = async (
     throw error;
   }
 };
+
+export const streamingModelMetricsCall = async (
+  accessToken: String,
+  modelGroup: String | null,
+  startTime: String | undefined,
+  endTime: String | undefined
+) => {
+  /**
+   * Get all models on proxy
+   */
+  try {
+    let url = proxyBaseUrl
+      ? `${proxyBaseUrl}/model/streaming_metrics`
+      : `/model/streaming_metrics`;
+    if (modelGroup) {
+      url = `${url}?_selected_model_group=${modelGroup}&startTime=${startTime}&endTime=${endTime}`;
+    }
+    // message.info("Requesting model data");
+    const response = await fetch(url, {
+      method: "GET",
+      headers: {
+        Authorization: `Bearer ${accessToken}`,
+        "Content-Type": "application/json",
+      },
+    });
+
+    if (!response.ok) {
+      const errorData = await response.text();
+      message.error(errorData, 10);
+      throw new Error("Network response was not ok");
+    }
+    const data = await response.json();
+    // message.info("Received model data");
+    return data;
+    // Handle success - you might want to update some state or UI based on the created key
+  } catch (error) {
+    console.error("Failed to create key:", error);
+    throw error;
+  }
+};
 
 export const modelMetricsSlowResponsesCall = async (
   accessToken: String,