diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index 83bc11189..e67178c0d 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -5887,6 +5887,256 @@ async def view_spend_tags(
     )
 
 
+@router.get(
+    "/global/activity",
+    tags=["Budget & Spend Tracking"],
+    dependencies=[Depends(user_api_key_auth)],
+    responses={
+        200: {"model": List[LiteLLM_SpendLogs]},
+    },
+)
+async def get_global_activity(
+    start_date: Optional[str] = fastapi.Query(
+        default=None,
+        description="Time from which to start viewing spend",
+    ),
+    end_date: Optional[str] = fastapi.Query(
+        default=None,
+        description="Time till which to view spend",
+    ),
+):
+    """
+    Get number of API Requests, total tokens through proxy, per day.
+
+    Example response:
+    {
+        "daily_data": [
+            {"date": "Jan 22", "api_requests": 10, "total_tokens": 2000},
+            {"date": "Jan 23", "api_requests": 10, "total_tokens": 12},
+        ],
+        "sum_api_requests": 20,
+        "sum_total_tokens": 2012
+    }
+    """
+    if start_date is None or end_date is None:
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail={"error": "Please provide start_date and end_date"},
+        )
+
+    try:
+        start_date_obj = datetime.strptime(start_date, "%Y-%m-%d")
+        end_date_obj = datetime.strptime(end_date, "%Y-%m-%d")
+    except ValueError:
+        # surface a 400 instead of an unhandled ValueError (-> 500) on bad input
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail={"error": "start_date and end_date must be YYYY-MM-DD"},
+        )
+
+    global prisma_client, llm_router
+    try:
+        if prisma_client is None:
+            raise Exception(
+                "Database not connected. Connect a database to your proxy - https://docs.litellm.ai/docs/simple_proxy#managing-auth---virtual-keys"
+            )
+
+        sql_query = """
+        SELECT
+            date_trunc('day', "startTime") AS date,
+            COUNT(*) AS api_requests,
+            SUM(total_tokens) AS total_tokens
+        FROM "LiteLLM_SpendLogs"
+        WHERE "startTime" BETWEEN $1::date AND $2::date + interval '1 day'
+        GROUP BY date_trunc('day', "startTime")
+        """
+        db_response = await prisma_client.db.query_raw(
+            sql_query, start_date_obj, end_date_obj
+        )
+
+        if db_response is None:
+            return []
+
+        sum_api_requests = 0
+        sum_total_tokens = 0
+        daily_data = []
+        # Sort on the raw ISO timestamp BEFORE reformatting to "%b %d":
+        # sorting the formatted strings is lexicographic ("Apr 01" < "Jan 22")
+        # and orders the chart wrong across month boundaries.
+        for row in sorted(db_response, key=lambda x: x["date"]):
+            _date_obj = datetime.fromisoformat(row["date"])
+            row["date"] = _date_obj.strftime("%b %d")
+
+            daily_data.append(row)
+            # SUM(...) can be NULL for a day with no token counts -> guard None
+            sum_api_requests += row.get("api_requests", 0) or 0
+            sum_total_tokens += row.get("total_tokens", 0) or 0
+
+        return {
+            "daily_data": daily_data,
+            "sum_api_requests": sum_api_requests,
+            "sum_total_tokens": sum_total_tokens,
+        }
+
+    except Exception as e:
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail={"error": str(e)},
+        )
+
+
+@router.get(
+    "/global/activity/model",
+    tags=["Budget & Spend Tracking"],
+    dependencies=[Depends(user_api_key_auth)],
+    responses={
+        200: {"model": List[LiteLLM_SpendLogs]},
+    },
+)
+async def get_global_activity_model(
+    start_date: Optional[str] = fastapi.Query(
+        default=None,
+        description="Time from which to start viewing spend",
+    ),
+    end_date: Optional[str] = fastapi.Query(
+        default=None,
+        description="Time till which to view spend",
+    ),
+):
+    """
+    Get number of API Requests, total tokens through proxy - Grouped by MODEL
+
+    Example response:
+    [
+        {
+            "model": "gpt-4",
+            "daily_data": [
+                {"date": "Jan 22", "api_requests": 10, "total_tokens": 2000},
+                {"date": "Jan 23", "api_requests": 10, "total_tokens": 12},
+            ],
+            "sum_api_requests": 20,
+            "sum_total_tokens": 2012
+        },
+        {
+            "model": "azure/gpt-4-turbo",
+            "daily_data": [
+                {"date": "Jan 22", "api_requests": 10, "total_tokens": 2000},
+                {"date": "Jan 23", "api_requests": 10, "total_tokens": 12},
+            ],
+            "sum_api_requests": 20,
+            "sum_total_tokens": 2012
+        },
+    ]
+    """
+    if start_date is None or end_date is None:
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail={"error": "Please provide start_date and end_date"},
+        )
+
+    try:
+        start_date_obj = datetime.strptime(start_date, "%Y-%m-%d")
+        end_date_obj = datetime.strptime(end_date, "%Y-%m-%d")
+    except ValueError:
+        # surface a 400 instead of an unhandled ValueError (-> 500) on bad input
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail={"error": "start_date and end_date must be YYYY-MM-DD"},
+        )
+
+    global prisma_client, llm_router
+    try:
+        if prisma_client is None:
+            raise Exception(
+                "Database not connected. Connect a database to your proxy - https://docs.litellm.ai/docs/simple_proxy#managing-auth---virtual-keys"
+            )
+
+        sql_query = """
+        SELECT
+            model,
+            date_trunc('day', "startTime") AS date,
+            COUNT(*) AS api_requests,
+            SUM(total_tokens) AS total_tokens
+        FROM "LiteLLM_SpendLogs"
+        WHERE "startTime" BETWEEN $1::date AND $2::date + interval '1 day'
+        GROUP BY model, date_trunc('day', "startTime")
+        """
+        db_response = await prisma_client.db.query_raw(
+            sql_query, start_date_obj, end_date_obj
+        )
+        if db_response is None:
+            return []
+
+        # {"gpt-4": {"daily_data": [], "sum_api_requests": 0, "sum_total_tokens": 0}}
+        model_ui_data: dict = {}
+
+        # Iterate in chronological order of the raw ISO timestamp so each
+        # model's daily_data is built already sorted; sorting the formatted
+        # "%b %d" strings afterwards is lexicographic ("Apr 01" < "Jan 22")
+        # and wrong across month boundaries.
+        for row in sorted(db_response, key=lambda x: x["date"]):
+            _model = row["model"]
+            if _model not in model_ui_data:
+                model_ui_data[_model] = {
+                    "daily_data": [],
+                    "sum_api_requests": 0,
+                    "sum_total_tokens": 0,
+                }
+            _date_obj = datetime.fromisoformat(row["date"])
+            row["date"] = _date_obj.strftime("%b %d")
+
+            model_ui_data[_model]["daily_data"].append(row)
+            # SUM(...) can be NULL for a day with no token counts -> guard None
+            model_ui_data[_model]["sum_api_requests"] += row.get("api_requests", 0) or 0
+            model_ui_data[_model]["sum_total_tokens"] += row.get("total_tokens", 0) or 0
+
+        # keep only the top 10 models by request volume
+        top_models = sorted(
+            model_ui_data.items(),
+            key=lambda x: x[1]["sum_api_requests"],
+            reverse=True,
+        )[:10]
+
+        response = []
+        for model, data in top_models:
+            response.append(
+                {
+                    "model": model,
+                    "daily_data": data["daily_data"],
+                    "sum_api_requests": data["sum_api_requests"],
+                    "sum_total_tokens": data["sum_total_tokens"],
+                }
+            )
+
+        return response
+
+    except Exception as e:
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail={"error": str(e)},
+        )
+
+
 @router.get(
     "/global/spend/provider",
     tags=["Budget & Spend Tracking"],
diff --git a/ui/litellm-dashboard/src/components/networking.tsx b/ui/litellm-dashboard/src/components/networking.tsx
index 4c21e7e91..e7678ba10 100644
--- a/ui/litellm-dashboard/src/components/networking.tsx
+++ b/ui/litellm-dashboard/src/components/networking.tsx
@@ -994,6 +994,7 @@ export const adminTopEndUsersCall = async (
     throw error;
   }
 };
+
 export const adminspendByProvider = async (accessToken: String, keyToken: String | null, startTime: String | undefined, endTime: String | undefined) => {
   try {
     let url = proxyBaseUrl ? `${proxyBaseUrl}/global/spend/provider` : `/global/spend/provider`;
@@ -1035,6 +1036,78 @@ export const adminspendByProvider = async (accessToken: String, keyToken: String
   }
 };
 
+export const adminGlobalActivity = async (accessToken: String, startTime: String | undefined, endTime: String | undefined) => {
+  try {
+    let url = proxyBaseUrl ?
`${proxyBaseUrl}/global/activity` : `/global/activity`; + + if (startTime && endTime) { + url += `?start_date=${startTime}&end_date=${endTime}`; + } + + const requestOptions: { + method: string; + headers: { + Authorization: string; + }; + } = { + method: "GET", + headers: { + Authorization: `Bearer ${accessToken}`, + }, + }; + + const response = await fetch(url, requestOptions); + + if (!response.ok) { + const errorData = await response.text(); + throw new Error("Network response was not ok"); + } + const data = await response.json(); + console.log(data); + return data; + } catch (error) { + console.error("Failed to fetch spend data:", error); + throw error; + } +}; + + +export const adminGlobalActivityPerModel = async (accessToken: String, startTime: String | undefined, endTime: String | undefined) => { + try { + let url = proxyBaseUrl ? `${proxyBaseUrl}/global/activity/model` : `/global/activity/model`; + + if (startTime && endTime) { + url += `?start_date=${startTime}&end_date=${endTime}`; + } + + const requestOptions: { + method: string; + headers: { + Authorization: string; + }; + } = { + method: "GET", + headers: { + Authorization: `Bearer ${accessToken}`, + }, + }; + + const response = await fetch(url, requestOptions); + + if (!response.ok) { + const errorData = await response.text(); + throw new Error("Network response was not ok"); + } + const data = await response.json(); + console.log(data); + return data; + } catch (error) { + console.error("Failed to fetch spend data:", error); + throw error; + } +}; + + export const adminTopModelsCall = async (accessToken: String) => { try { let url = proxyBaseUrl diff --git a/ui/litellm-dashboard/src/components/usage.tsx b/ui/litellm-dashboard/src/components/usage.tsx index b0fdafd50..b236ae702 100644 --- a/ui/litellm-dashboard/src/components/usage.tsx +++ b/ui/litellm-dashboard/src/components/usage.tsx @@ -1,9 +1,16 @@ -import { BarChart, BarList, Card, Title, Table, TableHead, TableHeaderCell, TableRow, TableCell, 
TableBody, Metric } from "@tremor/react"; +import { BarChart, BarList, Card, Title, Table, TableHead, TableHeaderCell, TableRow, TableCell, TableBody, Metric, Subtitle } from "@tremor/react"; import React, { useState, useEffect } from "react"; import ViewUserSpend from "./view_user_spend"; -import { Grid, Col, Text, LineChart, TabPanel, TabPanels, TabGroup, TabList, Tab, Select, SelectItem, DateRangePicker, DateRangePickerValue, DonutChart} from "@tremor/react"; +import { + Grid, Col, Text, + LineChart, TabPanel, TabPanels, + TabGroup, TabList, Tab, Select, SelectItem, + DateRangePicker, DateRangePickerValue, + DonutChart, + AreaChart, +} from "@tremor/react"; import { userSpendLogsCall, keyInfoCall, @@ -17,6 +24,8 @@ import { modelAvailableCall, modelInfoCall, adminspendByProvider, + adminGlobalActivity, + adminGlobalActivityPerModel, } from "./networking"; import { start } from "repl"; @@ -28,6 +37,13 @@ interface UsagePageProps { keys: any[] | null; } +interface GlobalActivityData { + sum_api_requests: number; + sum_total_tokens: number; + daily_data: { date: string; api_requests: number; total_tokens: number }[]; +} + + type CustomTooltipTypeBar = { payload: any; active: boolean | undefined; @@ -117,6 +133,8 @@ const UsagePage: React.FC = ({ const [uniqueTeamIds, setUniqueTeamIds] = useState([]); const [totalSpendPerTeam, setTotalSpendPerTeam] = useState([]); const [spendByProvider, setSpendByProvider] = useState([]); + const [globalActivity, setGlobalActivity] = useState({} as GlobalActivityData); + const [globalActivityPerModel, setGlobalActivityPerModel] = useState([]); const [selectedKeyID, setSelectedKeyID] = useState(""); const [dateValue, setDateValue] = useState({ from: new Date(Date.now() - 7 * 24 * 60 * 60 * 1000), @@ -265,6 +283,14 @@ const UsagePage: React.FC = ({ console.log("spend/user result", spend_user_call); + let global_activity_response = await adminGlobalActivity(accessToken, startTime, endTime); + 
setGlobalActivity(global_activity_response) + + let global_activity_per_model = await adminGlobalActivityPerModel(accessToken, startTime, endTime); + console.log("global activity per model", global_activity_per_model); + setGlobalActivityPerModel(global_activity_per_model) + + } else if (userRole == "App Owner") { await userSpendLogsCall( accessToken, @@ -326,6 +352,14 @@ const UsagePage: React.FC = ({ End User Usage Tag Based Usage + + + + + + Cost + Activity + @@ -420,6 +454,76 @@ const UsagePage: React.FC = ({ + + + + + All Up + + + API Requests {globalActivity.sum_api_requests} + console.log(v)} + /> + + + + Tokens {globalActivity.sum_total_tokens} + console.log(v)} + /> + + + + + + + {globalActivityPerModel.map((globalActivity, index) => ( + + {globalActivity.model} + + + API Requests {globalActivity.sum_api_requests} + console.log(v)} + /> + + + Tokens {globalActivity.sum_total_tokens} + console.log(v)} + /> + + + + ))} + + + + + + +