diff --git a/litellm-proxy-extras/litellm_proxy_extras/migrations/20250416151339_drop_tag_uniqueness_requirement/migration.sql b/litellm-proxy-extras/litellm_proxy_extras/migrations/20250416151339_drop_tag_uniqueness_requirement/migration.sql new file mode 100644 index 0000000000..5c27b84efb --- /dev/null +++ b/litellm-proxy-extras/litellm_proxy_extras/migrations/20250416151339_drop_tag_uniqueness_requirement/migration.sql @@ -0,0 +1,3 @@ +-- DropIndex +DROP INDEX "LiteLLM_DailyTagSpend_tag_key"; + diff --git a/litellm-proxy-extras/litellm_proxy_extras/schema.prisma b/litellm-proxy-extras/litellm_proxy_extras/schema.prisma index b21550413b..c6567c6d89 100644 --- a/litellm-proxy-extras/litellm_proxy_extras/schema.prisma +++ b/litellm-proxy-extras/litellm_proxy_extras/schema.prisma @@ -372,7 +372,7 @@ model LiteLLM_DailyTeamSpend { // Track daily team spend metrics per model and key model LiteLLM_DailyTagSpend { id String @id @default(uuid()) - tag String @unique + tag String date String api_key String model String diff --git a/litellm/proxy/management_endpoints/common_daily_activity.py b/litellm/proxy/management_endpoints/common_daily_activity.py new file mode 100644 index 0000000000..e5604ed79d --- /dev/null +++ b/litellm/proxy/management_endpoints/common_daily_activity.py @@ -0,0 +1,254 @@ +from datetime import datetime +from typing import Any, Dict, List, Optional, Union + +from fastapi import HTTPException, status + +from litellm._logging import verbose_proxy_logger +from litellm.proxy._types import CommonProxyErrors +from litellm.proxy.utils import PrismaClient +from litellm.types.proxy.management_endpoints.common_daily_activity import ( + BreakdownMetrics, + DailySpendData, + DailySpendMetadata, + KeyMetadata, + KeyMetricWithMetadata, + MetricWithMetadata, + SpendAnalyticsPaginatedResponse, + SpendMetrics, +) + + +def update_metrics(existing_metrics: SpendMetrics, record: Any) -> SpendMetrics: + """Update metrics with new record data.""" + existing_metrics.spend += record.spend + existing_metrics.prompt_tokens += record.prompt_tokens + existing_metrics.completion_tokens += record.completion_tokens + existing_metrics.total_tokens += record.prompt_tokens + record.completion_tokens + existing_metrics.cache_read_input_tokens += record.cache_read_input_tokens + existing_metrics.cache_creation_input_tokens += record.cache_creation_input_tokens + existing_metrics.api_requests += record.api_requests + existing_metrics.successful_requests += record.successful_requests + existing_metrics.failed_requests += record.failed_requests + return existing_metrics + + +def update_breakdown_metrics( + breakdown: BreakdownMetrics, + record: Any, + model_metadata: Dict[str, Dict[str, Any]], + provider_metadata: Dict[str, Dict[str, Any]], + api_key_metadata: Dict[str, Dict[str, Any]], + entity_id_field: Optional[str] = None, +) -> BreakdownMetrics: + """Updates breakdown metrics for a single record using the existing update_metrics function""" + + # Update model breakdown + if record.model not in breakdown.models: + breakdown.models[record.model] = MetricWithMetadata( + metrics=SpendMetrics(), + metadata=model_metadata.get( + record.model, {} + ), # Add any model-specific metadata here + ) + breakdown.models[record.model].metrics = update_metrics( + breakdown.models[record.model].metrics, record + ) + + # Update provider breakdown + provider = record.custom_llm_provider or "unknown" + if provider not in breakdown.providers: + breakdown.providers[provider] = MetricWithMetadata( + metrics=SpendMetrics(), + 
metadata=provider_metadata.get( + provider, {} + ), # Add any provider-specific metadata here + ) + breakdown.providers[provider].metrics = update_metrics( + breakdown.providers[provider].metrics, record + ) + + # Update api key breakdown + if record.api_key not in breakdown.api_keys: + breakdown.api_keys[record.api_key] = KeyMetricWithMetadata( + metrics=SpendMetrics(), + metadata=KeyMetadata( + key_alias=api_key_metadata.get(record.api_key, {}).get( + "key_alias", None + ) + ), # Add any api_key-specific metadata here + ) + breakdown.api_keys[record.api_key].metrics = update_metrics( + breakdown.api_keys[record.api_key].metrics, record + ) + + # Update entity-specific metrics if entity_id_field is provided + if entity_id_field: + entity_value = getattr(record, entity_id_field, None) + if entity_value: + if entity_value not in breakdown.entities: + breakdown.entities[entity_value] = MetricWithMetadata( + metrics=SpendMetrics(), metadata={} + ) + breakdown.entities[entity_value].metrics = update_metrics( + breakdown.entities[entity_value].metrics, record + ) + + return breakdown + + +async def get_daily_activity( + prisma_client: Optional[PrismaClient], + table_name: str, + entity_id_field: str, + entity_id: Optional[Union[str, List[str]]], + start_date: Optional[str], + end_date: Optional[str], + model: Optional[str], + api_key: Optional[str], + page: int, + page_size: int, +) -> SpendAnalyticsPaginatedResponse: + """Common function to get daily activity for any entity type.""" + if prisma_client is None: + raise HTTPException( + status_code=500, + detail={"error": CommonProxyErrors.db_not_connected_error.value}, + ) + + if start_date is None or end_date is None: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail={"error": "Please provide start_date and end_date"}, + ) + + try: + # Build filter conditions + where_conditions: Dict[str, Any] = { + "date": { + "gte": start_date, + "lte": end_date, + } + } + + if model: + where_conditions["model"] = model + if api_key: + where_conditions["api_key"] = api_key + if entity_id: + if isinstance(entity_id, list): + where_conditions[entity_id_field] = {"in": entity_id} + else: + where_conditions[entity_id_field] = entity_id + + # Get total count for pagination + total_count = await getattr(prisma_client.db, table_name).count( + where=where_conditions + ) + + # Fetch paginated results + daily_spend_data = await getattr(prisma_client.db, table_name).find_many( + where=where_conditions, + order=[ + {"date": "desc"}, + ], + skip=(page - 1) * page_size, + take=page_size, + ) + + # Get all unique API keys from the spend data + api_keys = set() + for record in daily_spend_data: + if record.api_key: + api_keys.add(record.api_key) + + # Fetch key aliases in bulk + api_key_metadata: Dict[str, Dict[str, Any]] = {} + model_metadata: Dict[str, Dict[str, Any]] = {} + provider_metadata: Dict[str, Dict[str, Any]] = {} + if api_keys: + key_records = await prisma_client.db.litellm_verificationtoken.find_many( + where={"token": {"in": list(api_keys)}} + ) + api_key_metadata.update( + {k.token: {"key_alias": k.key_alias} for k in key_records} + ) + + # Process results + results = [] + total_metrics = SpendMetrics() + grouped_data: Dict[str, Dict[str, Any]] = {} + + for record in daily_spend_data: + date_str = record.date + if date_str not in grouped_data: + grouped_data[date_str] = { + "metrics": SpendMetrics(), + "breakdown": BreakdownMetrics(), + } + + # Update metrics + grouped_data[date_str]["metrics"] = update_metrics( + 
grouped_data[date_str]["metrics"], record + ) + # Update breakdowns + grouped_data[date_str]["breakdown"] = update_breakdown_metrics( + grouped_data[date_str]["breakdown"], + record, + model_metadata, + provider_metadata, + api_key_metadata, + entity_id_field=entity_id_field, + ) + + # Update total metrics + total_metrics.spend += record.spend + total_metrics.prompt_tokens += record.prompt_tokens + total_metrics.completion_tokens += record.completion_tokens + total_metrics.total_tokens += ( + record.prompt_tokens + record.completion_tokens + ) + total_metrics.cache_read_input_tokens += record.cache_read_input_tokens + total_metrics.cache_creation_input_tokens += ( + record.cache_creation_input_tokens + ) + total_metrics.api_requests += record.api_requests + total_metrics.successful_requests += record.successful_requests + total_metrics.failed_requests += record.failed_requests + + # Convert grouped data to response format + for date_str, data in grouped_data.items(): + results.append( + DailySpendData( + date=datetime.strptime(date_str, "%Y-%m-%d").date(), + metrics=data["metrics"], + breakdown=data["breakdown"], + ) + ) + + # Sort results by date + results.sort(key=lambda x: x.date, reverse=True) + + return SpendAnalyticsPaginatedResponse( + results=results, + metadata=DailySpendMetadata( + total_spend=total_metrics.spend, + total_prompt_tokens=total_metrics.prompt_tokens, + total_completion_tokens=total_metrics.completion_tokens, + total_tokens=total_metrics.total_tokens, + total_api_requests=total_metrics.api_requests, + total_successful_requests=total_metrics.successful_requests, + total_failed_requests=total_metrics.failed_requests, + total_cache_read_input_tokens=total_metrics.cache_read_input_tokens, + total_cache_creation_input_tokens=total_metrics.cache_creation_input_tokens, + page=page, + total_pages=-(-total_count // page_size), # Ceiling division + has_more=(page * page_size) < total_count, + ), + ) + + except Exception as e: + verbose_proxy_logger.exception(f"Error fetching daily activity: {str(e)}") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail={"error": f"Failed to fetch analytics: {str(e)}"}, + ) diff --git a/litellm/proxy/management_endpoints/internal_user_endpoints.py b/litellm/proxy/management_endpoints/internal_user_endpoints.py index 4a8f4edea3..a91bc2dc62 100644 --- a/litellm/proxy/management_endpoints/internal_user_endpoints.py +++ b/litellm/proxy/management_endpoints/internal_user_endpoints.py @@ -14,9 +14,8 @@ These are members of a Team on LiteLLM import asyncio import traceback import uuid -from datetime import date, datetime, timedelta, timezone -from enum import Enum -from typing import Any, Dict, List, Optional, TypedDict, Union, cast +from datetime import datetime, timedelta, timezone +from typing import Any, Dict, List, Optional, Union, cast import fastapi from fastapi import APIRouter, Depends, Header, HTTPException, Request, status @@ -33,6 +32,17 @@ from litellm.proxy.management_endpoints.key_management_endpoints import ( from litellm.proxy.management_helpers.audit_logs import create_audit_log_for_update from litellm.proxy.management_helpers.utils import management_endpoint_wrapper from litellm.proxy.utils import handle_exception_on_proxy +from litellm.types.proxy.management_endpoints.common_daily_activity import ( + BreakdownMetrics, + DailySpendData, + DailySpendMetadata, + KeyMetadata, + KeyMetricWithMetadata, + LiteLLM_DailyUserSpend, + MetricWithMetadata, + SpendAnalyticsPaginatedResponse, + SpendMetrics, +) 
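+# NOTE: the spend-analytics models imported above (SpendMetrics, DailySpendData,
+# SpendAnalyticsPaginatedResponse, etc.) were previously defined inline in this file; this change
+# moves them to litellm/types/proxy/management_endpoints/common_daily_activity.py so the same
+# models can back both the user-level and the new tag-level daily activity endpoints.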
router = APIRouter() @@ -82,9 +92,9 @@ def _update_internal_new_user_params(data_json: dict, data: NewUserRequest) -> d data_json["user_id"] = str(uuid.uuid4()) auto_create_key = data_json.pop("auto_create_key", True) if auto_create_key is False: - data_json["table_name"] = ( - "user" # only create a user, don't create key if 'auto_create_key' set to False - ) + data_json[ + "table_name" + ] = "user" # only create a user, don't create key if 'auto_create_key' set to False is_internal_user = False if data.user_role and data.user_role.is_internal_user_role: @@ -651,9 +661,9 @@ def _update_internal_user_params(data_json: dict, data: UpdateUserRequest) -> di "budget_duration" not in non_default_values ): # applies internal user limits, if user role updated if is_internal_user and litellm.internal_user_budget_duration is not None: - non_default_values["budget_duration"] = ( - litellm.internal_user_budget_duration - ) + non_default_values[ + "budget_duration" + ] = litellm.internal_user_budget_duration duration_s = duration_in_seconds( duration=non_default_values["budget_duration"] ) @@ -964,13 +974,13 @@ async def get_users( "in": user_id_list, # Now passing a list of strings as required by Prisma } - users: Optional[List[LiteLLM_UserTable]] = ( - await prisma_client.db.litellm_usertable.find_many( - where=where_conditions, - skip=skip, - take=page_size, - order={"created_at": "desc"}, - ) + users: Optional[ + List[LiteLLM_UserTable] + ] = await prisma_client.db.litellm_usertable.find_many( + where=where_conditions, + skip=skip, + take=page_size, + order={"created_at": "desc"}, ) # Get total count of user rows @@ -1225,13 +1235,13 @@ async def ui_view_users( } # Query users with pagination and filters - users: Optional[List[BaseModel]] = ( - await prisma_client.db.litellm_usertable.find_many( - where=where_conditions, - skip=skip, - take=page_size, - order={"created_at": "desc"}, - ) + users: Optional[ + List[BaseModel] + ] = await prisma_client.db.litellm_usertable.find_many( + where=where_conditions, + skip=skip, + take=page_size, + order={"created_at": "desc"}, ) if not users: @@ -1244,111 +1254,6 @@ async def ui_view_users( raise HTTPException(status_code=500, detail=f"Error searching users: {str(e)}") -class GroupByDimension(str, Enum): - DATE = "date" - MODEL = "model" - API_KEY = "api_key" - TEAM = "team" - ORGANIZATION = "organization" - MODEL_GROUP = "model_group" - PROVIDER = "custom_llm_provider" - - -class SpendMetrics(BaseModel): - spend: float = Field(default=0.0) - prompt_tokens: int = Field(default=0) - completion_tokens: int = Field(default=0) - cache_read_input_tokens: int = Field(default=0) - cache_creation_input_tokens: int = Field(default=0) - total_tokens: int = Field(default=0) - successful_requests: int = Field(default=0) - failed_requests: int = Field(default=0) - api_requests: int = Field(default=0) - - -class MetricBase(BaseModel): - metrics: SpendMetrics - - -class MetricWithMetadata(MetricBase): - metadata: Dict[str, Any] = Field(default_factory=dict) - - -class KeyMetadata(BaseModel): - """Metadata for a key""" - - key_alias: Optional[str] = None - - -class KeyMetricWithMetadata(MetricBase): - """Base class for metrics with additional metadata""" - - metadata: KeyMetadata = Field(default_factory=KeyMetadata) - - -class BreakdownMetrics(BaseModel): - """Breakdown of spend by different dimensions""" - - models: Dict[str, MetricWithMetadata] = Field( - default_factory=dict - ) # model -> {metrics, metadata} - providers: Dict[str, MetricWithMetadata] = Field( - 
default_factory=dict - ) # provider -> {metrics, metadata} - api_keys: Dict[str, KeyMetricWithMetadata] = Field( - default_factory=dict - ) # api_key -> {metrics, metadata} - - -class DailySpendData(BaseModel): - date: date - metrics: SpendMetrics - breakdown: BreakdownMetrics = Field(default_factory=BreakdownMetrics) - - -class DailySpendMetadata(BaseModel): - total_spend: float = Field(default=0.0) - total_prompt_tokens: int = Field(default=0) - total_completion_tokens: int = Field(default=0) - total_tokens: int = Field(default=0) - total_api_requests: int = Field(default=0) - total_successful_requests: int = Field(default=0) - total_failed_requests: int = Field(default=0) - total_cache_read_input_tokens: int = Field(default=0) - total_cache_creation_input_tokens: int = Field(default=0) - page: int = Field(default=1) - total_pages: int = Field(default=1) - has_more: bool = Field(default=False) - - -class SpendAnalyticsPaginatedResponse(BaseModel): - results: List[DailySpendData] - metadata: DailySpendMetadata = Field(default_factory=DailySpendMetadata) - - -class LiteLLM_DailyUserSpend(BaseModel): - id: str - user_id: str - date: str - api_key: str - model: str - model_group: Optional[str] = None - custom_llm_provider: Optional[str] = None - prompt_tokens: int = 0 - completion_tokens: int = 0 - cache_read_input_tokens: int = 0 - cache_creation_input_tokens: int = 0 - spend: float = 0.0 - api_requests: int = 0 - successful_requests: int = 0 - failed_requests: int = 0 - - -class GroupedData(TypedDict): - metrics: SpendMetrics - breakdown: BreakdownMetrics - - def update_metrics( group_metrics: SpendMetrics, record: LiteLLM_DailyUserSpend ) -> SpendMetrics: @@ -1494,9 +1399,9 @@ async def get_user_daily_activity( user_api_key_dict.user_role != LitellmUserRoles.PROXY_ADMIN and user_api_key_dict.user_role != LitellmUserRoles.PROXY_ADMIN_VIEW_ONLY ): - where_conditions["user_id"] = ( - user_api_key_dict.user_id - ) # only allow access to own data + where_conditions[ + "user_id" + ] = user_api_key_dict.user_id # only allow access to own data # Get total count for pagination total_count = await prisma_client.db.litellm_dailyuserspend.count( diff --git a/litellm/proxy/management_endpoints/tag_management_endpoints.py b/litellm/proxy/management_endpoints/tag_management_endpoints.py index 014a1f3c57..551c161656 100644 --- a/litellm/proxy/management_endpoints/tag_management_endpoints.py +++ b/litellm/proxy/management_endpoints/tag_management_endpoints.py @@ -12,7 +12,7 @@ All /tag management endpoints import datetime import json -from typing import Dict +from typing import Dict, Optional from fastapi import APIRouter, Depends, HTTPException @@ -20,6 +20,10 @@ from litellm._logging import verbose_proxy_logger from litellm.litellm_core_utils.safe_json_dumps import safe_dumps from litellm.proxy._types import UserAPIKeyAuth from litellm.proxy.auth.user_api_key_auth import user_api_key_auth +from litellm.proxy.management_endpoints.common_daily_activity import ( + SpendAnalyticsPaginatedResponse, + get_daily_activity, +) from litellm.types.tag_management import ( TagConfig, TagDeleteRequest, @@ -354,3 +358,51 @@ async def delete_tag( return {"message": f"Tag {data.name} deleted successfully"} except Exception as e: raise HTTPException(status_code=500, detail=str(e)) + + +@router.get( + "/tag/daily/activity", + response_model=SpendAnalyticsPaginatedResponse, + tags=["tag management"], +) +async def get_tag_daily_activity( + tags: Optional[str] = None, + start_date: Optional[str] = None, + end_date: 
Optional[str] = None, + model: Optional[str] = None, + api_key: Optional[str] = None, + page: int = 1, + page_size: int = 10, +): + """ + Get daily activity for specific tags or all tags. + + Args: + tags (Optional[str]): Comma-separated list of tags to filter by. If not provided, returns data for all tags. + start_date (Optional[str]): Start date for the activity period (YYYY-MM-DD). + end_date (Optional[str]): End date for the activity period (YYYY-MM-DD). + model (Optional[str]): Filter by model name. + api_key (Optional[str]): Filter by API key. + page (int): Page number for pagination. + page_size (int): Number of items per page. + + Returns: + SpendAnalyticsPaginatedResponse: Paginated response containing daily activity data. + """ + from litellm.proxy.proxy_server import prisma_client + + # Convert comma-separated tags string to list if provided + tag_list = tags.split(",") if tags else None + + return await get_daily_activity( + prisma_client=prisma_client, + table_name="litellm_dailytagspend", + entity_id_field="tag", + entity_id=tag_list, + start_date=start_date, + end_date=end_date, + model=model, + api_key=api_key, + page=page, + page_size=page_size, + ) diff --git a/litellm/proxy/schema.prisma b/litellm/proxy/schema.prisma index b21550413b..c6567c6d89 100644 --- a/litellm/proxy/schema.prisma +++ b/litellm/proxy/schema.prisma @@ -372,7 +372,7 @@ model LiteLLM_DailyTeamSpend { // Track daily team spend metrics per model and key model LiteLLM_DailyTagSpend { id String @id @default(uuid()) - tag String @unique + tag String date String api_key String model String diff --git a/litellm/types/proxy/management_endpoints/common_daily_activity.py b/litellm/types/proxy/management_endpoints/common_daily_activity.py new file mode 100644 index 0000000000..9408035746 --- /dev/null +++ b/litellm/types/proxy/management_endpoints/common_daily_activity.py @@ -0,0 +1,113 @@ +from datetime import date +from enum import Enum +from typing import Any, Dict, List, Optional, TypedDict + +from pydantic import BaseModel, Field + + +class GroupByDimension(str, Enum): + DATE = "date" + MODEL = "model" + API_KEY = "api_key" + TEAM = "team" + ORGANIZATION = "organization" + MODEL_GROUP = "model_group" + PROVIDER = "custom_llm_provider" + + +class SpendMetrics(BaseModel): + spend: float = Field(default=0.0) + prompt_tokens: int = Field(default=0) + completion_tokens: int = Field(default=0) + cache_read_input_tokens: int = Field(default=0) + cache_creation_input_tokens: int = Field(default=0) + total_tokens: int = Field(default=0) + successful_requests: int = Field(default=0) + failed_requests: int = Field(default=0) + api_requests: int = Field(default=0) + + +class MetricBase(BaseModel): + metrics: SpendMetrics + + +class MetricWithMetadata(MetricBase): + metadata: Dict[str, Any] = Field(default_factory=dict) + + +class KeyMetadata(BaseModel): + """Metadata for a key""" + + key_alias: Optional[str] = None + + +class KeyMetricWithMetadata(MetricBase): + """Base class for metrics with additional metadata""" + + metadata: KeyMetadata = Field(default_factory=KeyMetadata) + + +class BreakdownMetrics(BaseModel): + """Breakdown of spend by different dimensions""" + + models: Dict[str, MetricWithMetadata] = Field( + default_factory=dict + ) # model -> {metrics, metadata} + providers: Dict[str, MetricWithMetadata] = Field( + default_factory=dict + ) # provider -> {metrics, metadata} + api_keys: Dict[str, KeyMetricWithMetadata] = Field( + default_factory=dict + ) # api_key -> {metrics, metadata} + entities: Dict[str, 
MetricWithMetadata] = Field( + default_factory=dict + ) # entity -> {metrics, metadata} + + +class DailySpendData(BaseModel): + date: date + metrics: SpendMetrics + breakdown: BreakdownMetrics = Field(default_factory=BreakdownMetrics) + + +class DailySpendMetadata(BaseModel): + total_spend: float = Field(default=0.0) + total_prompt_tokens: int = Field(default=0) + total_completion_tokens: int = Field(default=0) + total_tokens: int = Field(default=0) + total_api_requests: int = Field(default=0) + total_successful_requests: int = Field(default=0) + total_failed_requests: int = Field(default=0) + total_cache_read_input_tokens: int = Field(default=0) + total_cache_creation_input_tokens: int = Field(default=0) + page: int = Field(default=1) + total_pages: int = Field(default=1) + has_more: bool = Field(default=False) + + +class SpendAnalyticsPaginatedResponse(BaseModel): + results: List[DailySpendData] + metadata: DailySpendMetadata = Field(default_factory=DailySpendMetadata) + + +class LiteLLM_DailyUserSpend(BaseModel): + id: str + user_id: str + date: str + api_key: str + model: str + model_group: Optional[str] = None + custom_llm_provider: Optional[str] = None + prompt_tokens: int = 0 + completion_tokens: int = 0 + cache_read_input_tokens: int = 0 + cache_creation_input_tokens: int = 0 + spend: float = 0.0 + api_requests: int = 0 + successful_requests: int = 0 + failed_requests: int = 0 + + +class GroupedData(TypedDict): + metrics: SpendMetrics + breakdown: BreakdownMetrics diff --git a/schema.prisma b/schema.prisma index b21550413b..c6567c6d89 100644 --- a/schema.prisma +++ b/schema.prisma @@ -372,7 +372,7 @@ model LiteLLM_DailyTeamSpend { // Track daily team spend metrics per model and key model LiteLLM_DailyTagSpend { id String @id @default(uuid()) - tag String @unique + tag String date String api_key String model String diff --git a/ui/litellm-dashboard/src/components/entity_usage.tsx b/ui/litellm-dashboard/src/components/entity_usage.tsx new file mode 100644 index 0000000000..ad9557616e --- /dev/null +++ b/ui/litellm-dashboard/src/components/entity_usage.tsx @@ -0,0 +1,477 @@ +import React, { useState, useEffect } from "react"; +import { + BarChart, Card, Title, Text, + Grid, Col, DateRangePicker, DateRangePickerValue, + Table, TableHead, TableRow, TableHeaderCell, TableBody, TableCell, + DonutChart +} from "@tremor/react"; +import { Select } from 'antd'; +import { ActivityMetrics, processActivityData } from './activity_metrics'; +import { SpendMetrics, DailyData } from './usage/types'; +import { tagDailyActivityCall } from './networking'; + +interface EntityMetrics { + metrics: { + spend: number; + prompt_tokens: number; + completion_tokens: number; + cache_read_input_tokens: number; + cache_creation_input_tokens: number; + total_tokens: number; + successful_requests: number; + failed_requests: number; + api_requests: number; + }; + metadata: Record<string, any>; +} + +interface BreakdownMetrics { + models: Record<string, EntityMetrics>; + providers: Record<string, EntityMetrics>; + api_keys: Record<string, EntityMetrics>; + entities: Record<string, EntityMetrics>; +} + +interface ExtendedDailyData extends DailyData { + breakdown: BreakdownMetrics; +} + +interface EntitySpendData { + results: ExtendedDailyData[]; + metadata: { + total_spend: number; + total_api_requests: number; + total_successful_requests: number; + total_failed_requests: number; + total_tokens: number; + }; +} + +interface EntityUsageProps { + accessToken: string | null; + entityType: 'tag' | 'team'; + entityId?: string | null; +} + +const EntityUsage: React.FC<EntityUsageProps> = ({ + accessToken, + entityType, + entityId +}) => { +
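+  // Generic per-entity usage dashboard. For entityType 'tag' it fetches daily spend for the
+  // selected date range via tagDailyActivityCall (from ./networking) and renders totals plus
+  // per-model, per-provider, per-API-key and per-entity breakdowns. 'team' is accepted by the
+  // props but not wired up yet (fetchSpendData throws for it).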
const [spendData, setSpendData] = useState<EntitySpendData>({ + results: [], + metadata: { + total_spend: 0, + total_api_requests: 0, + total_successful_requests: 0, + total_failed_requests: 0, + total_tokens: 0 + } + }); + + const [selectedTags, setSelectedTags] = useState<string[]>([]); + const [dateValue, setDateValue] = useState<DateRangePickerValue>({ + from: new Date(Date.now() - 28 * 24 * 60 * 60 * 1000), + to: new Date(), + }); + + const fetchSpendData = async () => { + if (!accessToken || !dateValue.from || !dateValue.to) return; + const startTime = dateValue.from; + const endTime = dateValue.to; + + if (entityType === 'tag') { + const data = await tagDailyActivityCall( + accessToken, + startTime, + endTime, + 1, + selectedTags.length > 0 ? selectedTags : null + ); + setSpendData(data); + } else { + throw new Error("Invalid entity type"); + } + }; + + useEffect(() => { + fetchSpendData(); + }, [accessToken, dateValue, entityId, selectedTags]); + + const getTopModels = () => { + const modelSpend: { [key: string]: any } = {}; + spendData.results.forEach(day => { + Object.entries(day.breakdown.models || {}).forEach(([model, metrics]) => { + if (!modelSpend[model]) { + modelSpend[model] = { + spend: 0, + requests: 0, + successful_requests: 0, + failed_requests: 0, + tokens: 0 + }; + } + try { + modelSpend[model].spend += metrics.metrics.spend; + } catch (e) { + console.log(`Error adding spend for ${model}: ${e}, got metrics: ${JSON.stringify(metrics)}`); + } + modelSpend[model].requests += metrics.metrics.api_requests; + modelSpend[model].successful_requests += metrics.metrics.successful_requests; + modelSpend[model].failed_requests += metrics.metrics.failed_requests; + modelSpend[model].tokens += metrics.metrics.total_tokens; + }); + }); + + return Object.entries(modelSpend) + .map(([model, metrics]) => ({ + key: model, + ...metrics + })) + .sort((a, b) => b.spend - a.spend) + .slice(0, 5); + }; + + const getTopApiKeys = () => { + const apiKeySpend: { [key: string]: any } = {}; + spendData.results.forEach(day => { + Object.entries(day.breakdown.api_keys || {}).forEach(([key, metrics]) => { + if (!apiKeySpend[key]) { + apiKeySpend[key] = { + key: key, + spend: 0, + requests: 0, + successful_requests: 0, + failed_requests: 0, + tokens: 0 + }; + } + apiKeySpend[key].spend += metrics.metrics.spend; + apiKeySpend[key].requests += metrics.metrics.api_requests; + apiKeySpend[key].successful_requests += metrics.metrics.successful_requests; + apiKeySpend[key].failed_requests += metrics.metrics.failed_requests; + apiKeySpend[key].tokens += metrics.metrics.total_tokens; + }); + }); + + return Object.values(apiKeySpend) + .sort((a, b) => b.spend - a.spend) + .slice(0, 5); + }; + + const getProviderSpend = () => { + const providerSpend: { [key: string]: any } = {}; + spendData.results.forEach(day => { + Object.entries(day.breakdown.providers || {}).forEach(([provider, metrics]) => { + if (!providerSpend[provider]) { + providerSpend[provider] = { + provider, + spend: 0, + requests: 0, + successful_requests: 0, + failed_requests: 0, + tokens: 0 + }; + } + try { + providerSpend[provider].spend += metrics.metrics.spend; + providerSpend[provider].requests += metrics.metrics.api_requests; + providerSpend[provider].successful_requests += metrics.metrics.successful_requests; + providerSpend[provider].failed_requests += metrics.metrics.failed_requests; + providerSpend[provider].tokens += metrics.metrics.total_tokens; + } catch (e) { + console.log(`Error processing provider ${provider}: ${e}`); + } + }); + }); + + return Object.values(providerSpend) + 
.filter(provider => provider.spend > 0) + .sort((a, b) => b.spend - a.spend); + }; + + const getAllTags = () => { + const tags = new Set<string>(); + spendData.results.forEach(day => { + Object.keys(day.breakdown.entities || {}).forEach(tag => { + tags.add(tag); + }); + }); + return Array.from(tags).map(tag => ({ + label: tag, + value: tag + })); + }; + + const filterDataByTags = (data: any[]) => { + if (selectedTags.length === 0) return data; + return data.filter(item => selectedTags.includes(item.entity)); + }; + + const getEntityBreakdown = () => { + const entitySpend: { [key: string]: any } = {}; + spendData.results.forEach(day => { + Object.entries(day.breakdown.entities || {}).forEach(([entity, data]) => { + if (!entitySpend[entity]) { + entitySpend[entity] = { + entity, + spend: 0, + requests: 0, + successful_requests: 0, + failed_requests: 0, + tokens: 0 + }; + } + entitySpend[entity].spend += data.metrics.spend; + entitySpend[entity].requests += data.metrics.api_requests; + entitySpend[entity].successful_requests += data.metrics.successful_requests; + entitySpend[entity].failed_requests += data.metrics.failed_requests; + entitySpend[entity].tokens += data.metrics.total_tokens; + }); + }); + + const result = Object.values(entitySpend) + .sort((a, b) => b.spend - a.spend); + + return filterDataByTags(result); + }; + + return ( +
+ + + Select Time Range + + + + Filter by {entityType === 'tag' ? 'Tags' : 'Teams'} +