diff --git a/tests/litellm/proxy/spend_tracking/test_spend_management_endpoints.py b/tests/litellm/proxy/spend_tracking/test_spend_management_endpoints.py
new file mode 100644
index 0000000000..415acdecee
--- /dev/null
+++ b/tests/litellm/proxy/spend_tracking/test_spend_management_endpoints.py
@@ -0,0 +1,402 @@
+import datetime
+import json
+import os
+import sys
+from datetime import timezone
+
+import pytest
+from fastapi.testclient import TestClient
+
+sys.path.insert(
+    0, os.path.abspath("../../../..")
+)  # Adds the parent directory to the system path
+
+from litellm.proxy.proxy_server import app, prisma_client
+
+
+@pytest.fixture
+def client():
+    return TestClient(app)
+
+
+@pytest.mark.asyncio
+async def test_ui_view_spend_logs_with_user_id(client, monkeypatch):
+    # Mock data for the test
+    mock_spend_logs = [
+        {
+            "id": "log1",
+            "request_id": "req1",
+            "api_key": "sk-test-key",
+            "user": "test_user_1",
+            "team_id": "team1",
+            "spend": 0.05,
+            "startTime": datetime.datetime.now(timezone.utc).isoformat(),
+            "model": "gpt-3.5-turbo",
+        },
+        {
+            "id": "log2",
+            "request_id": "req2",
+            "api_key": "sk-test-key",
+            "user": "test_user_2",
+            "team_id": "team1",
+            "spend": 0.10,
+            "startTime": datetime.datetime.now(timezone.utc).isoformat(),
+            "model": "gpt-4",
+        },
+    ]
+
+    # Create a mock prisma client
+    class MockDB:
+        async def find_many(self, *args, **kwargs):
+            # Filter based on user_id in the where conditions
+            print("kwargs to find_many", json.dumps(kwargs, indent=4))
+            if (
+                "where" in kwargs
+                and "user" in kwargs["where"]
+                and kwargs["where"]["user"] == "test_user_1"
+            ):
+                return [mock_spend_logs[0]]
+            return mock_spend_logs
+
+        async def count(self, *args, **kwargs):
+            # Return count based on user_id filter
+            if (
+                "where" in kwargs
+                and "user" in kwargs["where"]
+                and kwargs["where"]["user"] == "test_user_1"
+            ):
+                return 1
+            return len(mock_spend_logs)
+
+    class MockPrismaClient:
+        def __init__(self):
+            self.db = MockDB()
+            self.db.litellm_spendlogs = self.db
+
+    # Apply the monkeypatch to replace the prisma_client
+    mock_prisma_client = MockPrismaClient()
+    monkeypatch.setattr("litellm.proxy.proxy_server.prisma_client", mock_prisma_client)
+
+    # Set up test dates
+    start_date = (
+        datetime.datetime.now(timezone.utc) - datetime.timedelta(days=7)
+    ).strftime("%Y-%m-%d %H:%M:%S")
+    end_date = datetime.datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S")
+
+    # Make the request with user_id filter
+    response = client.get(
+        "/spend/logs/ui",
+        params={
+            "user_id": "test_user_1",
+            "start_date": start_date,
+            "end_date": end_date,
+        },
+        headers={"Authorization": "Bearer sk-test"},
+    )
+
+    # Assert response
+    assert response.status_code == 200
+    data = response.json()
+
+    # Verify the response structure
+    assert "data" in data
+    assert "total" in data
+    assert "page" in data
+    assert "page_size" in data
+    assert "total_pages" in data
+
+    # Verify the filtered data
+    assert data["total"] == 1
+    assert len(data["data"]) == 1
+    assert data["data"][0]["user"] == "test_user_1"
+
+
+@pytest.mark.asyncio
+async def test_ui_view_spend_logs_with_team_id(client, monkeypatch):
+    # Mock data for the test
+    mock_spend_logs = [
+        {
+            "id": "log1",
+            "request_id": "req1",
+            "api_key": "sk-test-key",
+            "user": "test_user_1",
+            "team_id": "team1",
+            "spend": 0.05,
+            "startTime": datetime.datetime.now(timezone.utc).isoformat(),
+            "model": "gpt-3.5-turbo",
+        },
+        {
+            "id": "log2",
+            "request_id": "req2",
+            "api_key": "sk-test-key",
+            "user": "test_user_2",
+            "team_id": "team2",
+            "spend": 0.10,
+            "startTime": datetime.datetime.now(timezone.utc).isoformat(),
+            "model": "gpt-4",
+        },
+    ]
+
+    # Create a mock prisma client
+    class MockDB:
+        async def find_many(self, *args, **kwargs):
+            # Filter based on team_id in the where conditions
+            if (
+                "where" in kwargs
+                and "team_id" in kwargs["where"]
+                and kwargs["where"]["team_id"] == "team1"
+            ):
+                return [mock_spend_logs[0]]
+            return mock_spend_logs
+
+        async def count(self, *args, **kwargs):
+            # Return count based on team_id filter
+            if (
+                "where" in kwargs
+                and "team_id" in kwargs["where"]
+                and kwargs["where"]["team_id"] == "team1"
+            ):
+                return 1
+            return len(mock_spend_logs)
+
+    class MockPrismaClient:
+        def __init__(self):
+            self.db = MockDB()
+            self.db.litellm_spendlogs = self.db
+
+    # Apply the monkeypatch
+    mock_prisma_client = MockPrismaClient()
+    monkeypatch.setattr("litellm.proxy.proxy_server.prisma_client", mock_prisma_client)
+
+    # Set up test dates
+    start_date = (
+        datetime.datetime.now(timezone.utc) - datetime.timedelta(days=7)
+    ).strftime("%Y-%m-%d %H:%M:%S")
+    end_date = datetime.datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S")
+
+    # Make the request with team_id filter
+    response = client.get(
+        "/spend/logs/ui",
+        params={
+            "team_id": "team1",
+            "start_date": start_date,
+            "end_date": end_date,
+        },
+        headers={"Authorization": "Bearer sk-test"},
+    )
+
+    # Assert response
+    assert response.status_code == 200
+    data = response.json()
+
+    # Verify the filtered data
+    assert data["total"] == 1
+    assert len(data["data"]) == 1
+    assert data["data"][0]["team_id"] == "team1"
+
+
+@pytest.mark.asyncio
+async def test_ui_view_spend_logs_pagination(client, monkeypatch):
+    # Create a larger set of mock data for pagination testing
+    mock_spend_logs = [
+        {
+            "id": f"log{i}",
+            "request_id": f"req{i}",
+            "api_key": "sk-test-key",
+            "user": f"test_user_{i % 3}",
+            "team_id": f"team{i % 2 + 1}",
+            "spend": 0.05 * i,
+            "startTime": datetime.datetime.now(timezone.utc).isoformat(),
+            "model": "gpt-3.5-turbo" if i % 2 == 0 else "gpt-4",
+        }
+        for i in range(1, 26)  # 25 records
+    ]
+
+    # Create a mock prisma client with pagination support
+    class MockDB:
+        async def find_many(self, *args, **kwargs):
+            # Handle pagination
+            skip = kwargs.get("skip", 0)
+            take = kwargs.get("take", 10)
+            return mock_spend_logs[skip : skip + take]
+
+        async def count(self, *args, **kwargs):
+            return len(mock_spend_logs)
+
+    class MockPrismaClient:
+        def __init__(self):
+            self.db = MockDB()
+            self.db.litellm_spendlogs = self.db
+
+    # Apply the monkeypatch
+    mock_prisma_client = MockPrismaClient()
+    monkeypatch.setattr("litellm.proxy.proxy_server.prisma_client", mock_prisma_client)
+
+    # Set up test dates
+    start_date = (
+        datetime.datetime.now(timezone.utc) - datetime.timedelta(days=7)
+    ).strftime("%Y-%m-%d %H:%M:%S")
+    end_date = datetime.datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S")
+
+    # Test first page
+    response = client.get(
+        "/spend/logs/ui",
+        params={
+            "page": 1,
+            "page_size": 10,
+            "start_date": start_date,
+            "end_date": end_date,
+        },
+        headers={"Authorization": "Bearer sk-test"},
+    )
+
+    assert response.status_code == 200
+    data = response.json()
+    assert data["total"] == 25
+    assert len(data["data"]) == 10
+    assert data["page"] == 1
+    assert data["page_size"] == 10
+    assert data["total_pages"] == 3
+
+    # Test second page
+    response = client.get(
+        "/spend/logs/ui",
+        params={
+            "page": 2,
+            "page_size": 10,
+            "start_date": start_date,
+            "end_date": end_date,
+        },
+        headers={"Authorization": "Bearer sk-test"},
+    )
+
+    assert response.status_code == 200
+    data = response.json()
+    assert data["total"] == 25
+    assert len(data["data"]) == 10
+    assert data["page"] == 2
+
+
+@pytest.mark.asyncio
+async def test_ui_view_spend_logs_date_range_filter(client, monkeypatch):
+    # Create mock data with different dates
+    today = datetime.datetime.now(timezone.utc)
+
+    mock_spend_logs = [
+        {
+            "id": "log1",
+            "request_id": "req1",
+            "api_key": "sk-test-key",
+            "user": "test_user_1",
+            "team_id": "team1",
+            "spend": 0.05,
+            "startTime": (today - datetime.timedelta(days=10)).isoformat(),
+            "model": "gpt-3.5-turbo",
+        },
+        {
+            "id": "log2",
+            "request_id": "req2",
+            "api_key": "sk-test-key",
+            "user": "test_user_2",
+            "team_id": "team1",
+            "spend": 0.10,
+            "startTime": (today - datetime.timedelta(days=2)).isoformat(),
+            "model": "gpt-4",
+        },
+    ]
+
+    # Create a mock prisma client with date filtering
+    class MockDB:
+        async def find_many(self, *args, **kwargs):
+            # Check for date range filtering
+            if "where" in kwargs and "startTime" in kwargs["where"]:
+                date_filters = kwargs["where"]["startTime"]
+                filtered_logs = []
+
+                for log in mock_spend_logs:
+                    log_date = datetime.datetime.fromisoformat(
+                        log["startTime"].replace("Z", "+00:00")
+                    )
+
+                    # Apply gte filter if it exists
+                    if "gte" in date_filters:
+                        # Handle ISO format date strings
+                        if "T" in date_filters["gte"]:
+                            filter_date = datetime.datetime.fromisoformat(
+                                date_filters["gte"].replace("Z", "+00:00")
+                            )
+                        else:
+                            filter_date = datetime.datetime.strptime(
+                                date_filters["gte"], "%Y-%m-%d %H:%M:%S"
+                            )
+
+                        if log_date < filter_date:
+                            continue
+
+                    # Apply lte filter if it exists
+                    if "lte" in date_filters:
+                        # Handle ISO format date strings
+                        if "T" in date_filters["lte"]:
+                            filter_date = datetime.datetime.fromisoformat(
+                                date_filters["lte"].replace("Z", "+00:00")
+                            )
+                        else:
+                            filter_date = datetime.datetime.strptime(
+                                date_filters["lte"], "%Y-%m-%d %H:%M:%S"
+                            )
+
+                        if log_date > filter_date:
+                            continue
+
+                    filtered_logs.append(log)
+
+                return filtered_logs
+
+            return mock_spend_logs
+
+        async def count(self, *args, **kwargs):
+            # For simplicity, we'll just call find_many and count the results
+            logs = await self.find_many(*args, **kwargs)
+            return len(logs)
+
+    class MockPrismaClient:
+        def __init__(self):
+            self.db = MockDB()
+            self.db.litellm_spendlogs = self.db
+
+    # Apply the monkeypatch
+    mock_prisma_client = MockPrismaClient()
+    monkeypatch.setattr("litellm.proxy.proxy_server.prisma_client", mock_prisma_client)
+
+    # Test with a date range that should only include the second log
+    start_date = (today - datetime.timedelta(days=5)).strftime("%Y-%m-%d %H:%M:%S")
+    end_date = today.strftime("%Y-%m-%d %H:%M:%S")
+
+    response = client.get(
+        "/spend/logs/ui",
+        params={
+            "start_date": start_date,
+            "end_date": end_date,
+        },
+        headers={"Authorization": "Bearer sk-test"},
+    )
+
+    assert response.status_code == 200
+    data = response.json()
+    assert data["total"] == 1
+    assert len(data["data"]) == 1
+    assert data["data"][0]["id"] == "log2"
+
+
+@pytest.mark.asyncio
+async def test_ui_view_spend_logs_unauthorized(client):
+    # Test without authorization header
+    response = client.get("/spend/logs/ui")
+    assert response.status_code == 401 or response.status_code == 403
+
+    # Test with invalid authorization
+    response = client.get(
+        "/spend/logs/ui",
+        headers={"Authorization": "Bearer invalid-token"},
+    )
+    assert response.status_code == 401 or response.status_code == 403
diff --git a/ui/litellm-dashboard/src/components/leftnav.tsx b/ui/litellm-dashboard/src/components/leftnav.tsx
index 8de9bf1a2f..f836e69d8d 100644
--- a/ui/litellm-dashboard/src/components/leftnav.tsx
+++ b/ui/litellm-dashboard/src/components/leftnav.tsx
@@ -20,6 +20,7 @@ import {
   SafetyOutlined,
   ExperimentOutlined
 } from '@ant-design/icons';
+import { old_admin_roles, v2_admin_role_names, all_admin_roles, rolesAllowedToSeeUsage } from '../utils/roles';
 
 const { Sider } = Layout;
 
@@ -40,12 +41,6 @@ interface MenuItem {
   icon?: React.ReactNode;
 }
 
-const old_admin_roles = ["Admin", "Admin Viewer"];
-const v2_admin_role_names = ["proxy_admin", "proxy_admin_viewer", "org_admin"];
-const all_admin_roles = [...old_admin_roles, ...v2_admin_role_names];
-const rolesAllowedToSeeUsage = ["Admin", "Admin Viewer", "Internal User", "Internal Viewer"];
-
-
 // Note: If a menu item does not have a role, it is visible to all roles.
 const menuItems: MenuItem[] = [
   { key: "1", page: "api-keys", label: "Virtual Keys", icon: },
@@ -57,7 +52,7 @@ const menuItems: MenuItem[] = [
   { key: "5", page: "users", label: "Internal Users", icon: , roles: all_admin_roles },
   { key: "14", page: "api_ref", label: "API Reference", icon: },
   { key: "16", page: "model-hub", label: "Model Hub", icon: },
-  { key: "15", page: "logs", label: "Logs", icon: , roles: all_admin_roles },
+  { key: "15", page: "logs", label: "Logs", icon: },
   {
diff --git a/ui/litellm-dashboard/src/components/networking.tsx b/ui/litellm-dashboard/src/components/networking.tsx
index 828f92d763..4673e064f5 100644
--- a/ui/litellm-dashboard/src/components/networking.tsx
+++ b/ui/litellm-dashboard/src/components/networking.tsx
@@ -1806,8 +1806,7 @@ export const uiSpendLogsCall = async (
   end_date?: string,
   page?: number,
   page_size?: number,
-  min_spend?: number,
-  max_spend?: number,
+  user_id?: string,
 ) => {
   try {
     // Construct base URL
@@ -1817,13 +1816,12 @@ export const uiSpendLogsCall = async (
     const queryParams = new URLSearchParams();
     if (api_key) queryParams.append('api_key', api_key);
     if (team_id) queryParams.append('team_id', team_id);
-    if (min_spend) queryParams.append('min_spend', min_spend.toString());
-    if (max_spend) queryParams.append('max_spend', max_spend.toString());
     if (request_id) queryParams.append('request_id', request_id);
     if (start_date) queryParams.append('start_date', start_date);
     if (end_date) queryParams.append('end_date', end_date);
     if (page) queryParams.append('page', page.toString());
     if (page_size) queryParams.append('page_size', page_size.toString());
+    if (user_id) queryParams.append('user_id', user_id);
 
     // Append query parameters to URL if any exist
     const queryString = queryParams.toString();
diff --git a/ui/litellm-dashboard/src/components/view_logs/ConfigInfoMessage.tsx b/ui/litellm-dashboard/src/components/view_logs/ConfigInfoMessage.tsx
new file mode 100644
index 0000000000..a9e8f7714a
--- /dev/null
+++ b/ui/litellm-dashboard/src/components/view_logs/ConfigInfoMessage.tsx
@@ -0,0 +1,35 @@
+import React from 'react';
+
+interface ConfigInfoMessageProps {
+  show: boolean;
+}
+
+export const ConfigInfoMessage: React.FC<ConfigInfoMessageProps> = ({ show }) => {
+  if (!show) return null;
+
+  return (
+    <div>
+      <div>
+        <h3>
+          Request/Response Data Not Available
+        </h3>
+        <p>
+          To view request and response details, enable prompt storage in your LiteLLM configuration by adding the following to your proxy_config.yaml file:
+        </p>
+        <pre>
+{`general_settings:
+  store_model_in_db: true
+  store_prompts_in_spend_logs: true`}
+        </pre>
+        <p>
+          Note: This will only affect new requests after the configuration change.
+        </p>
+      </div>
+    </div>
+  );
+};
\ No newline at end of file
diff --git a/ui/litellm-dashboard/src/components/view_logs/index.tsx b/ui/litellm-dashboard/src/components/view_logs/index.tsx
index 4e44d05c04..db88e2acaa 100644
--- a/ui/litellm-dashboard/src/components/view_logs/index.tsx
+++ b/ui/litellm-dashboard/src/components/view_logs/index.tsx
@@ -10,6 +10,8 @@ import { Row } from "@tanstack/react-table";
 import { prefetchLogDetails } from "./prefetch";
 import { RequestResponsePanel } from "./columns";
 import { ErrorViewer } from './ErrorViewer';
+import { internalUserRoles } from "../../utils/roles";
+import { ConfigInfoMessage } from './ConfigInfoMessage';
 
 interface SpendLogsTableProps {
   accessToken: string | null;
@@ -62,6 +64,9 @@ export default function SpendLogsTable({
   const [selectedTeamId, setSelectedTeamId] = useState("");
   const [selectedKeyHash, setSelectedKeyHash] = useState("");
   const [selectedFilter, setSelectedFilter] = useState("Team ID");
+  const [filterByCurrentUser, setFilterByCurrentUser] = useState(
+    userRole && internalUserRoles.includes(userRole)
+  );
 
   const queryClient = useQueryClient();
 
@@ -93,6 +98,13 @@ export default function SpendLogsTable({
     document.removeEventListener("mousedown", handleClickOutside);
   }, []);
 
+
+  useEffect(() => {
+    if (userRole && internalUserRoles.includes(userRole)) {
+      setFilterByCurrentUser(true);
+    }
+  }, [userRole]);
+
   const logs = useQuery({
     queryKey: [
       "logs",
@@ -103,6 +115,7 @@ export default function SpendLogsTable({
       endTime,
       selectedTeamId,
       selectedKeyHash,
+      filterByCurrentUser ? userID : null,
     ],
     queryFn: async () => {
       if (!accessToken || !token || !userRole || !userID) {
@@ -130,7 +143,8 @@ export default function SpendLogsTable({
         formattedStartTime,
         formattedEndTime,
         currentPage,
-        pageSize
+        pageSize,
+        filterByCurrentUser ? userID : undefined
       );
 
       // Trigger prefetch for all logs
@@ -600,6 +614,12 @@ function RequestViewer({ row }: { row: Row }) {
   const hasError = row.original.metadata?.status === "failure";
   const errorInfo = hasError ? row.original.metadata?.error_information : null;
 
+  // Check if request/response data is missing
+  const hasMessages = row.original.messages &&
+    (Array.isArray(row.original.messages) ? row.original.messages.length > 0 : Object.keys(row.original.messages).length > 0);
+  const hasResponse = row.original.response && Object.keys(formatData(row.original.response)).length > 0;
+  const missingData = !hasMessages || !hasResponse;
+
   // Format the response with error details if present
   const formattedResponse = () => {
     if (hasError && errorInfo) {
@@ -678,6 +698,9 @@ function RequestViewer({ row }: { row: Row }) {
+      {/* Configuration Info Message - Show when data is missing */}
+      <ConfigInfoMessage show={missingData} />
+
       {/* Request/Response Panel */}
         {/* Request Side */}
@@ -688,6 +711,7 @@ function RequestViewer({ row }: { row: Row }) {
             onClick={() => navigator.clipboard.writeText(JSON.stringify(getRawRequest(), null, 2))}
             className="p-1 hover:bg-gray-200 rounded"
             title="Copy request"
+            disabled={!hasMessages}
           >
@@ -715,6 +739,7 @@ function RequestViewer({ row }: { row: Row }) {
             onClick={() => navigator.clipboard.writeText(JSON.stringify(formattedResponse(), null, 2))}
             className="p-1 hover:bg-gray-200 rounded"
             title="Copy response"
+            disabled={!hasResponse}
           >
@@ -723,16 +748,18 @@ function RequestViewer({ row }: { row: Row }) {
-        <pre>
-          {JSON.stringify(formattedResponse(), null, 2)}
-        </pre>
+        {hasResponse ? (
+          <pre>
+            {JSON.stringify(formattedResponse(), null, 2)}
+          </pre>
+        ) : (
+          <div>Response data not available</div>
+        )}
 
-      {/* Error Card - Only show for failures */}
       {hasError && errorInfo && <ErrorViewer errorInfo={errorInfo} />}
 
-      {/* Tags Card - Only show if there are tags */}
       {row.original.request_tags && Object.keys(row.original.request_tags).length > 0 && (
diff --git a/ui/litellm-dashboard/src/utils/roles.ts b/ui/litellm-dashboard/src/utils/roles.ts
new file mode 100644
index 0000000000..8ac2d7632b
--- /dev/null
+++ b/ui/litellm-dashboard/src/utils/roles.ts
@@ -0,0 +1,7 @@
+// Define admin roles and permissions
+export const old_admin_roles = ["Admin", "Admin Viewer"];
+export const v2_admin_role_names = ["proxy_admin", "proxy_admin_viewer", "org_admin"];
+export const all_admin_roles = [...old_admin_roles, ...v2_admin_role_names];
+
+export const internalUserRoles = ["Internal User", "Internal Viewer"];
+export const rolesAllowedToSeeUsage = ["Admin", "Admin Viewer", "Internal User", "Internal Viewer"];
\ No newline at end of file
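Reviewer note, appended after the diff: the tests above exercise the new user_id filter on /spend/logs/ui through a mocked Prisma client. A minimal sketch of the equivalent request against a running proxy is shown below. It is illustrative only: the base URL, token, dates, and user id are placeholder values (the default proxy address and the "sk-test" key mirror the test file, not a real deployment), and a live proxy requires a valid virtual key plus the prompt-storage settings from ConfigInfoMessage before request/response bodies appear.

import requests  # hypothetical standalone client; the tests use FastAPI's TestClient instead

PROXY_BASE_URL = "http://localhost:4000"  # assumption: default LiteLLM proxy address

response = requests.get(
    f"{PROXY_BASE_URL}/spend/logs/ui",
    params={
        "user_id": "test_user_1",           # new filter added in this PR
        "start_date": "2024-01-01 00:00:00",  # placeholder date range
        "end_date": "2024-01-07 00:00:00",
        "page": 1,
        "page_size": 10,
    },
    headers={"Authorization": "Bearer sk-test"},  # placeholder virtual key
)
page = response.json()
# Response shape asserted in the tests: data, total, page, page_size, total_pages
print(page["total"], page["total_pages"], len(page["data"]))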