import React, { useState, useEffect } from "react"; import { Card, Title, Subtitle, Table, TableHead, TableRow, TableHeaderCell, TableCell, TableBody, Metric, Text, Grid, } from "@tremor/react"; import { TabPanel, TabPanels, TabGroup, TabList, Tab, TextInput } from "@tremor/react"; import { Select, SelectItem } from "@tremor/react"; import { modelInfoCall, userGetRequesedtModelsCall, modelMetricsCall, modelCreateCall, Model } from "./networking"; import { BarChart } from "@tremor/react"; import { Button as Button2, Modal, Form, Input, Select as Select2, InputNumber, message, Descriptions, Tooltip, Space, Row, Col, } from "antd"; import { Badge, BadgeDelta, Button } from "@tremor/react"; import RequestAccess from "./request_model_access"; import { Typography } from "antd"; const { Title: Title2, Link } = Typography; interface ModelDashboardProps { accessToken: string | null; token: string | null; userRole: string | null; userID: string | null; } const ModelDashboard: React.FC = ({ accessToken, token, userRole, userID, }) => { const [modelData, setModelData] = useState({ data: [] }); const [modelMetrics, setModelMetrics] = useState([]); const [pendingRequests, setPendingRequests] = useState([]); const [form] = Form.useForm(); const providers = ["OpenAI", "Azure OpenAI", "Anthropic", "Gemini (Google AI Studio)", "Amazon Bedrock", "OpenAI-Compatible Endpoints (Groq, Together AI, Mistral AI, etc.)"] const [selectedProvider, setSelectedProvider] = useState("OpenAI"); useEffect(() => { if (!accessToken || !token || !userRole || !userID) { return; } const fetchData = async () => { try { // Replace with your actual API call for model data const modelDataResponse = await modelInfoCall( accessToken, userID, userRole ); console.log("Model data response:", modelDataResponse.data); setModelData(modelDataResponse); const modelMetricsResponse = await modelMetricsCall( accessToken, userID, userRole ); console.log("Model metrics response:", modelMetricsResponse); setModelMetrics(modelMetricsResponse); // if userRole is Admin, show the pending requests if (userRole === "Admin" && accessToken) { const user_requests = await userGetRequesedtModelsCall(accessToken); console.log("Pending Requests:", pendingRequests); setPendingRequests(user_requests.requests || []); } } catch (error) { console.error("There was an error fetching the model data", error); } }; if (accessToken && token && userRole && userID) { fetchData(); } }, [accessToken, token, userRole, userID]); if (!modelData) { return
Loading...
; } if (!accessToken || !token || !userRole || !userID) { return
Loading...
; } let all_models_on_proxy: any[] = []; // loop through model data and edit each row for (let i = 0; i < modelData.data.length; i++) { let curr_model = modelData.data[i]; let litellm_model_name = curr_model?.litellm_params?.mode let model_info = curr_model?.model_info; let defaultProvider = "openai"; let provider = ""; let input_cost = "Undefined"; let output_cost = "Undefined"; let max_tokens = "Undefined"; // Check if litellm_model_name is null or undefined if (litellm_model_name) { // Split litellm_model_name based on "/" let splitModel = litellm_model_name.split("/"); // Get the first element in the split let firstElement = splitModel[0]; // If there is only one element, default provider to openai provider = splitModel.length === 1 ? defaultProvider : firstElement; } else { // litellm_model_name is null or undefined, default provider to openai provider = defaultProvider; } if (model_info) { input_cost = model_info?.input_cost_per_token; output_cost = model_info?.output_cost_per_token; max_tokens = model_info?.max_tokens; } modelData.data[i].provider = provider; modelData.data[i].input_cost = input_cost; modelData.data[i].output_cost = output_cost; modelData.data[i].max_tokens = max_tokens; modelData.data[i].api_base = curr_model?.litellm_params?.api_base; all_models_on_proxy.push(curr_model.model_name); console.log(modelData.data[i]); } // when users click request access show pop up to allow them to request access if (userRole && userRole == "Admin Viewer") { const { Title, Paragraph } = Typography; return (
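
  // A minimal sketch of the provider-derivation rule used in the loop above,
  // assuming litellm_params.model follows litellm's "provider/model" convention.
  // deriveProvider is a hypothetical helper (not part of the original file):
  //   deriveProvider("azure/gpt-4")   -> "azure"
  //   deriveProvider("gpt-3.5-turbo") -> "openai" (no prefix, so the default)
  const deriveProvider = (modelName?: string, fallback: string = "openai"): string => {
    if (!modelName) return fallback;
    const parts = modelName.split("/");
    return parts.length === 1 ? fallback : parts[0];
  };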

  // Admin Viewers cannot see model details; show an access-denied notice instead.
  if (userRole && userRole == "Admin Viewer") {
    const { Title, Paragraph } = Typography;
    return (
      <div>
        <Title level={1}>Access Denied</Title>
        <Paragraph>
          Ask your proxy admin for access to view all models
        </Paragraph>
      </div>
    );
  }

  const handleSubmit = async (formValues: Record<string, any>) => {
    const litellmParamsObj: Record<string, any> = {};
    const modelInfoObj: Record<string, any> = {};
    let modelName: string = "";

    // Iterate through the key-value pairs in formValues.
    for (const [key, value] of Object.entries(formValues)) {
      console.log(`key: ${key}, value: ${value}`);
      if (key == "model_name") {
        modelName = value;
      }
      // API-related keys go into the litellm_params dictionary.
      if (
        key === "api_key" ||
        key === "model" ||
        key === "api_base" ||
        key === "api_version" ||
        key.startsWith("aws_")
      ) {
        litellmParamsObj[key] = value;
      }
      // "base_model" goes into the model_info dictionary.
      if (key === "base_model") {
        modelInfoObj[key] = value;
      }
    }

    console.log(`final new model: ${modelName}`);

    const new_model: Model = {
      model_name: modelName,
      litellm_params: litellmParamsObj,
      model_info: modelInfoObj,
    };

    const response: any = await modelCreateCall(accessToken, new_model);
    console.log(`response for model create call: ${response["data"]}`);
  };

  const handleOk = () => {
    form
      .validateFields()
      .then((values) => {
        handleSubmit(values);
        form.resetFields();
      })
      .catch((error) => {
        console.error("Validation failed:", error);
      });
  };

  console.log(`selectedProvider: ${selectedProvider}`);
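
  // For illustration, hypothetical Azure form values such as
  //   { model_name: "my-gpt-4", model: "azure/gpt-4", api_key: "sk-...",
  //     api_base: "https://...", api_version: "...", base_model: "gpt-4" }
  // would make handleSubmit (above) send modelCreateCall a payload shaped like:
  //   { model_name: "my-gpt-4",
  //     litellm_params: { model: "azure/gpt-4", api_key: "sk-...", api_base: "https://...", api_version: "..." },
  //     model_info: { base_model: "gpt-4" } }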

  return (
    <div style={{ width: "100%" }}>
      <TabGroup>
        <TabList>
          <Tab>All Models</Tab>
          <Tab>Add Model</Tab>
        </TabList>
        <TabPanels>
          <TabPanel>
            <Card>
              <Table>
                <TableHead>
                  <TableRow>
                    <TableHeaderCell>Model Name</TableHeaderCell>
                    <TableHeaderCell>Provider</TableHeaderCell>
                    {userRole === "Admin" && (
                      <TableHeaderCell>API Base</TableHeaderCell>
                    )}
                    <TableHeaderCell>Access</TableHeaderCell>
                    <TableHeaderCell>Input Price per token ($)</TableHeaderCell>
                    <TableHeaderCell>Output Price per token ($)</TableHeaderCell>
                    <TableHeaderCell>Max Tokens</TableHeaderCell>
                  </TableRow>
                </TableHead>
                <TableBody>
                  {modelData.data.map((model: any) => (
                    <TableRow key={model.model_name}>
                      <TableCell>{model.model_name}</TableCell>
                      <TableCell>{model.provider}</TableCell>
                      {userRole === "Admin" && (
                        <TableCell>{model.api_base}</TableCell>
                      )}
                      <TableCell>
                        {model.user_access ? (
                          <Badge color="green">Yes</Badge>
                        ) : (
                          // RequestAccess props assumed from its usage elsewhere in this dashboard.
                          <RequestAccess
                            userModels={all_models_on_proxy}
                            accessToken={accessToken}
                            userID={userID}
                          />
                        )}
                      </TableCell>
                      <TableCell>{model.input_cost}</TableCell>
                      <TableCell>{model.output_cost}</TableCell>
                      <TableCell>{model.max_tokens}</TableCell>
                    </TableRow>
                  ))}
                </TableBody>
              </Table>
            </Card>
            <Grid numItems={2}>
              <Card>
                <Title>Model Statistics (Number Requests)</Title>
                {/* BarChart props are assumed; only the chart titles survive in the
                    original. Adjust index/categories to the metrics payload. */}
                <BarChart
                  data={modelMetrics}
                  index="model"
                  categories={["num_requests"]}
                  colors={["blue"]}
                />
              </Card>
              <Card>
                <Title>Model Statistics (Latency)</Title>
                <BarChart
                  data={modelMetrics}
                  index="model"
                  categories={["avg_latency_seconds"]}
                  colors={["red"]}
                />
              </Card>
            </Grid>
            {/* {userRole === "Admin" && pendingRequests && pendingRequests.length > 0 ? (
              <Card>
                <Title>Pending Requests</Title>
                <Table>
                  <TableHead>
                    <TableRow>
                      <TableHeaderCell>User ID</TableHeaderCell>
                      <TableHeaderCell>Requested Models</TableHeaderCell>
                      <TableHeaderCell>Justification</TableHeaderCell>
                      <TableHeaderCell>Justification</TableHeaderCell>
                    </TableRow>
                  </TableHead>
                  <TableBody>
                    {pendingRequests.map((request: any) => (
                      <TableRow key={request.user_id}>
                        <TableCell>{request.user_id}</TableCell>
                        <TableCell>{request.models[0]}</TableCell>
                        <TableCell>{request.justification}</TableCell>
                        <TableCell>{request.user_id}</TableCell>
                      </TableRow>
                    ))}
                  </TableBody>
                </Table>
              </Card>
            ) : null} */}
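            {/* Assumed shape of each modelMetrics row feeding the charts above —
                adjust the BarChart index/categories if modelMetricsCall returns
                different field names:
                { model: "gpt-3.5-turbo", num_requests: 42, avg_latency_seconds: 1.2 } */}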
          </TabPanel>
          <TabPanel>
            <Title2 level={2}>Add new model</Title2>
            <Card>
              <Form
                form={form}
                labelCol={{ span: 8 }}
                wrapperCol={{ span: 16 }}
                labelAlign="left"
              >
                <>
                  {/* Field names (model_name, model, api_key, api_base, api_version,
                      base_model, aws_*) mirror the keys handleSubmit expects; labels
                      and layout are a best-effort reconstruction. */}
                  <Form.Item label="Provider">
                    <Select value={selectedProvider}>
                      {providers.map((provider, index) => (
                        <SelectItem
                          key={index}
                          value={provider}
                          onClick={() => setSelectedProvider(provider)}
                        >
                          {provider}
                        </SelectItem>
                      ))}
                    </Select>
                  </Form.Item>
                  <Form.Item
                    rules={[{ required: true, message: "Required" }]}
                    label="Model Name"
                    name="model_name"
                  >
                    <TextInput />
                  </Form.Item>
                  <Row>
                    <Col span={8}></Col>
                    <Col span={16}>
                      <Text>Model name your users will pass in</Text>
                    </Col>
                  </Row>
                  <Form.Item
                    rules={[{ required: true, message: "Required" }]}
                    label="LiteLLM Model Name"
                    name="model"
                  >
                    <TextInput />
                  </Form.Item>
                  <Row>
                    <Col span={8}></Col>
                    <Col span={16}>
                      <Text>
                        Actual model name used for making litellm.completion() call
                      </Text>
                    </Col>
                  </Row>
                  {selectedProvider != "Amazon Bedrock" && (
                    <Form.Item
                      rules={[{ required: true, message: "Required" }]}
                      label="API Key"
                      name="api_key"
                    >
                      <TextInput type="password" />
                    </Form.Item>
                  )}
                  {selectedProvider == "Azure OpenAI" && (
                    <Form.Item
                      rules={[{ required: true, message: "Required" }]}
                      label="API Base"
                      name="api_base"
                    >
                      <TextInput />
                    </Form.Item>
                  )}
                  {selectedProvider == "Azure OpenAI" && (
                    <Form.Item
                      rules={[{ required: true, message: "Required" }]}
                      label="API Version"
                      name="api_version"
                    >
                      <TextInput />
                    </Form.Item>
                  )}
                  {selectedProvider == "Azure OpenAI" && (
                    <>
                      <Form.Item label="Base Model" name="base_model">
                        <TextInput />
                      </Form.Item>
                      <Row>
                        <Col span={8}></Col>
                        <Col span={16}>
                          <Text>
                            The actual model your azure deployment uses. Used for
                            accurate cost tracking. Select name from{" "}
                            {/* link target assumed: the LiteLLM model cost map */}
                            <Link
                              href="https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json"
                              target="_blank"
                            >
                              here
                            </Link>
                          </Text>
                        </Col>
                      </Row>
                    </>
                  )}
                  {selectedProvider == "Amazon Bedrock" && (
                    <Form.Item
                      rules={[{ required: true, message: "Required" }]}
                      label="AWS Access Key ID"
                      name="aws_access_key_id"
                    >
                      <TextInput />
                    </Form.Item>
                  )}
                  {selectedProvider == "Amazon Bedrock" && (
                    <Form.Item
                      rules={[{ required: true, message: "Required" }]}
                      label="AWS Secret Access Key"
                      name="aws_secret_access_key"
                    >
                      <TextInput type="password" />
                    </Form.Item>
                  )}
                  {selectedProvider == "Amazon Bedrock" && (
                    <Form.Item
                      rules={[{ required: true, message: "Required" }]}
                      label="AWS Region Name"
                      name="aws_region_name"
                    >
                      <TextInput />
                    </Form.Item>
                  )}
                </>
                <div style={{ textAlign: "center", marginTop: "10px" }}>
                  <Button2 onClick={handleOk}>Add Model</Button2>
                </div>
                {/* Help link target assumed: the LiteLLM GitHub issues page */}
                <Tooltip title="Get help on our github">
                  <Link href="https://github.com/BerriAI/litellm/issues">Need Help?</Link>
                </Tooltip>
              </Form>
            </Card>
          </TabPanel>
        </TabPanels>
      </TabGroup>
    </div>
  );
};

export default ModelDashboard;
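
// A minimal usage sketch (hypothetical parent component; prop values would come
// from your auth/session flow):
//
//   <ModelDashboard
//     accessToken={accessToken}
//     token={token}
//     userRole={userRole} // e.g. "Admin" or "Admin Viewer"
//     userID={userID}
//   />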