// NOTE(review): this file appears to have been mangled in transit — the source
// was collapsed onto a handful of very long lines, generic type parameters were
// stripped (e.g. `useState>([])`, bare `Record =`, `Array =`), and JSX element
// tags were removed leaving orphaned text nodes ("Loading...", "Access Denied",
// the table/form labels below). Because of the collapse, the line comment
// `//["OpenAI", ...]` below now comments out the rest of its physical line,
// including the `Providers` enum, `provider_map`, and the component header.
// Recover the original from version control before making functional edits.
//
// ModelDashboard: admin view over the proxy's model deployments. Visible
// responsibilities: fetch model info + metrics (and, for Admins, pending model
// access requests) via ./networking, fetch the litellm model cost map, massage
// each model row (provider, input/output cost, max_tokens, cleaned litellm
// params), block "Admin Viewer" users with a request-access screen, and render
// a models table plus an "add model" form.
import React, { useState, useEffect } from "react"; import { Card, Title, Subtitle, Table, TableHead, TableRow, TableHeaderCell, TableCell, TableBody, Metric, Text, Grid, } from "@tremor/react"; import { TabPanel, TabPanels, TabGroup, TabList, Tab, TextInput, Icon } from "@tremor/react"; import { Select, SelectItem, MultiSelect, MultiSelectItem } from "@tremor/react"; import { modelInfoCall, userGetRequesedtModelsCall, modelMetricsCall, modelCreateCall, Model, modelCostMap, modelDeleteCall } from "./networking"; import { BarChart } from "@tremor/react"; import { Button as Button2, Modal, Form, Input, Select as Select2, InputNumber, message, Descriptions, Tooltip, Space, Row, Col, } from "antd"; import { Badge, BadgeDelta, Button } from "@tremor/react"; import RequestAccess from "./request_model_access"; import { Typography } from "antd"; import TextArea from "antd/es/input/TextArea"; import { InformationCircleIcon, PencilAltIcon, PencilIcon, StatusOnlineIcon, TrashIcon } from "@heroicons/react/outline"; const { Title: Title2, Link } = Typography; interface ModelDashboardProps { accessToken: string | null; token: string | null; userRole: string | null; userID: string | null; } //["OpenAI", "Azure OpenAI", "Anthropic", "Gemini (Google AI Studio)", "Amazon Bedrock", "OpenAI-Compatible Endpoints (Groq, Together AI, Mistral AI, etc.)"] enum Providers { OpenAI = "OpenAI", Azure = "Azure", Anthropic = "Anthropic", Google_AI_Studio = "Gemini (Google AI Studio)", Bedrock = "Amazon Bedrock", OpenAI_Compatible = "OpenAI-Compatible Endpoints (Groq, Together AI, Mistral AI, etc.)" } const provider_map: Record = { "OpenAI": "openai", "Azure": "azure", "Anthropic": "anthropic", "Google_AI_Studio": "gemini", "Bedrock": "bedrock", "OpenAI_Compatible": "openai" }; const ModelDashboard: React.FC = ({ accessToken, token, userRole, userID, }) => { const [modelData, setModelData] = useState({ data: [] }); const [modelMetrics, setModelMetrics] = useState([]); const [pendingRequests, 
// NOTE(review): state + data-fetching effect. The effect re-runs whenever
// `modelMap` changes (it is in the dependency array) and only fetches the cost
// map while modelMap is still null. The `console.log("Pending Requests:",
// pendingRequests)` inside fetchData logs the PREVIOUS state value, not the
// freshly fetched `user_requests` — setState is asynchronous.
setPendingRequests] = useState([]); const [form] = Form.useForm(); const [modelMap, setModelMap] = useState(null); const [providerModels, setProviderModels] = useState>([]); // Explicitly typing providerModels as a string array const providers: Providers[] = [Providers.OpenAI, Providers.Azure, Providers.Anthropic, Providers.Google_AI_Studio, Providers.Bedrock, Providers.OpenAI_Compatible] const [selectedProvider, setSelectedProvider] = useState("OpenAI"); useEffect(() => { if (!accessToken || !token || !userRole || !userID) { return; } const fetchData = async () => { try { // Replace with your actual API call for model data const modelDataResponse = await modelInfoCall( accessToken, userID, userRole ); console.log("Model data response:", modelDataResponse.data); setModelData(modelDataResponse); const modelMetricsResponse = await modelMetricsCall( accessToken, userID, userRole ); console.log("Model metrics response:", modelMetricsResponse); setModelMetrics(modelMetricsResponse); // if userRole is Admin, show the pending requests if (userRole === "Admin" && accessToken) { const user_requests = await userGetRequesedtModelsCall(accessToken); console.log("Pending Requests:", pendingRequests); setPendingRequests(user_requests.requests || []); } } catch (error) { console.error("There was an error fetching the model data", error); } }; if (accessToken && token && userRole && userID) { fetchData(); } const fetchModelMap = async () => { const data = await modelCostMap() console.log(`received model cost map data: ${Object.keys(data)}`) setModelMap(data) } if (modelMap == null) { fetchModelMap() } }, [accessToken, token, userRole, userID, modelMap]); if (!modelData) { return
Loading...
; } if (!accessToken || !token || !userRole || !userID) { return
Loading...
// NOTE(review): row-massaging loop — mutates modelData.data[i] in place,
// deriving `provider` (prefix before "/" or a cost-map lookup falling back to
// "openai"), per-token costs, max_tokens, api_base, and litellm params with
// `model`/`api_base` filtered out. `defaultProvider` and `all_models_on_proxy`
// are written but never read in the visible chunk.
; } let all_models_on_proxy: any[] = []; // loop through model data and edit each row for (let i = 0; i < modelData.data.length; i++) { let curr_model = modelData.data[i]; let litellm_model_name = curr_model?.litellm_params?.model let model_info = curr_model?.model_info; let defaultProvider = "openai"; let provider = ""; let input_cost = "Undefined"; let output_cost = "Undefined"; let max_tokens = "Undefined"; let cleanedLitellmParams = {}; const getProviderFromModel = (model: string) => { /** * Use model map * - check if model in model map * - return it's litellm_provider, if so */ console.log(`GET PROVIDER CALLED! - ${modelMap}`) if (modelMap !== null && modelMap !== undefined) { if (typeof modelMap == "object" && model in modelMap) { return modelMap[model]["litellm_provider"] } } return "openai" } // Check if litellm_model_name is null or undefined if (litellm_model_name) { // Split litellm_model_name based on "/" let splitModel = litellm_model_name.split("/"); // Get the first element in the split let firstElement = splitModel[0]; // If there is only one element, default provider to openai provider = splitModel.length === 1 ? 
getProviderFromModel(litellm_model_name) : firstElement; } else { // litellm_model_name is null or undefined, default provider to openai provider = "openai" } if (model_info) { input_cost = model_info?.input_cost_per_token; output_cost = model_info?.output_cost_per_token; max_tokens = model_info?.max_tokens; } // let cleanedLitellmParams == litellm_params without model, api_base if (curr_model?.litellm_params) { cleanedLitellmParams = Object.fromEntries( Object.entries(curr_model?.litellm_params).filter( ([key]) => key !== "model" && key !== "api_base" ) ); } modelData.data[i].provider = provider; modelData.data[i].input_cost = input_cost; modelData.data[i].output_cost = output_cost; modelData.data[i].max_tokens = max_tokens; modelData.data[i].api_base = curr_model?.litellm_params?.api_base; modelData.data[i].cleanedLitellmParams = cleanedLitellmParams; all_models_on_proxy.push(curr_model.model_name); console.log(modelData.data[i]); } // when users click request access show pop up to allow them to request access if (userRole && userRole == "Admin Viewer") { const { Title, Paragraph } = Typography; return (
// "Admin Viewer" users may not see the model list — show access-denied screen
// (JSX tags stripped by the mangling; only the text nodes remain below).
Access Denied Ask your proxy admin for access to view all models
); } const handleDelete = async (model_id: string) => { await modelDeleteCall(accessToken, model_id) }; const setProviderModelsFn = (provider: string) => { console.log(`received provider string: ${provider}`) const providerEnumValue = Providers[provider as keyof typeof Providers]; console.log(`received providerEnumValue: ${providerEnumValue}`) const mappingResult = provider_map[providerEnumValue]; // Get the corresponding value from the mapping console.log(`mappingResult: ${mappingResult}`) let _providerModels: Array = [] if (typeof modelMap === 'object') { Object.entries(modelMap).forEach(([key, value]) => { if (value !== null && typeof value === 'object' && "litellm_provider" in value && value["litellm_provider"] === mappingResult) { _providerModels.push(key); } }); } setProviderModels(_providerModels) console.log(`providerModels: ${providerModels}`); } const handleSubmit = async (formValues: Record) => { try { /** * For multiple litellm model names - create a separate deployment for each * - get the list * - iterate through it * - create a new deployment for each */ // get the list of deployments let deployments: Array = Object.values(formValues["model"]) console.log(`received deployments: ${deployments}`) console.log(`received type of deployments: ${typeof deployments}`) deployments.forEach(async (litellm_model) => { console.log(`litellm_model: ${litellm_model}`) const litellmParamsObj: Record = {}; const modelInfoObj: Record = {}; // Iterate through the key-value pairs in formValues litellmParamsObj["model"] = litellm_model let modelName: string = ""; for (const [key, value] of Object.entries(formValues)) { if (key == "model_name") { modelName = modelName + value } else if (key == "custom_llm_provider") { // const providerEnumValue = Providers[value as keyof typeof Providers]; // const mappingResult = provider_map[providerEnumValue]; // Get the corresponding value from the mapping // modelName = mappingResult + "/" + modelName continue } else if (key == 
"model") { continue } // Check if key is "base_model" else if (key === "base_model") { // Add key-value pair to model_info dictionary modelInfoObj[key] = value; } else if (key == "litellm_extra_params") { console.log("litellm_extra_params:", value); let litellmExtraParams = {}; if (value && value != undefined) { try { litellmExtraParams = JSON.parse(value); } catch (error) { message.error("Failed to parse LiteLLM Extra Params: " + error, 20); throw new Error("Failed to parse litellm_extra_params: " + error); } for (const [key, value] of Object.entries(litellmExtraParams)) { litellmParamsObj[key] = value; } } } // Check if key is any of the specified API related keys else { // Add key-value pair to litellm_params dictionary litellmParamsObj[key] = value; } } const new_model: Model = { "model_name": modelName, "litellm_params": litellmParamsObj, "model_info": modelInfoObj } const response: any = await modelCreateCall( accessToken, new_model ); console.log(`response for model create call: ${response["data"]}`); }); form.resetFields(); } catch (error) { message.error("Failed to create model: " + error, 20); } } const handleOk = () => { form .validateFields() .then((values) => { handleSubmit(values); // form.resetFields(); }) .catch((error) => { console.error("Validation failed:", error); }); }; console.log(`selectedProvider: ${selectedProvider}`) console.log(`providerModels.length: ${providerModels.length}`) return (
{/* NOTE(review): the JSX below is mangled — element tags were stripped,
    leaving only text nodes, expressions, and a stray pre-formatted block.
    The surviving remnants show: a models table (model name, provider,
    API base shown only to Admins, cleaned litellm params, input/output
    price per token, max tokens, and a delete action wired to handleDelete
    with model.model_info.id), metrics charts, and an "Add new model" form.
    Recover the markup from version control; do not hand-reconstruct it. */}
All Models Add Model Model Name Provider { userRole === "Admin" && ( API Base ) } Extra litellm Params Input Price per token ($) Output Price per token ($) Max Tokens {modelData.data.map((model: any, index: number) => ( {model.model_name} {model.provider} { userRole === "Admin" && ( {model.api_base} ) }
                    {JSON.stringify(model.cleanedLitellmParams, null, 2)}
                    
{model.input_cost} {model.output_cost} {model.max_tokens} handleDelete(model.model_info.id)}/>
))}
{/* Metrics section: request-count and latency statistics per model,
    presumably rendered from the `modelMetrics` state — confirm against the
    stripped BarChart markup in version control. */}
Model Statistics (Number Requests) Model Statistics (Latency)
{/* "Add new model" form: submitted via handleOk -> handleSubmit; the fields
    below are conditioned on `selectedProvider` (Azure deployment/base_model,
    OpenAI org, Bedrock AWS credentials, etc.). */}
{/* */} Add new model
<> Model name your users will pass in. {selectedProvider === Providers.Azure ? ( ) : providerModels.length > 0 ? ( {providerModels.map((model, index) => ( {model} ))} ) : ( )} Actual model name used for makinglitellm.completion() call.We'llloadbalance models with the same 'public name' { selectedProvider != Providers.Bedrock && } { selectedProvider == Providers.OpenAI && } { (selectedProvider == Providers.Azure || selectedProvider == Providers.OpenAI_Compatible) && } { selectedProvider == Providers.Azure && } { selectedProvider == Providers.Azure && The actual model your azure deployment uses. Used for accurate cost tracking. Select name from here } { selectedProvider == Providers.Bedrock && } { selectedProvider == Providers.Bedrock && } { selectedProvider == Providers.Bedrock && }