import React, { useState, useEffect } from "react"; import { Card, Title, Subtitle, Table, TableHead, TableRow, TableHeaderCell, TableCell, TableBody, Metric, Text, Grid, Accordion, AccordionHeader, AccordionBody, } from "@tremor/react"; import { TabPanel, TabPanels, TabGroup, TabList, Tab, TextInput, Icon, DateRangePicker, } from "@tremor/react"; import { Select, SelectItem, MultiSelect, MultiSelectItem, DateRangePickerValue, } from "@tremor/react"; import { modelInfoCall, userGetRequesedtModelsCall, modelCreateCall, Model, modelCostMap, modelDeleteCall, healthCheckCall, modelUpdateCall, modelMetricsCall, streamingModelMetricsCall, modelExceptionsCall, modelMetricsSlowResponsesCall, getCallbacksCall, setCallbacksCall, modelSettingsCall, } from "./networking"; import { BarChart, AreaChart } from "@tremor/react"; import { Button as Button2, Modal, Form, Input, Select as Select2, InputNumber, message, Descriptions, Tooltip, Space, Row, Col, } from "antd"; import { Badge, BadgeDelta, Button } from "@tremor/react"; import RequestAccess from "./request_model_access"; import { Typography } from "antd"; import TextArea from "antd/es/input/TextArea"; import { InformationCircleIcon, PencilAltIcon, PencilIcon, StatusOnlineIcon, TrashIcon, RefreshIcon, CheckCircleIcon, XCircleIcon, } from "@heroicons/react/outline"; import DeleteModelButton from "./delete_model_button"; const { Title: Title2, Link } = Typography; import { UploadOutlined } from "@ant-design/icons"; import type { UploadProps } from "antd"; import { Upload } from "antd"; import TimeToFirstToken from "./model_metrics/time_to_first_token"; import DynamicFields from "./model_add/dynamic_form"; import { Prism as SyntaxHighlighter } from "react-syntax-highlighter"; interface ModelDashboardProps { accessToken: string | null; token: string | null; userRole: string | null; userID: string | null; modelData: any; setModelData: any; premiumUser: boolean; } interface EditModelModalProps { visible: boolean; onCancel: () => 
void; model: any; // Assuming TeamType is a type representing your team object onSubmit: (data: FormData) => void; // Assuming FormData is the type of data to be submitted } interface RetryPolicyObject { [key: string]: { [retryPolicyKey: string]: number } | undefined; } //["OpenAI", "Azure OpenAI", "Anthropic", "Gemini (Google AI Studio)", "Amazon Bedrock", "OpenAI-Compatible Endpoints (Groq, Together AI, Mistral AI, etc.)"] interface ProviderFields { field_name: string; field_type: string; field_description: string; field_value: string; } interface ProviderSettings { name: string; fields: ProviderFields[]; } enum Providers { OpenAI = "OpenAI", Azure = "Azure", Anthropic = "Anthropic", Google_AI_Studio = "Google AI Studio", Bedrock = "Amazon Bedrock", OpenAI_Compatible = "OpenAI-Compatible Endpoints (Groq, Together AI, Mistral AI, etc.)", Vertex_AI = "Vertex AI (Anthropic, Gemini, etc.)", Databricks = "Databricks", } const provider_map: Record = { OpenAI: "openai", Azure: "azure", Anthropic: "anthropic", Google_AI_Studio: "gemini", Bedrock: "bedrock", OpenAI_Compatible: "openai", Vertex_AI: "vertex_ai", Databricks: "databricks", }; const retry_policy_map: Record = { "BadRequestError (400)": "BadRequestErrorRetries", "AuthenticationError (401)": "AuthenticationErrorRetries", "TimeoutError (408)": "TimeoutErrorRetries", "RateLimitError (429)": "RateLimitErrorRetries", "ContentPolicyViolationError (400)": "ContentPolicyViolationErrorRetries", "InternalServerError (500)": "InternalServerErrorRetries", }; const handleSubmit = async ( formValues: Record, accessToken: string, form: any ) => { try { /** * For multiple litellm model names - create a separate deployment for each * - get the list * - iterate through it * - create a new deployment for each * * For single model name -> make it a 1 item list */ // get the list of deployments let deployments: Array = Array.isArray(formValues["model"]) ? 
formValues["model"] : [formValues["model"]]; console.log(`received deployments: ${deployments}`); console.log(`received type of deployments: ${typeof deployments}`); deployments.forEach(async (litellm_model) => { console.log(`litellm_model: ${litellm_model}`); const litellmParamsObj: Record = {}; const modelInfoObj: Record = {}; // Iterate through the key-value pairs in formValues litellmParamsObj["model"] = litellm_model; let modelName: string = ""; console.log("formValues add deployment:", formValues); for (const [key, value] of Object.entries(formValues)) { if (value === "") { continue; } if (key == "model_name") { modelName = modelName + value; } else if (key == "custom_llm_provider") { // const providerEnumValue = Providers[value as keyof typeof Providers]; // const mappingResult = provider_map[providerEnumValue]; // Get the corresponding value from the mapping // modelName = mappingResult + "/" + modelName continue; } else if (key == "model") { continue; } // Check if key is "base_model" else if (key === "base_model") { // Add key-value pair to model_info dictionary modelInfoObj[key] = value; } else if (key == "litellm_extra_params") { console.log("litellm_extra_params:", value); let litellmExtraParams = {}; if (value && value != undefined) { try { litellmExtraParams = JSON.parse(value); } catch (error) { message.error( "Failed to parse LiteLLM Extra Params: " + error, 10 ); throw new Error("Failed to parse litellm_extra_params: " + error); } for (const [key, value] of Object.entries(litellmExtraParams)) { litellmParamsObj[key] = value; } } } // Check if key is any of the specified API related keys else { // Add key-value pair to litellm_params dictionary litellmParamsObj[key] = value; } } const new_model: Model = { model_name: modelName, litellm_params: litellmParamsObj, model_info: modelInfoObj, }; const response: any = await modelCreateCall(accessToken, new_model); console.log(`response for model create call: ${response["data"]}`); }); form.resetFields(); 
} catch (error) { message.error("Failed to create model: " + error, 10); } }; const ModelDashboard: React.FC = ({ accessToken, token, userRole, userID, modelData = { data: [] }, setModelData, premiumUser, }) => { const [pendingRequests, setPendingRequests] = useState([]); const [form] = Form.useForm(); const [modelMap, setModelMap] = useState(null); const [lastRefreshed, setLastRefreshed] = useState(""); const [providerModels, setProviderModels] = useState>([]); // Explicitly typing providerModels as a string array const providers = Object.values(Providers).filter((key) => isNaN(Number(key)) ); const [providerSettings, setProviderSettings] = useState( [] ); const [selectedProvider, setSelectedProvider] = useState("OpenAI"); const [healthCheckResponse, setHealthCheckResponse] = useState(""); const [editModalVisible, setEditModalVisible] = useState(false); const [infoModalVisible, setInfoModalVisible] = useState(false); const [selectedModel, setSelectedModel] = useState(null); const [availableModelGroups, setAvailableModelGroups] = useState< Array >([]); const [selectedModelGroup, setSelectedModelGroup] = useState( null ); const [modelLatencyMetrics, setModelLatencyMetrics] = useState([]); const [modelMetrics, setModelMetrics] = useState([]); const [modelMetricsCategories, setModelMetricsCategories] = useState( [] ); const [streamingModelMetrics, setStreamingModelMetrics] = useState([]); const [streamingModelMetricsCategories, setStreamingModelMetricsCategories] = useState([]); const [modelExceptions, setModelExceptions] = useState([]); const [allExceptions, setAllExceptions] = useState([]); const [failureTableData, setFailureTableData] = useState([]); const [slowResponsesData, setSlowResponsesData] = useState([]); const [dateValue, setDateValue] = useState({ from: new Date(Date.now() - 7 * 24 * 60 * 60 * 1000), to: new Date(), }); const [modelGroupRetryPolicy, setModelGroupRetryPolicy] = useState(null); const [defaultRetry, setDefaultRetry] = useState(0); function 
formatCreatedAt(createdAt: string | null) { if (createdAt) { const date = new Date(createdAt); const options = { month: "long", day: "numeric", year: "numeric" }; return date.toLocaleDateString("en-US"); } return null; } const EditModelModal: React.FC = ({ visible, onCancel, model, onSubmit, }) => { const [form] = Form.useForm(); let litellm_params_to_edit: Record = {}; let model_name = ""; let model_id = ""; if (model) { litellm_params_to_edit = model.litellm_params; model_name = model.model_name; let model_info = model.model_info; if (model_info) { model_id = model_info.id; console.log(`model_id: ${model_id}`); litellm_params_to_edit.model_id = model_id; } } const handleOk = () => { form .validateFields() .then((values) => { onSubmit(values); form.resetFields(); }) .catch((error) => { console.error("Validation failed:", error); }); }; return (
<>
Save
);
  };

  // --- edit / info modal open & close handlers ---
  const handleEditClick = (model: any) => {
    setSelectedModel(model);
    setEditModalVisible(true);
  };

  const handleInfoClick = (model: any) => {
    setSelectedModel(model);
    setInfoModalVisible(true);
  };

  const handleEditCancel = () => {
    setEditModalVisible(false);
    setSelectedModel(null);
  };

  const handleInfoCancel = () => {
    setInfoModalVisible(false);
    setSelectedModel(null);
  };

  // Submit handler for EditModelModal: splits the form values into
  // litellm_params vs. the model id, then calls the model update endpoint.
  const handleEditSubmit = async (formValues: Record<string, any>) => {
    console.log("handleEditSubmit:", formValues);
    if (accessToken == null) {
      return;
    }
    let newLiteLLMParams: Record<string, any> = {};
    let model_info_model_id = null;
    for (const [key, value] of Object.entries(formValues)) {
      if (key !== "model_id") {
        newLiteLLMParams[key] = value;
      } else {
        model_info_model_id = value;
      }
    }
    let payload = {
      litellm_params: newLiteLLMParams,
      model_info: {
        id: model_info_model_id,
      },
    };
    console.log("handleEditSubmit payload:", payload);
    try {
      let newModelValue = await modelUpdateCall(accessToken, payload);
      message.success(
        "Model updated successfully, restart server to see updates"
      );
      setEditModalVisible(false);
      setSelectedModel(null);
    } catch (error) {
      // BUGFIX: previously logged a bare "Error occurred" with no detail,
      // swallowing the actual failure reason.
      console.error("Failed to update model:", error);
    }
  };

  // antd Upload config for the Vertex AI service-account JSON credential:
  // reads the file client-side into the form field and blocks the upload.
  const props: UploadProps = {
    name: "file",
    accept: ".json",
    beforeUpload: (file) => {
      if (file.type === "application/json") {
        const reader = new FileReader();
        reader.onload = (e) => {
          if (e.target) {
            const jsonStr = e.target.result as string;
            form.setFieldsValue({ vertex_credentials: jsonStr });
          }
        };
        reader.readAsText(file);
      }
      // Prevent upload
      return false;
    },
    onChange(info) {
      if (info.file.status !== "uploading") {
        console.log(info.file, info.fileList);
      }
      if (info.file.status === "done") {
        message.success(`${info.file.name} file uploaded successfully`);
      } else if (info.file.status === "error") {
        message.error(`${info.file.name} file upload failed.`);
      }
    },
  };

  const handleRefreshClick = () => {
    // Update the 'lastRefreshed' state to the current date and time
    const currentDate = new Date();
    setLastRefreshed(currentDate.toLocaleString());
  };

  // Persist the per-model-group retry policy to the router settings endpoint.
  const handleSaveRetrySettings = async () => {
    if (!accessToken) {
      console.error("Access token is missing");
      return;
    }
    console.log("new modelGroupRetryPolicy:", modelGroupRetryPolicy);
    try {
      const payload = {
        router_settings: {
          model_group_retry_policy: modelGroupRetryPolicy,
        },
      };
      await setCallbacksCall(accessToken, payload);
      message.success("Retry settings saved successfully");
    } catch (error) {
      console.error("Failed to save retry settings:", error);
      message.error("Failed to save retry settings");
    }
  };

  // Initial data load: provider settings, model list, per-group metrics,
  // exceptions, slow responses, router retry settings and the cost map.
  useEffect(() => {
    if (!accessToken || !token || !userRole || !userID) {
      return;
    }
    const fetchData = async () => {
      try {
        const _providerSettings = await modelSettingsCall(accessToken);
        setProviderSettings(_providerSettings);
        // Replace with your actual API call for model data
        const modelDataResponse = await modelInfoCall(
          accessToken,
          userID,
          userRole
        );
        console.log("Model data response:", modelDataResponse.data);
        setModelData(modelDataResponse);
        // loop through modelDataResponse and get all`model_name` values
        let all_model_groups: Set = new Set();
        for (let i = 0; i < modelDataResponse.data.length; i++) {
          const model = modelDataResponse.data[i];
          all_model_groups.add(model.model_name);
        }
        console.log("all_model_groups:", all_model_groups);
        let _array_model_groups = Array.from(all_model_groups);
        // sort _array_model_groups alphabetically
        _array_model_groups = _array_model_groups.sort();
        setAvailableModelGroups(_array_model_groups);
        console.log("array_model_groups:", _array_model_groups);
        let _initial_model_group = "all";
        if (_array_model_groups.length > 0) {
          // set selectedModelGroup to the last model group
          _initial_model_group =
            _array_model_groups[_array_model_groups.length - 1];
          console.log("_initial_model_group:", _initial_model_group);
          setSelectedModelGroup(_initial_model_group);
        }
        // NOTE(review): this logs the *previous* state value — setState is async.
        console.log("selectedModelGroup:", selectedModelGroup);
        const modelMetricsResponse = await modelMetricsCall(
          accessToken,
          userID,
          userRole,
          _initial_model_group,
          dateValue.from?.toISOString(),
          dateValue.to?.toISOString()
        );
        console.log("Model metrics response:", modelMetricsResponse);
        // Sort by latency (avg_latency_per_token)
        setModelMetrics(modelMetricsResponse.data);
        setModelMetricsCategories(modelMetricsResponse.all_api_bases);
        const streamingModelMetricsResponse = await streamingModelMetricsCall(
          accessToken,
          _initial_model_group,
          dateValue.from?.toISOString(),
          dateValue.to?.toISOString()
        );
        // Assuming modelMetricsResponse now contains the metric data for the specified model group
        setStreamingModelMetrics(streamingModelMetricsResponse.data);
        setStreamingModelMetricsCategories(
          streamingModelMetricsResponse.all_api_bases
        );
        const modelExceptionsResponse = await modelExceptionsCall(
          accessToken,
          userID,
          userRole,
          _initial_model_group,
          dateValue.from?.toISOString(),
          dateValue.to?.toISOString()
        );
        console.log("Model exceptions response:", modelExceptionsResponse);
        setModelExceptions(modelExceptionsResponse.data);
        setAllExceptions(modelExceptionsResponse.exception_types);
        const slowResponses = await modelMetricsSlowResponsesCall(
          accessToken,
          userID,
          userRole,
          _initial_model_group,
          dateValue.from?.toISOString(),
          dateValue.to?.toISOString()
        );
        console.log("slowResponses:", slowResponses);
        setSlowResponsesData(slowResponses);
        const routerSettingsInfo = await getCallbacksCall(
          accessToken,
          userID,
          userRole
        );
        let router_settings = routerSettingsInfo.router_settings;
        console.log("routerSettingsInfo:", router_settings);
        let model_group_retry_policy = router_settings.model_group_retry_policy;
        let default_retries = router_settings.num_retries;
        console.log("model_group_retry_policy:", model_group_retry_policy);
        console.log("default_retries:", default_retries);
        setModelGroupRetryPolicy(model_group_retry_policy);
        setDefaultRetry(default_retries);
      } catch (error) {
        console.error("There was an error fetching the model data", error);
      }
    };
    if (accessToken && token && userRole && userID) {
      fetchData();
    }
    // Lazily fetch the litellm model cost map once; modelMap is in the
    // dependency array, so the effect re-runs after it is set.
    const fetchModelMap = async () => {
      const data = await modelCostMap();
      console.log(`received model cost map data: ${Object.keys(data)}`);
      setModelMap(data);
    };
    if (modelMap == null) {
      fetchModelMap();
    }
    handleRefreshClick();
  }, [accessToken, token, userRole, userID, modelMap, lastRefreshed]);

  if (!modelData) {
    return
Loading...
; } if (!accessToken || !token || !userRole || !userID) { return
Loading...
;
  }

  // Collect every model name and normalize each row of modelData in place:
  // provider, input/output cost (per 1M tokens), token limits, cleaned params.
  let all_models_on_proxy: any[] = [];

  // loop through model data and edit each row
  for (let i = 0; i < modelData.data.length; i++) {
    let curr_model = modelData.data[i];
    let litellm_model_name = curr_model?.litellm_params?.model;
    let model_info = curr_model?.model_info;

    let defaultProvider = "openai";
    let provider = "";
    let input_cost = "Undefined";
    let output_cost = "Undefined";
    let max_tokens = "Undefined";
    let max_input_tokens = "Undefined";
    let cleanedLitellmParams = {};

    const getProviderFromModel = (model: string) => {
      /**
       * Use model map
       * - check if model in model map
       * - return it's litellm_provider, if so
       */
      console.log(`GET PROVIDER CALLED! - ${modelMap}`);
      if (modelMap !== null && modelMap !== undefined) {
        if (typeof modelMap == "object" && model in modelMap) {
          return modelMap[model]["litellm_provider"];
        }
      }
      return "openai";
    };

    // Check if litellm_model_name is null or undefined
    if (litellm_model_name) {
      // Split litellm_model_name based on "/"
      let splitModel = litellm_model_name.split("/");

      // Get the first element in the split
      let firstElement = splitModel[0];

      // If there is only one element, default provider to openai
      provider =
        splitModel.length === 1
          ? getProviderFromModel(litellm_model_name)
          : firstElement;
    } else {
      // litellm_model_name is null or undefined, default provider to openai
      provider = "openai";
    }

    if (model_info) {
      input_cost = model_info?.input_cost_per_token;
      output_cost = model_info?.output_cost_per_token;
      max_tokens = model_info?.max_tokens;
      max_input_tokens = model_info?.max_input_tokens;
    }

    if (curr_model?.litellm_params) {
      // Drop "model" and "api_base" — they are shown in dedicated columns.
      cleanedLitellmParams = Object.fromEntries(
        Object.entries(curr_model?.litellm_params).filter(
          ([key]) => key !== "model" && key !== "api_base"
        )
      );
    }

    modelData.data[i].provider = provider;
    modelData.data[i].input_cost = input_cost;
    modelData.data[i].output_cost = output_cost;

    // Convert Cost in terms of Cost per 1M tokens
    if (modelData.data[i].input_cost) {
      modelData.data[i].input_cost = (
        Number(modelData.data[i].input_cost) * 1000000
      ).toFixed(2);
    }

    if (modelData.data[i].output_cost) {
      modelData.data[i].output_cost = (
        Number(modelData.data[i].output_cost) * 1000000
      ).toFixed(2);
    }

    modelData.data[i].max_tokens = max_tokens;
    modelData.data[i].max_input_tokens = max_input_tokens;
    modelData.data[i].api_base = curr_model?.litellm_params?.api_base;
    modelData.data[i].cleanedLitellmParams = cleanedLitellmParams;

    all_models_on_proxy.push(curr_model.model_name);

    console.log(modelData.data[i]);
  }

  // when users click request access show pop up to allow them to request access
  if (userRole && userRole == "Admin Viewer") {
    const { Title, Paragraph } = Typography;
    return (
Access Denied Ask your proxy admin for access to view all models
);
  }

  // Populate the providerModels state with every model from the litellm cost
  // map whose litellm_provider matches the given provider display name.
  const setProviderModelsFn = (provider: string) => {
    console.log(`received provider string: ${provider}`);
    const providerKey = Object.keys(Providers).find(
      (key) => (Providers as { [index: string]: any })[key] === provider
    );
    if (providerKey) {
      const mappingResult = provider_map[providerKey]; // Get the corresponding value from the mapping
      console.log(`mappingResult: ${mappingResult}`);
      let _providerModels: Array = [];
      if (typeof modelMap === "object") {
        Object.entries(modelMap).forEach(([key, value]) => {
          if (
            value !== null &&
            typeof value === "object" &&
            "litellm_provider" in (value as object) &&
            ((value as any)["litellm_provider"] === mappingResult ||
              (value as any)["litellm_provider"].includes(mappingResult))
          ) {
            _providerModels.push(key);
          }
        });
      }
      setProviderModels(_providerModels);
      // NOTE(review): logs the *previous* state value — setState is async.
      console.log(`providerModels: ${providerModels}`);
    }
  };

  // Run the backend health check and store the raw response for display.
  const runHealthCheck = async () => {
    try {
      message.info("Running health check...");
      setHealthCheckResponse("");
      const response = await healthCheckCall(accessToken);
      setHealthCheckResponse(response);
    } catch (error) {
      console.error("Error running health check:", error);
      setHealthCheckResponse("Error running health check");
    }
  };

  // Re-fetch all per-model-group metrics (latency, streaming, exceptions,
  // slow responses) for the chosen group and time window.
  const updateModelMetrics = async (
    modelGroup: string | null,
    startTime: Date | undefined,
    endTime: Date | undefined
  ) => {
    console.log("Updating model metrics for group:", modelGroup);
    if (!accessToken || !userID || !userRole || !startTime || !endTime) {
      return;
    }
    console.log(
      "inside updateModelMetrics - startTime:",
      startTime,
      "endTime:",
      endTime
    );
    setSelectedModelGroup(modelGroup); // If you want to store the selected model group in state
    try {
      const modelMetricsResponse = await modelMetricsCall(
        accessToken,
        userID,
        userRole,
        modelGroup,
        startTime.toISOString(),
        endTime.toISOString()
      );
      console.log("Model metrics response:", modelMetricsResponse);
      // Assuming modelMetricsResponse now contains the metric data for the specified model group
      setModelMetrics(modelMetricsResponse.data);
      setModelMetricsCategories(modelMetricsResponse.all_api_bases);
      const streamingModelMetricsResponse = await streamingModelMetricsCall(
        accessToken,
        modelGroup,
        startTime.toISOString(),
        endTime.toISOString()
      );
      // Assuming modelMetricsResponse now contains the metric data for the specified model group
      setStreamingModelMetrics(streamingModelMetricsResponse.data);
      setStreamingModelMetricsCategories(
        streamingModelMetricsResponse.all_api_bases
      );
      const modelExceptionsResponse = await modelExceptionsCall(
        accessToken,
        userID,
        userRole,
        modelGroup,
        startTime.toISOString(),
        endTime.toISOString()
      );
      console.log("Model exceptions response:", modelExceptionsResponse);
      setModelExceptions(modelExceptionsResponse.data);
      setAllExceptions(modelExceptionsResponse.exception_types);
      const slowResponses = await modelMetricsSlowResponsesCall(
        accessToken,
        userID,
        userRole,
        modelGroup,
        startTime.toISOString(),
        endTime.toISOString()
      );
      console.log("slowResponses:", slowResponses);
      setSlowResponsesData(slowResponses);
    } catch (error) {
      console.error("Failed to fetch model metrics", error);
    }
  };

  // Custom tooltip for the metric charts: shows the date plus the top-5
  // series, collapsing the remainder into one "N other deployments" row.
  const customTooltip = (props: any) => {
    const { payload, active } = props;
    if (!active || !payload) return null;

    // Extract the date from the first item in the payload array
    const date = payload[0]?.payload?.date;

    // Sort the payload array by category.value in descending order
    // NOTE(review): .sort() mutates the payload array supplied by the chart
    // library in place — confirm this is acceptable upstream.
    let sortedPayload = payload.sort((a: any, b: any) => b.value - a.value);

    // Only show the top 5, the 6th one should be called "X other categories" depending on how many categories were not shown
    if (sortedPayload.length > 5) {
      let remainingItems = sortedPayload.length - 5;
      sortedPayload = sortedPayload.slice(0, 5);
      sortedPayload.push({
        dataKey: `${remainingItems} other deployments`,
        value: payload
          .slice(5)
          .reduce((acc: number, curr: any) => acc + curr.value, 0),
        color: "gray",
      });
    }

    return (
{date && (

Date: {date}

)} {sortedPayload.map((category: any, idx: number) => { const roundedValue = parseFloat(category.value.toFixed(5)); const displayValue = roundedValue === 0 && category.value > 0 ? "<0.00001" : roundedValue.toFixed(5); return (

{category.dataKey}

{displayValue}

); })}
); }; const getPlaceholder = (selectedProvider: string): string => { if (selectedProvider === Providers.Vertex_AI) { return "gemini-pro"; } else if (selectedProvider == Providers.Anthropic) { return "claude-3-opus"; } else if (selectedProvider == Providers.Bedrock) { return "claude-3-opus"; } else if (selectedProvider == Providers.Google_AI_Studio) { return "gemini-pro"; } else { return "gpt-3.5-turbo"; } }; const handleOk = () => { form .validateFields() .then((values) => { handleSubmit(values, accessToken, form); // form.resetFields(); }) .catch((error) => { console.error("Validation failed:", error); }); }; console.log(`selectedProvider: ${selectedProvider}`); console.log(`providerModels.length: ${providerModels.length}`); const providerKey = Object.keys(Providers).find( (key) => (Providers as { [index: string]: any })[key] === selectedProvider ); let dynamicProviderForm: ProviderSettings | undefined = undefined; if (providerKey) { dynamicProviderForm = providerSettings.find( (provider) => provider.name === provider_map[providerKey] ); } return (
All Models Add Model
/health Models
Model Analytics Model Retry Settings
{lastRefreshed && Last Refreshed: {lastRefreshed}}
Filter by Public Model Name
Public Model Name Provider {userRole === "Admin" && ( API Base )} Input Price{" "}

/1M Tokens ($)

Output Price{" "}

/1M Tokens ($)

{premiumUser ? ( "Created At" ) : ( {" "} ✨ Created At )} {premiumUser ? ( "Created By" ) : ( {" "} ✨ Created By )} Status
{modelData.data .filter( (model: any) => selectedModelGroup === "all" || model.model_name === selectedModelGroup || selectedModelGroup === null || selectedModelGroup === undefined || selectedModelGroup === "" ) .map((model: any, index: number) => (

{model.model_name || "-"}

{model.provider || "-"}

{userRole === "Admin" && (
                                  {model && model.api_base
                                    ? model.api_base.slice(0, 20)
                                    : "-"}
                                
)}
                              {model.input_cost
                                ? model.input_cost
                                : model.litellm_params.input_cost_per_token
                                  ? (
                                      Number(
                                        model.litellm_params
                                          .input_cost_per_token
                                      ) * 1000000
                                    ).toFixed(2)
                                  : null}
                            
                              {model.output_cost
                                ? model.output_cost
                                : model.litellm_params.output_cost_per_token
                                  ? (
                                      Number(
                                        model.litellm_params
                                          .output_cost_per_token
                                      ) * 1000000
                                    ).toFixed(2)
                                  : null}
                            

{premiumUser ? formatCreatedAt( model.model_info.created_at ) || "-" : "-"}

{premiumUser ? model.model_info.created_by || "-" : "-"}

{model.model_info.db_model ? (

DB Model

) : (

Config Model

)}
handleInfoClick(model)} /> handleEditClick(model)} /> ))}
Model Info {selectedModel && JSON.stringify(selectedModel, null, 2)}
Add new model
<> Model name your users will pass in. {selectedProvider === Providers.Azure ? ( ) : providerModels.length > 0 ? ( {providerModels.map((model, index) => ( {model} ))} ) : ( )} Actual model name used for making{" "} litellm.completion() call . We'll{" "} loadbalance {" "} models with the same 'public name' {dynamicProviderForm !== undefined && dynamicProviderForm.fields.length > 0 && ( )} {selectedProvider != Providers.Bedrock && selectedProvider != Providers.Vertex_AI && dynamicProviderForm === undefined && ( )} {selectedProvider == Providers.OpenAI && ( )} {selectedProvider == Providers.Vertex_AI && ( )} {selectedProvider == Providers.Vertex_AI && ( )} {selectedProvider == Providers.Vertex_AI && ( }> Click to Upload )} {selectedProvider == Providers.Vertex_AI && ( Give litellm a gcp service account(.json file), so it can make the relevant calls )} {(selectedProvider == Providers.Azure || selectedProvider == Providers.OpenAI_Compatible) && ( )} {selectedProvider == Providers.Azure && ( )} {selectedProvider == Providers.Azure && (
The actual model your azure deployment uses. Used for accurate cost tracking. Select name from{" "} here
)} {selectedProvider == Providers.Bedrock && ( )} {selectedProvider == Providers.Bedrock && ( )} {selectedProvider == Providers.Bedrock && ( )}