From f2a7e2ee98a0dc8fd356931750d74cee8f8d68c0 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Mon, 5 Feb 2024 19:28:57 -0800 Subject: [PATCH 01/56] feat(ui): enable admin to view all valid keys created on the proxy --- litellm/proxy/proxy_server.py | 68 ++++++- litellm/proxy/utils.py | 17 +- .../src/components/navbar.tsx | 72 ++++---- .../src/components/networking.tsx | 82 ++++----- .../src/components/user_dashboard.tsx | 96 +++++----- .../src/components/view_key_spend_report.tsx | 173 +++++++++++------- .../src/components/view_key_table.tsx | 13 +- 7 files changed, 312 insertions(+), 209 deletions(-) diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 289a36cb2..0fe6997ee 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -322,6 +322,7 @@ async def user_api_key_auth( f"Malformed API Key passed in. Ensure Key has `Bearer ` prefix. Passed in: {passed_in_key}" ) + ### CHECK IF ADMIN ### # note: never string compare api keys, this is vulenerable to a time attack. Use secrets.compare_digest instead is_master_key_valid = secrets.compare_digest(api_key, master_key) if is_master_key_valid: @@ -454,6 +455,12 @@ async def user_api_key_auth( if _user is None: continue assert isinstance(_user, dict) + # check if user is admin # + if ( + _user.get("user_role", None) is not None + and _user.get("user_role") == "proxy_admin" + ): + return UserAPIKeyAuth(api_key=master_key) # Token exists, not expired now check if its in budget for the user user_max_budget = _user.get("max_budget", None) user_current_spend = _user.get("spend", None) @@ -597,10 +604,13 @@ async def user_api_key_auth( # check if user can access this route query_params = request.query_params user_id = query_params.get("user_id") + verbose_proxy_logger.debug( + f"user_id: {user_id} & valid_token.user_id: {valid_token.user_id}" + ) if user_id != valid_token.user_id: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, - detail="user not allowed to access this key's info", + detail="key not allowed to access this user's info", ) elif route == "/user/update": raise HTTPException( @@ -1846,6 +1856,9 @@ async def startup_event(): if prisma_client is not None and master_key is not None: # add master key to db + user_id = "default_user_id" + if os.getenv("PROXY_ADMIN_ID", None) is not None: + user_id = os.getenv("PROXY_ADMIN_ID") asyncio.create_task( generate_key_helper_fn( duration=None, @@ -1854,7 +1867,8 @@ async def startup_event(): config={}, spend=0, token=master_key, - user_id="default_user_id", + user_id=user_id, + user_role="proxy_admin", ) ) @@ -3380,12 +3394,13 @@ async def auth_callback(request: Request): result = await microsoft_sso.verify_and_process(request) # User is Authe'd in - generate key for the UI to access Proxy - user_id = getattr(result, "email", None) + user_email = getattr(result, "email", None) + user_id = getattr(result, "id", None) if user_id is None: user_id = getattr(result, "first_name", "") + getattr(result, "last_name", "") response = await generate_key_helper_fn( - **{"duration": "1hr", "key_max_budget": 0, "models": [], "aliases": {}, "config": {}, "spend": 0, "user_id": user_id, "team_id": "litellm-dashboard"} # type: ignore + **{"duration": "1hr", "key_max_budget": 0, "models": [], "aliases": {}, "config": {}, "spend": 0, "user_id": user_id, "team_id": "litellm-dashboard", "user_email": user_email} # type: ignore ) key = response["token"] # type: ignore @@ -3393,10 +3408,25 @@ async def auth_callback(request: Request): litellm_dashboard_ui 
= "/ui/" + user_role = "app_owner" + if ( + os.getenv("PROXY_ADMIN_ID", None) is not None + and os.environ["PROXY_ADMIN_ID"] == user_id + ): + # checks if user is admin + user_role = "app_admin" + import jwt jwt_token = jwt.encode( - {"user_id": user_id, "key": key}, "secret", algorithm="HS256" + { + "user_id": user_id, + "key": key, + "user_email": user_email, + "user_role": user_role, + }, + "secret", + algorithm="HS256", ) litellm_dashboard_ui += "?userID=" + user_id + "&token=" + jwt_token @@ -3409,10 +3439,18 @@ async def auth_callback(request: Request): "/user/info", tags=["user management"], dependencies=[Depends(user_api_key_auth)] ) async def user_info( - user_id: str = fastapi.Query(..., description="User ID in the request parameters") + user_id: Optional[str] = fastapi.Query( + default=None, description="User ID in the request parameters" + ) ): """ Use this to get user information. (user row + all user key info) + + Example request + ``` + curl -X GET 'http://localhost:8000/user/info?user_id=krrish7%40berri.ai' \ + --header 'Authorization: Bearer sk-1234' + ``` """ global prisma_client try: @@ -3421,11 +3459,25 @@ async def user_info( f"Database not connected. Connect a database to your proxy - https://docs.litellm.ai/docs/simple_proxy#managing-auth---virtual-keys" ) ## GET USER ROW ## - user_info = await prisma_client.get_data(user_id=user_id) + if user_id is not None: + user_info = await prisma_client.get_data(user_id=user_id) + else: + user_info = None ## GET ALL KEYS ## keys = await prisma_client.get_data( - user_id=user_id, table_name="key", query_type="find_all" + user_id=user_id, + table_name="key", + query_type="find_all", + expires=datetime.now(), ) + + if user_info is None: + ## make sure we still return a total spend ## + spend = 0 + for k in keys: + spend += getattr(k, "spend", 0) + user_info = {"spend": spend} + ## REMOVE HASHED TOKEN INFO before returning ## for key in keys: try: diff --git a/litellm/proxy/utils.py b/litellm/proxy/utils.py index 84b09d726..62cbc6b4b 100644 --- a/litellm/proxy/utils.py +++ b/litellm/proxy/utils.py @@ -559,9 +559,20 @@ class PrismaClient: # The asterisk before `user_id_list` unpacks the list into separate arguments response = await self.db.query_raw(sql_query) elif query_type == "find_all": - response = await self.db.litellm_usertable.find_many( # type: ignore - order={"spend": "desc"}, - ) + if expires is not None: + response = await self.db.litellm_usertable.find_many( # type: ignore + order={"spend": "desc"}, + where={ # type:ignore + "OR": [ + {"expires": None}, # type:ignore + {"expires": {"gt": expires}}, # type:ignore + ], + }, + ) + else: + response = await self.db.litellm_usertable.find_many( # type: ignore + order={"spend": "desc"}, + ) return response elif table_name == "spend": verbose_proxy_logger.debug( diff --git a/ui/litellm-dashboard/src/components/navbar.tsx b/ui/litellm-dashboard/src/components/navbar.tsx index b7cb35730..946cfc447 100644 --- a/ui/litellm-dashboard/src/components/navbar.tsx +++ b/ui/litellm-dashboard/src/components/navbar.tsx @@ -1,40 +1,50 @@ "use client"; -import Link from 'next/link'; -import Image from 'next/image' -import React, { useState } from 'react'; +import Link from "next/link"; +import Image from "next/image"; +import React, { useState } from "react"; import { useSearchParams } from "next/navigation"; -import { Button, Text, Metric,Title, TextInput, Grid, Col, Card } from "@tremor/react"; +import { + Button, + Text, + Metric, + Title, + TextInput, + Grid, + Col, + Card, +} from 
"@tremor/react"; // Define the props type interface NavbarProps { - userID: string | null; - userRole: string | null; + userID: string | null; + userRole: string | null; + userEmail: string | null; } -const Navbar: React.FC = ({ userID, userRole }) => { - console.log("User ID:", userID); +const Navbar: React.FC = ({ userID, userRole, userEmail }) => { + console.log("User ID:", userID); + console.log("userEmail:", userEmail); - return ( - - ) -} + return ( + + ); +}; -export default Navbar; \ No newline at end of file +export default Navbar; diff --git a/ui/litellm-dashboard/src/components/networking.tsx b/ui/litellm-dashboard/src/components/networking.tsx index 4763e475e..5b8e42286 100644 --- a/ui/litellm-dashboard/src/components/networking.tsx +++ b/ui/litellm-dashboard/src/components/networking.tsx @@ -1,24 +1,24 @@ /** * Helper file for calls being made to proxy */ -import { message } from 'antd'; +import { message } from "antd"; -const proxyBaseUrl = null; -// const proxyBaseUrl = "http://localhost:4000" // http://localhost:4000 +const isLocal = process.env.NODE_ENV === "development"; +const proxyBaseUrl = isLocal ? "http://localhost:4000" : null; export const keyCreateCall = async ( accessToken: string, userID: string, - formValues: Record, // Assuming formValues is an object + formValues: Record // Assuming formValues is an object ) => { try { - console.log("Form Values in keyCreateCall:", formValues); // Log the form values before making the API call - + console.log("Form Values in keyCreateCall:", formValues); // Log the form values before making the API call + // check if formValues.description is not undefined, make it a string and add it to formValues.metadata if (formValues.description) { // add to formValues.metadata if (!formValues.metadata) { - formValues.metadata = {} + formValues.metadata = {}; } // value needs to be in "", valid JSON formValues.metadata.description = formValues.description; @@ -26,7 +26,7 @@ export const keyCreateCall = async ( delete formValues.description; formValues.metadata = JSON.stringify(formValues.metadata); } - // if formValues.metadata is not undefined, make it a valid dict + // if formValues.metadata is not undefined, make it a valid dict if (formValues.metadata) { console.log("formValues.metadata:", formValues.metadata); // if there's an exception JSON.parse, show it in the message @@ -69,15 +69,11 @@ export const keyCreateCall = async ( } }; - -export const keyDeleteCall = async ( - accessToken: String, - user_key: String -) => { +export const keyDeleteCall = async (accessToken: String, user_key: String) => { try { const url = proxyBaseUrl ? `${proxyBaseUrl}/key/delete` : `/key/delete`; - console.log("in keyDeleteCall:", user_key) - + console.log("in keyDeleteCall:", user_key); + const response = await fetch(url, { method: "POST", headers: { @@ -108,21 +104,22 @@ export const keyDeleteCall = async ( export const userInfoCall = async ( accessToken: String, - userID: String + userID: String, + userRole: String ) => { try { - const url = proxyBaseUrl ? `${proxyBaseUrl}/user/info` : `/user/info`; - console.log("in userInfoCall:", url) - const response = await fetch( - `${url}/?user_id=${userID}`, - { - method: "GET", - headers: { - Authorization: `Bearer ${accessToken}`, - "Content-Type": "application/json", - }, - } - ); + let url = proxyBaseUrl ? 
`${proxyBaseUrl}/user/info` : `/user/info`; + if (userRole == "App Owner") { + url = `${url}/?user_id=${userID}`; + } + message.info("Requesting user data"); + const response = await fetch(url, { + method: "GET", + headers: { + Authorization: `Bearer ${accessToken}`, + "Content-Type": "application/json", + }, + }); if (!response.ok) { const errorData = await response.text(); @@ -131,7 +128,7 @@ export const userInfoCall = async ( } const data = await response.json(); - console.log(data); + message.info("Received user data"); return data; // Handle success - you might want to update some state or UI based on the created key } catch (error) { @@ -140,24 +137,17 @@ export const userInfoCall = async ( } }; - -export const keySpendLogsCall = async ( - accessToken: String, - token: String -) => { +export const keySpendLogsCall = async (accessToken: String, token: String) => { try { const url = proxyBaseUrl ? `${proxyBaseUrl}/spend/logs` : `/spend/logs`; - console.log("in keySpendLogsCall:", url) - const response = await fetch( - `${url}/?api_key=${token}`, - { - method: "GET", - headers: { - Authorization: `Bearer ${accessToken}`, - "Content-Type": "application/json", - }, - } - ); + console.log("in keySpendLogsCall:", url); + const response = await fetch(`${url}/?api_key=${token}`, { + method: "GET", + headers: { + Authorization: `Bearer ${accessToken}`, + "Content-Type": "application/json", + }, + }); if (!response.ok) { const errorData = await response.text(); message.error(errorData); @@ -171,4 +161,4 @@ export const keySpendLogsCall = async ( console.error("Failed to create key:", error); throw error; } -} +}; diff --git a/ui/litellm-dashboard/src/components/user_dashboard.tsx b/ui/litellm-dashboard/src/components/user_dashboard.tsx index 951d0287b..b1a06939b 100644 --- a/ui/litellm-dashboard/src/components/user_dashboard.tsx +++ b/ui/litellm-dashboard/src/components/user_dashboard.tsx @@ -6,21 +6,25 @@ import CreateKey from "./create_key_button"; import ViewKeyTable from "./view_key_table"; import ViewUserSpend from "./view_user_spend"; import EnterProxyUrl from "./enter_proxy_url"; +import { message } from "antd"; import Navbar from "./navbar"; import { useSearchParams } from "next/navigation"; import { jwtDecode } from "jwt-decode"; -const proxyBaseUrl = null; -// const proxyBaseUrl = "http://localhost:4000" // http://localhost:4000 +const isLocal = process.env.NODE_ENV === "development"; +console.log("isLocal:", isLocal); +const proxyBaseUrl = isLocal ? 
"http://localhost:4000" : null; type UserSpendData = { spend: number; max_budget?: number | null; -} +}; const UserDashboard = () => { const [data, setData] = useState(null); // Keep the initialization of state here - const [userSpendData, setUserSpendData] = useState(null); + const [userSpendData, setUserSpendData] = useState( + null + ); // Assuming useSearchParams() hook exists and works in your setup const searchParams = useSearchParams(); @@ -30,19 +34,19 @@ const UserDashboard = () => { const token = searchParams.get("token"); const [accessToken, setAccessToken] = useState(null); const [userRole, setUserRole] = useState(null); - + const [userEmail, setUserEmail] = useState(null); function formatUserRole(userRole: string) { if (!userRole) { return "Undefined Role"; } - + console.log(`Received user role: ${userRole}`); switch (userRole.toLowerCase()) { case "app_owner": return "App Owner"; case "demo_app_owner": - return "AppOwner"; - case "admin": + return "App Owner"; + case "app_admin": return "Admin"; case "app_user": return "App User"; @@ -53,7 +57,7 @@ const UserDashboard = () => { // Moved useEffect inside the component and used a condition to run fetch only if the params are available useEffect(() => { - if (token){ + if (token) { const decoded = jwtDecode(token) as { [key: string]: any }; if (decoded) { // cast decoded to dictionary @@ -71,17 +75,19 @@ const UserDashboard = () => { } else { console.log("User role not defined"); } + + if (decoded.user_email) { + setUserEmail(decoded.user_email); + } else { + console.log(`User Email is not set ${decoded}`); + } } } - if (userID && accessToken && !data) { + if (userID && accessToken && userRole && !data) { const fetchData = async () => { try { - const response = await userInfoCall( - accessToken, - userID - ); - console.log("Response:", response); - setUserSpendData(response["user_info"]) + const response = await userInfoCall(accessToken, userID, userRole); + setUserSpendData(response["user_info"]); setData(response["keys"]); // Assuming this is the correct path to your data } catch (error) { console.error("There was an error fetching the data", error); @@ -93,53 +99,45 @@ const UserDashboard = () => { }, [userID, token, accessToken, data]); if (userID == null || token == null) { - - // Now you can construct the full URL - const url = proxyBaseUrl ? `${proxyBaseUrl}/sso/key/generate` : `/sso/key/generate`; + const url = proxyBaseUrl + ? `${proxyBaseUrl}/sso/key/generate` + : `/sso/key/generate`; console.log("Full URL:", url); window.location.href = url; return null; - } - else if (accessToken == null) { + } else if (accessToken == null) { return null; } if (userRole == null) { - setUserRole("App Owner") + setUserRole("App Owner"); } - + return (
- + - - - - - - + + + + + +
- ); }; -export default UserDashboard; \ No newline at end of file +export default UserDashboard; diff --git a/ui/litellm-dashboard/src/components/view_key_spend_report.tsx b/ui/litellm-dashboard/src/components/view_key_spend_report.tsx index 40961325e..e90401e5b 100644 --- a/ui/litellm-dashboard/src/components/view_key_spend_report.tsx +++ b/ui/litellm-dashboard/src/components/view_key_spend_report.tsx @@ -1,8 +1,26 @@ "use client"; import React, { useState, useEffect } from "react"; -import { Button as Button2, Modal, Form, Input, InputNumber, Select, message } from "antd"; -import { Button, Text, Card, Table, BarChart, Title, Subtitle, BarList, Metric } from "@tremor/react"; +import { + Button as Button2, + Modal, + Form, + Input, + InputNumber, + Select, + message, +} from "antd"; +import { + Button, + Text, + Card, + Table, + BarChart, + Title, + Subtitle, + BarList, + Metric, +} from "@tremor/react"; import { keySpendLogsCall } from "./networking"; interface ViewKeySpendReportProps { @@ -14,18 +32,30 @@ interface ViewKeySpendReportProps { } type ResponseValueType = { - startTime: string; // Assuming startTime is a string, adjust it if it's of a different type - spend: number; // Assuming spend is a number, adjust it if it's of a different type - user: string; // Assuming user is a string, adjust it if it's of a different type - }; + startTime: string; // Assuming startTime is a string, adjust it if it's of a different type + spend: number; // Assuming spend is a number, adjust it if it's of a different type + user: string; // Assuming user is a string, adjust it if it's of a different type +}; -const ViewKeySpendReport: React.FC = ({ token, accessToken, keySpend, keyBudget, keyName }) => { +const ViewKeySpendReport: React.FC = ({ + token, + accessToken, + keySpend, + keyBudget, + keyName, +}) => { const [isModalVisible, setIsModalVisible] = useState(false); - const [data, setData] = useState<{ day: string; spend: number; }[] | null>(null); - const [userData, setUserData] = useState<{ name: string; value: number; }[] | null>(null); + const [data, setData] = useState<{ day: string; spend: number }[] | null>( + null + ); + const [userData, setUserData] = useState< + { name: string; value: number }[] | null + >(null); const showModal = () => { + console.log("Show Modal triggered"); setIsModalVisible(true); + fetchData(); }; const handleOk = () => { @@ -41,68 +71,79 @@ const ViewKeySpendReport: React.FC = ({ token, accessTo try { if (accessToken == null || token == null) { return; - } - const response = await keySpendLogsCall(accessToken=accessToken, token=token); + } + console.log(`accessToken: ${accessToken}; token: ${token}`); + const response = await keySpendLogsCall( + (accessToken = accessToken), + (token = token) + ); console.log("Response:", response); // loop through response // get spend, startTime for each element, place in new array - - const pricePerDay: Record = (Object.values(response) as ResponseValueType[]).reduce((acc: Record, value) => { + const pricePerDay: Record = ( + Object.values(response) as ResponseValueType[] + ).reduce((acc: Record, value) => { const startTime = new Date(value.startTime); - const day = new Intl.DateTimeFormat('en-US', { day: '2-digit', month: 'short' }).format(startTime); - + const day = new Intl.DateTimeFormat("en-US", { + day: "2-digit", + month: "short", + }).format(startTime); + acc[day] = (acc[day] || 0) + value.spend; - + return acc; }, {}); - - + // sort pricePerDay by day // Convert object to array of key-value pairs - const 
pricePerDayArray = Object.entries(pricePerDay); + const pricePerDayArray = Object.entries(pricePerDay); - // Sort the array based on the date (key) - pricePerDayArray.sort(([aKey], [bKey]) => { - const dateA = new Date(aKey); - const dateB = new Date(bKey); - return dateA.getTime() - dateB.getTime(); - }); - - // Convert the sorted array back to an object - const sortedPricePerDay = Object.fromEntries(pricePerDayArray); + // Sort the array based on the date (key) + pricePerDayArray.sort(([aKey], [bKey]) => { + const dateA = new Date(aKey); + const dateB = new Date(bKey); + return dateA.getTime() - dateB.getTime(); + }); + // Convert the sorted array back to an object + const sortedPricePerDay = Object.fromEntries(pricePerDayArray); console.log(sortedPricePerDay); - - const pricePerUser: Record = (Object.values(response) as ResponseValueType[]).reduce((acc: Record, value) => { + + const pricePerUser: Record = ( + Object.values(response) as ResponseValueType[] + ).reduce((acc: Record, value) => { const user = value.user; acc[user] = (acc[user] || 0) + value.spend; - + return acc; }, {}); - - + console.log(pricePerDay); console.log(pricePerUser); const arrayBarChart = []; - // [ - // { - // "day": "02 Feb", - // "spend": pricePerDay["02 Feb"], - // } - // ] + // [ + // { + // "day": "02 Feb", + // "spend": pricePerDay["02 Feb"], + // } + // ] for (const [key, value] of Object.entries(sortedPricePerDay)) { arrayBarChart.push({ day: key, spend: value }); } - // get 5 most expensive users - const sortedUsers = Object.entries(pricePerUser).sort((a, b) => b[1] - a[1]); + const sortedUsers = Object.entries(pricePerUser).sort( + (a, b) => b[1] - a[1] + ); const top5Users = sortedUsers.slice(0, 5); - const userChart = top5Users.map(([key, value]) => ({ name: key, value: value })); - + const userChart = top5Users.map(([key, value]) => ({ + name: key, + value: value, + })); + setData(arrayBarChart); setUserData(userChart); console.log("arrayBarChart:", arrayBarChart); @@ -112,11 +153,10 @@ const ViewKeySpendReport: React.FC = ({ token, accessTo } }; - useEffect(() => { - // Fetch data only when the token changes - fetchData(); - }, [token]); // Dependency array containing the 'token' variable - + // useEffect(() => { + // // Fetch data only when the token changes + // fetchData(); + // }, [token]); // Dependency array containing the 'token' variable if (!token) { return null; @@ -134,33 +174,28 @@ const ViewKeySpendReport: React.FC = ({ token, accessTo onCancel={handleCancel} footer={null} > - Key Name: {keyName} + Key Name: {keyName} Monthly Spend ${keySpend} - {data && ( + {data && ( - )} - - Top 5 Users Spend (USD) - - {userData && ( - - )} - - + )} + + Top 5 Users Spend (USD) + + {userData && ( + + )} + ); diff --git a/ui/litellm-dashboard/src/components/view_key_table.tsx b/ui/litellm-dashboard/src/components/view_key_table.tsx index 8522a6bb1..4813bbe4e 100644 --- a/ui/litellm-dashboard/src/components/view_key_table.tsx +++ b/ui/litellm-dashboard/src/components/view_key_table.tsx @@ -1,5 +1,5 @@ "use client"; -import React, { useEffect } from "react"; +import React, { useEffect, useState } from "react"; import { keyDeleteCall } from "./networking"; import { StatusOnlineIcon, TrashIcon } from "@heroicons/react/outline"; import { @@ -32,6 +32,8 @@ const ViewKeyTable: React.FC = ({ data, setData, }) => { + const [isButtonClicked, setIsButtonClicked] = useState(false); + const handleDelete = async (token: String) => { if (data == null) { return; @@ -116,8 +118,13 @@ const ViewKeyTable: React.FC = ({ 
/> - - + ); From a9a4f4cf0f4bdc11baf19a56250120671ddb4beb Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Mon, 5 Feb 2024 21:43:17 -0800 Subject: [PATCH 02/56] test(test_key_generate_dynamodb.py): fix test --- litellm/tests/test_key_generate_dynamodb.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/litellm/tests/test_key_generate_dynamodb.py b/litellm/tests/test_key_generate_dynamodb.py index 61d0ff6a6..573bd944d 100644 --- a/litellm/tests/test_key_generate_dynamodb.py +++ b/litellm/tests/test_key_generate_dynamodb.py @@ -490,8 +490,13 @@ def test_dynamo_db_migration(custom_db_client): try: async def test(): + request = GenerateKeyRequest(max_budget=1) + key = await generate_key_fn(request) + print(key) + + generated_key = key.key bearer_token = ( - "Bearer " + "sk-elJDL2pOEjcAuC7zD4psAg" + "Bearer " + generated_key ) # this works with ishaan's db, it's a never expiring key request = Request(scope={"type": "http"}) @@ -508,4 +513,4 @@ def test_dynamo_db_migration(custom_db_client): asyncio.run(test()) except Exception as e: - pytest.fail(f"An exception occurred - {str(e)}") + pytest.fail(f"An exception occurred - {traceback.format_exc()}") From 7aa4075b05a9d39909d5df00c0c0c6cd79adc79a Mon Sep 17 00:00:00 2001 From: Krish Dholakia Date: Mon, 5 Feb 2024 17:07:57 -0800 Subject: [PATCH 03/56] Update model_prices_and_context_window.json --- model_prices_and_context_window.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index b6ded001c..4c28bdbe8 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -156,8 +156,8 @@ "max_tokens": 4097, "max_input_tokens": 4097, "max_output_tokens": 4096, - "input_cost_per_token": 0.000012, - "output_cost_per_token": 0.000016, + "input_cost_per_token": 0.000003, + "output_cost_per_token": 0.000006, "litellm_provider": "openai", "mode": "chat" }, From ada14b06e0cbfe93906c7259056fb87e7bc4c1dd Mon Sep 17 00:00:00 2001 From: John HU Date: Mon, 5 Feb 2024 17:30:39 -0800 Subject: [PATCH 04/56] Fix admin UI title and description --- ui/litellm-dashboard/src/app/layout.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ui/litellm-dashboard/src/app/layout.tsx b/ui/litellm-dashboard/src/app/layout.tsx index 3314e4780..a04a0d66e 100644 --- a/ui/litellm-dashboard/src/app/layout.tsx +++ b/ui/litellm-dashboard/src/app/layout.tsx @@ -5,8 +5,8 @@ import "./globals.css"; const inter = Inter({ subsets: ["latin"] }); export const metadata: Metadata = { - title: "Create Next App", - description: "Generated by create next app", + title: "🚅 LiteLLM", + description: "LiteLLM Proxy Admin UI", }; export default function RootLayout({ From f8380c638fc6b1566532a91dca85adc53255f8a4 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Mon, 5 Feb 2024 16:16:15 -0800 Subject: [PATCH 05/56] fix(langfuse.py): support logging failed llm api calls to langfuse --- litellm/integrations/langfuse.py | 198 +++++++++++++++++++------------ litellm/utils.py | 58 ++++----- 2 files changed, 151 insertions(+), 105 deletions(-) diff --git a/litellm/integrations/langfuse.py b/litellm/integrations/langfuse.py index e62dccdc4..82de33366 100644 --- a/litellm/integrations/langfuse.py +++ b/litellm/integrations/langfuse.py @@ -55,8 +55,21 @@ class LangFuseLogger: else: self.upstream_langfuse = None + # def log_error(kwargs, response_obj, start_time, end_time): + # generation = trace.generation( + # level ="ERROR" # 
can be any of DEBUG, DEFAULT, WARNING or ERROR + # status_message='error' # can be any string (e.g. stringified stack trace or error body) + # ) def log_event( - self, kwargs, response_obj, start_time, end_time, user_id, print_verbose + self, + kwargs, + response_obj, + start_time, + end_time, + user_id, + print_verbose, + level="DEFAULT", + status_message=None, ): # Method definition @@ -84,37 +97,49 @@ class LangFuseLogger: pass # end of processing langfuse ######################## - if kwargs.get("call_type", None) == "embedding" or isinstance( - response_obj, litellm.EmbeddingResponse + if ( + level == "ERROR" + and status_message is not None + and isinstance(status_message, str) + ): + input = prompt + output = status_message + elif response_obj is not None and ( + kwargs.get("call_type", None) == "embedding" + or isinstance(response_obj, litellm.EmbeddingResponse) ): input = prompt output = response_obj["data"] - else: + elif response_obj is not None: input = prompt output = response_obj["choices"][0]["message"].json() - print_verbose(f"OUTPUT IN LANGFUSE: {output}; original: {response_obj}") - self._log_langfuse_v2( - user_id, - metadata, - output, - start_time, - end_time, - kwargs, - optional_params, - input, - response_obj, - print_verbose, - ) if self._is_langfuse_v2() else self._log_langfuse_v1( - user_id, - metadata, - output, - start_time, - end_time, - kwargs, - optional_params, - input, - response_obj, - ) + print(f"OUTPUT IN LANGFUSE: {output}; original: {response_obj}") + if self._is_langfuse_v2(): + self._log_langfuse_v2( + user_id, + metadata, + output, + start_time, + end_time, + kwargs, + optional_params, + input, + response_obj, + level, + print_verbose, + ) + elif response_obj is not None: + self._log_langfuse_v1( + user_id, + metadata, + output, + start_time, + end_time, + kwargs, + optional_params, + input, + response_obj, + ) self.Langfuse.flush() print_verbose( @@ -123,15 +148,15 @@ class LangFuseLogger: verbose_logger.info(f"Langfuse Layer Logging - logging success") except: traceback.print_exc() - print_verbose(f"Langfuse Layer Error - {traceback.format_exc()}") + print(f"Langfuse Layer Error - {traceback.format_exc()}") pass async def _async_log_event( self, kwargs, response_obj, start_time, end_time, user_id, print_verbose ): - self.log_event( - kwargs, response_obj, start_time, end_time, user_id, print_verbose - ) + """ + TODO: support async calls when langfuse is truly async + """ def _is_langfuse_v2(self): import langfuse @@ -193,57 +218,78 @@ class LangFuseLogger: optional_params, input, response_obj, + level, print_verbose, ): import langfuse - tags = [] - supports_tags = Version(langfuse.version.__version__) >= Version("2.6.3") - supports_costs = Version(langfuse.version.__version__) >= Version("2.7.3") + try: + tags = [] + supports_tags = Version(langfuse.version.__version__) >= Version("2.6.3") + supports_costs = Version(langfuse.version.__version__) >= Version("2.7.3") - print_verbose(f"Langfuse Layer Logging - logging to langfuse v2 ") + print_verbose(f"Langfuse Layer Logging - logging to langfuse v2 ") - generation_name = metadata.get("generation_name", None) - if generation_name is None: - # just log `litellm-{call_type}` as the generation name - generation_name = f"litellm-{kwargs.get('call_type', 'completion')}" + generation_name = metadata.get("generation_name", None) + if generation_name is None: + # just log `litellm-{call_type}` as the generation name + generation_name = f"litellm-{kwargs.get('call_type', 'completion')}" - trace_params = { 
- "name": generation_name, - "input": input, - "output": output, - "user_id": metadata.get("trace_user_id", user_id), - "id": metadata.get("trace_id", None), - "session_id": metadata.get("session_id", None), - } - cost = kwargs["response_cost"] - print_verbose(f"trace: {cost}") - if supports_tags: - for key, value in metadata.items(): - tags.append(f"{key}:{value}") - if "cache_hit" in kwargs: - tags.append(f"cache_hit:{kwargs['cache_hit']}") - trace_params.update({"tags": tags}) + trace_params = { + "name": generation_name, + "input": input, + "user_id": metadata.get("trace_user_id", user_id), + "id": metadata.get("trace_id", None), + "session_id": metadata.get("session_id", None), + } - trace = self.Langfuse.trace(**trace_params) + if level == "ERROR": + trace_params["status_message"] = output + else: + trace_params["output"] = output - # get generation_id - generation_id = None - if response_obj.get("id", None) is not None: - generation_id = litellm.utils.get_logging_id(start_time, response_obj) - trace.generation( - name=generation_name, - id=metadata.get("generation_id", generation_id), - startTime=start_time, - endTime=end_time, - model=kwargs["model"], - modelParameters=optional_params, - input=input, - output=output, - usage={ - "prompt_tokens": response_obj["usage"]["prompt_tokens"], - "completion_tokens": response_obj["usage"]["completion_tokens"], - "total_cost": cost if supports_costs else None, - }, - metadata=metadata, - ) + cost = kwargs.get("response_cost", None) + print_verbose(f"trace: {cost}") + if supports_tags: + for key, value in metadata.items(): + tags.append(f"{key}:{value}") + if "cache_hit" in kwargs: + tags.append(f"cache_hit:{kwargs['cache_hit']}") + trace_params.update({"tags": tags}) + + trace = self.Langfuse.trace(**trace_params) + + if level == "ERROR": + trace.generation( + level="ERROR", # can be any of DEBUG, DEFAULT, WARNING or ERROR + status_message=output, # can be any string (e.g. stringified stack trace or error body) + ) + print(f"SUCCESSFULLY LOGGED ERROR") + else: + # get generation_id + generation_id = None + if ( + response_obj is not None + and response_obj.get("id", None) is not None + ): + generation_id = litellm.utils.get_logging_id( + start_time, response_obj + ) + trace.generation( + name=generation_name, + id=metadata.get("generation_id", generation_id), + startTime=start_time, + endTime=end_time, + model=kwargs["model"], + modelParameters=optional_params, + input=input, + output=output, + usage={ + "prompt_tokens": response_obj["usage"]["prompt_tokens"], + "completion_tokens": response_obj["usage"]["completion_tokens"], + "total_cost": cost if supports_costs else None, + }, + metadata=metadata, + ) + except Exception as e: + print(f"Langfuse Layer Error - {traceback.format_exc()}") diff --git a/litellm/utils.py b/litellm/utils.py index e56ba879f..1e83a319f 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -1636,34 +1636,6 @@ class Logging: end_time=end_time, print_verbose=print_verbose, ) - if callback == "langfuse": - global langFuseLogger - print_verbose("reaches Async langfuse for logging!") - kwargs = {} - for k, v in self.model_call_details.items(): - if ( - k != "original_response" - ): # copy.deepcopy raises errors as this could be a coroutine - kwargs[k] = v - # this only logs streaming once, complete_streaming_response exists i.e when stream ends - if self.stream: - if "complete_streaming_response" not in kwargs: - return - else: - print_verbose( - "reaches Async langfuse for streaming logging!" 
- ) - result = kwargs["complete_streaming_response"] - if langFuseLogger is None: - langFuseLogger = LangFuseLogger() - await langFuseLogger._async_log_event( - kwargs=kwargs, - response_obj=result, - start_time=start_time, - end_time=end_time, - user_id=kwargs.get("user", None), - print_verbose=print_verbose, - ) except: print_verbose( f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while success logging {traceback.format_exc()}" @@ -1788,9 +1760,37 @@ class Logging: response_obj=result, kwargs=self.model_call_details, ) + elif callback == "langfuse": + global langFuseLogger + verbose_logger.debug("reaches langfuse for logging!") + kwargs = {} + for k, v in self.model_call_details.items(): + if ( + k != "original_response" + ): # copy.deepcopy raises errors as this could be a coroutine + kwargs[k] = v + # this only logs streaming once, complete_streaming_response exists i.e when stream ends + if langFuseLogger is None or ( + self.langfuse_public_key != langFuseLogger.public_key + and self.langfuse_secret != langFuseLogger.secret_key + ): + langFuseLogger = LangFuseLogger( + langfuse_public_key=self.langfuse_public_key, + langfuse_secret=self.langfuse_secret, + ) + langFuseLogger.log_event( + start_time=start_time, + end_time=end_time, + response_obj=None, + user_id=kwargs.get("user", None), + print_verbose=print_verbose, + status_message=str(exception), + level="ERROR", + kwargs=self.model_call_details, + ) except Exception as e: print_verbose( - f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while failure logging with integrations {traceback.format_exc()}" + f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while failure logging with integrations {str(e)}" ) print_verbose( f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}" From d09aa560f353b646e67f4f59faa2d71dc8f8f058 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Mon, 5 Feb 2024 22:37:05 -0800 Subject: [PATCH 06/56] (docs) upperbound_key_generate_params --- docs/my-website/docs/proxy/virtual_keys.md | 16 ++++++++++++++++ .../model_prices_and_context_window_backup.json | 4 ++-- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/docs/my-website/docs/proxy/virtual_keys.md b/docs/my-website/docs/proxy/virtual_keys.md index dd5edc6da..c51bfc0ac 100644 --- a/docs/my-website/docs/proxy/virtual_keys.md +++ b/docs/my-website/docs/proxy/virtual_keys.md @@ -352,6 +352,22 @@ Request Params: } ``` +## Upperbound /key/generate params +Use this, if you need to control the upperbound that users can use for `max_budget`, `budget_duration` or any `key/generate` param per key. + +Set `litellm_settings:upperbound_key_generate_params`: +```yaml +litellm_settings: + upperbound_key_generate_params: + max_budget: 100 # upperbound of $100, for all /key/generate requests + duration: "30d" # upperbound of 30 days for all /key/generate requests +``` + +** Expected Behavior ** + +- Send a `/key/generate` request with `max_budget=200` +- Key will be created with `max_budget=100` since 100 is the upper bound + ## Default /key/generate params Use this, if you need to control the default `max_budget` or any `key/generate` param per key. 
diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index b6ded001c..4c28bdbe8 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -156,8 +156,8 @@ "max_tokens": 4097, "max_input_tokens": 4097, "max_output_tokens": 4096, - "input_cost_per_token": 0.000012, - "output_cost_per_token": 0.000016, + "input_cost_per_token": 0.000003, + "output_cost_per_token": 0.000006, "litellm_provider": "openai", "mode": "chat" }, From 0871327ff0bc075218202480032626455fc61aa3 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Mon, 5 Feb 2024 22:38:47 -0800 Subject: [PATCH 07/56] (feat) upperbound_key_generate_params --- litellm/__init__.py | 1 + litellm/proxy/proxy_server.py | 69 +++++++++++++++++++++++++---------- 2 files changed, 51 insertions(+), 19 deletions(-) diff --git a/litellm/__init__.py b/litellm/__init__.py index 3f2a1e4b4..26b761c64 100644 --- a/litellm/__init__.py +++ b/litellm/__init__.py @@ -146,6 +146,7 @@ suppress_debug_info = False dynamodb_table_name: Optional[str] = None s3_callback_params: Optional[Dict] = None default_key_generate_params: Optional[Dict] = None +upperbound_key_generate_params: Optional[Dict] = None default_team_settings: Optional[List] = None #### RELIABILITY #### request_timeout: Optional[float] = 6000 diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 0fe6997ee..c2d3d194a 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -1401,6 +1401,26 @@ class ProxyConfig: proxy_config = ProxyConfig() +def _duration_in_seconds(duration: str): + match = re.match(r"(\d+)([smhd]?)", duration) + if not match: + raise ValueError("Invalid duration format") + + value, unit = match.groups() + value = int(value) + + if unit == "s": + return value + elif unit == "m": + return value * 60 + elif unit == "h": + return value * 3600 + elif unit == "d": + return value * 86400 + else: + raise ValueError("Unsupported duration unit") + + async def generate_key_helper_fn( duration: Optional[str], models: list, @@ -1435,25 +1455,6 @@ async def generate_key_helper_fn( if token is None: token = f"sk-{secrets.token_urlsafe(16)}" - def _duration_in_seconds(duration: str): - match = re.match(r"(\d+)([smhd]?)", duration) - if not match: - raise ValueError("Invalid duration format") - - value, unit = match.groups() - value = int(value) - - if unit == "s": - return value - elif unit == "m": - return value * 60 - elif unit == "h": - return value * 3600 - elif unit == "d": - return value * 86400 - else: - raise ValueError("Unsupported duration unit") - if duration is None: # allow tokens that never expire expires = None else: @@ -2674,6 +2675,36 @@ async def generate_key_fn( elif key == "metadata" and value == {}: setattr(data, key, litellm.default_key_generate_params.get(key, {})) + # check if user set default key/generate params on config.yaml + if litellm.upperbound_key_generate_params is not None: + for elem in data: + # if key in litellm.upperbound_key_generate_params, use the min of value and litellm.upperbound_key_generate_params[key] + key, value = elem + if value is not None and key in litellm.upperbound_key_generate_params: + # if value is float/int + if key in [ + "max_budget", + "max_parallel_requests", + "tpm_limit", + "rpm_limit", + ]: + if value > litellm.upperbound_key_generate_params[key]: + # directly compare floats/ints + setattr( + data, key, litellm.upperbound_key_generate_params[key] + ) + elif 
key == "budget_duration": + # budgets are in 1s, 1m, 1h, 1d, 1m (30s, 30m, 30h, 30d, 30m) + # compare the duration in seconds and max duration in seconds + upperbound_budget_duration = _duration_in_seconds( + duration=litellm.upperbound_key_generate_params[key] + ) + user_set_budget_duration = _duration_in_seconds(duration=value) + if user_set_budget_duration > upperbound_budget_duration: + setattr( + data, key, litellm.upperbound_key_generate_params[key] + ) + data_json = data.json() # type: ignore # if we get max_budget passed to /key/generate, then use it as key_max_budget. Since generate_key_helper_fn is used to make new users From 2ce4153acea35080888fada81590556b7ba7ea15 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Mon, 5 Feb 2024 22:39:36 -0800 Subject: [PATCH 08/56] (test) test_upperbound_key_params --- litellm/tests/test_key_generate_prisma.py | 34 +++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/litellm/tests/test_key_generate_prisma.py b/litellm/tests/test_key_generate_prisma.py index de2616859..b4c86afb2 100644 --- a/litellm/tests/test_key_generate_prisma.py +++ b/litellm/tests/test_key_generate_prisma.py @@ -1279,6 +1279,40 @@ async def test_default_key_params(prisma_client): pytest.fail(f"Got exception {e}") +@pytest.mark.asyncio() +async def test_upperbound_key_params(prisma_client): + """ + - create key + - get key info + - assert key_name is not null + """ + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + litellm.upperbound_key_generate_params = { + "max_budget": 0.001, + "budget_duration": "1m", + } + await litellm.proxy.proxy_server.prisma_client.connect() + try: + request = GenerateKeyRequest( + max_budget=200000, + budget_duration="30d", + ) + key = await generate_key_fn(request) + generated_key = key.key + + result = await info_key_fn(key=generated_key) + key_info = result["info"] + # assert it used the upper bound for max_budget, and budget_duration + assert key_info["max_budget"] == 0.001 + assert key_info["budget_duration"] == "1m" + + print(result) + except Exception as e: + print("Got Exception", e) + pytest.fail(f"Got exception {e}") + + def test_get_bearer_token(): from litellm.proxy.proxy_server import _get_bearer_token From 1ef8b459cebc33c68ff269b0d0b90773459f6968 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Mon, 5 Feb 2024 22:40:52 -0800 Subject: [PATCH 09/56] (feat) proxy - upperbound params /key/generate --- litellm/proxy/proxy_config.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/litellm/proxy/proxy_config.yaml b/litellm/proxy/proxy_config.yaml index 874049a75..bd844bd7b 100644 --- a/litellm/proxy/proxy_config.yaml +++ b/litellm/proxy/proxy_config.yaml @@ -73,6 +73,9 @@ litellm_settings: max_budget: 1.5000 models: ["azure-gpt-3.5"] duration: None + upperbound_key_generate_params: + max_budget: 100 + duration: "30d" # cache: True # setting callback class # callbacks: custom_callbacks.proxy_handler_instance # sets litellm.callbacks = [proxy_handler_instance] From a24041b624063ab4f2bb5bc60f3ff9457d9b9898 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Mon, 5 Feb 2024 22:51:08 -0800 Subject: [PATCH 10/56] (fix) proxy startup test --- .../test_configs/test_config_no_auth.yaml | 95 ------------------- 1 file changed, 95 deletions(-) diff --git a/litellm/tests/test_configs/test_config_no_auth.yaml b/litellm/tests/test_configs/test_config_no_auth.yaml index 8441018e3..ccebe016d 100644 --- 
a/litellm/tests/test_configs/test_config_no_auth.yaml +++ b/litellm/tests/test_configs/test_config_no_auth.yaml @@ -9,21 +9,11 @@ model_list: api_key: os.environ/AZURE_CANADA_API_KEY model: azure/gpt-35-turbo model_name: azure-model -- litellm_params: - api_base: https://gateway.ai.cloudflare.com/v1/0399b10e77ac6668c80404a5ff49eb37/litellm-test/azure-openai/openai-gpt-4-test-v-1 - api_key: os.environ/AZURE_API_KEY - model: azure/chatgpt-v-2 - model_name: azure-cloudflare-model - litellm_params: api_base: https://openai-france-1234.openai.azure.com api_key: os.environ/AZURE_FRANCE_API_KEY model: azure/gpt-turbo model_name: azure-model -- litellm_params: - model: gpt-3.5-turbo - model_info: - description: this is a test openai model - model_name: test_openai_models - litellm_params: model: gpt-3.5-turbo model_info: @@ -36,93 +26,8 @@ model_list: description: this is a test openai model id: 4d1ee26c-abca-450c-8744-8e87fd6755e9 model_name: test_openai_models -- litellm_params: - model: gpt-3.5-turbo - model_info: - description: this is a test openai model - id: 00e19c0f-b63d-42bb-88e9-016fb0c60764 - model_name: test_openai_models -- litellm_params: - model: gpt-3.5-turbo - model_info: - description: this is a test openai model - id: 79fc75bf-8e1b-47d5-8d24-9365a854af03 - model_name: test_openai_models -- litellm_params: - api_base: os.environ/AZURE_API_BASE - api_key: os.environ/AZURE_API_KEY - api_version: 2023-07-01-preview - model: azure/azure-embedding-model - model_info: - mode: embedding - model_name: azure-embedding-model -- litellm_params: - model: gpt-3.5-turbo - model_info: - description: this is a test openai model - id: 55848c55-4162-40f9-a6e2-9a722b9ef404 - model_name: test_openai_models -- litellm_params: - model: gpt-3.5-turbo - model_info: - description: this is a test openai model - id: 34339b1e-e030-4bcc-a531-c48559f10ce4 - model_name: test_openai_models -- litellm_params: - model: gpt-3.5-turbo - model_info: - description: this is a test openai model - id: f6f74e14-ac64-4403-9365-319e584dcdc5 - model_name: test_openai_models -- litellm_params: - model: gpt-3.5-turbo - model_info: - description: this is a test openai model - id: 9b1ef341-322c-410a-8992-903987fef439 - model_name: test_openai_models - litellm_params: model: bedrock/amazon.titan-embed-text-v1 model_info: mode: embedding model_name: amazon-embeddings -- litellm_params: - model: sagemaker/berri-benchmarking-gpt-j-6b-fp16 - model_info: - mode: embedding - model_name: GPT-J 6B - Sagemaker Text Embedding (Internal) -- litellm_params: - model: dall-e-3 - model_info: - mode: image_generation - model_name: dall-e-3 -- litellm_params: - api_base: os.environ/AZURE_SWEDEN_API_BASE - api_key: os.environ/AZURE_SWEDEN_API_KEY - api_version: 2023-12-01-preview - model: azure/dall-e-3-test - model_info: - mode: image_generation - model_name: dall-e-3 -- litellm_params: - api_base: os.environ/AZURE_API_BASE - api_key: os.environ/AZURE_API_KEY - api_version: 2023-06-01-preview - model: azure/ - model_info: - mode: image_generation - model_name: dall-e-2 -- litellm_params: - api_base: os.environ/AZURE_API_BASE - api_key: os.environ/AZURE_API_KEY - api_version: 2023-07-01-preview - model: azure/azure-embedding-model - model_info: - base_model: text-embedding-ada-002 - mode: embedding - model_name: text-embedding-ada-002 -- litellm_params: - model: gpt-3.5-turbo - model_info: - description: this is a test openai model - id: 34cb2419-7c63-44ae-a189-53f1d1ce5953 - model_name: test_openai_models From 
6ea17be09834b0b04a6cb273ca8fc795583899d7 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Mon, 5 Feb 2024 22:53:31 -0800 Subject: [PATCH 11/56] (ci/cd) print debug info for test_proxy_gunicorn_startup_config_dict --- litellm/tests/test_proxy_startup.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/litellm/tests/test_proxy_startup.py b/litellm/tests/test_proxy_startup.py index 650e2f8a7..a846c9f4a 100644 --- a/litellm/tests/test_proxy_startup.py +++ b/litellm/tests/test_proxy_startup.py @@ -33,6 +33,11 @@ def test_proxy_gunicorn_startup_direct_config(): Test both approaches """ try: + from litellm._logging import verbose_proxy_logger, verbose_router_logger + import logging + + verbose_proxy_logger.setLevel(level=logging.DEBUG) + verbose_router_logger.setLevel(level=logging.DEBUG) filepath = os.path.dirname(os.path.abspath(__file__)) # test with worker_config = config yaml config_fp = f"{filepath}/test_configs/test_config_no_auth.yaml" @@ -48,6 +53,11 @@ def test_proxy_gunicorn_startup_direct_config(): def test_proxy_gunicorn_startup_config_dict(): try: + from litellm._logging import verbose_proxy_logger, verbose_router_logger + import logging + + verbose_proxy_logger.setLevel(level=logging.DEBUG) + verbose_router_logger.setLevel(level=logging.DEBUG) filepath = os.path.dirname(os.path.abspath(__file__)) # test with worker_config = config yaml config_fp = f"{filepath}/test_configs/test_config_no_auth.yaml" From f2070d025ebe778b2a4319e682acfd2a51ebb81e Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 06:46:49 -0800 Subject: [PATCH 12/56] (fix) test_normal_router_tpm_limit --- litellm/tests/test_parallel_request_limiter.py | 1 + 1 file changed, 1 insertion(+) diff --git a/litellm/tests/test_parallel_request_limiter.py b/litellm/tests/test_parallel_request_limiter.py index 34dc0e3b5..528bb19d2 100644 --- a/litellm/tests/test_parallel_request_limiter.py +++ b/litellm/tests/test_parallel_request_limiter.py @@ -379,6 +379,7 @@ async def test_normal_router_tpm_limit(): ) except Exception as e: + print("Exception on test_normal_router_tpm_limit", e) assert e.status_code == 429 From 3409ac76904dc4defaf7733d3c920b0dffcaf003 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Tue, 6 Feb 2024 07:22:54 -0800 Subject: [PATCH 13/56] fix(ollama_chat.py): fix ollama chat completion token counting --- litellm/llms/ollama_chat.py | 8 ++++++-- litellm/utils.py | 3 --- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/litellm/llms/ollama_chat.py b/litellm/llms/ollama_chat.py index 95ff8dfaa..3628ae290 100644 --- a/litellm/llms/ollama_chat.py +++ b/litellm/llms/ollama_chat.py @@ -320,11 +320,15 @@ async def ollama_acompletion(url, data, model_response, encoding, logging_obj): model_response["choices"][0]["message"] = message else: model_response["choices"][0]["message"] = response_json["message"] + model_response["created"] = int(time.time()) - model_response["model"] = "ollama/" + data["model"] + model_response["model"] = "ollama_chat/" + data["model"] prompt_tokens = response_json.get("prompt_eval_count", litellm.token_counter(messages=data["messages"])) # type: ignore completion_tokens = response_json.get( - "eval_count", litellm.token_counter(text=response_json["message"]) + "eval_count", + litellm.token_counter( + text=response_json["message"]["content"], count_response_tokens=True + ), ) model_response["usage"] = litellm.Usage( prompt_tokens=prompt_tokens, diff --git a/litellm/utils.py b/litellm/utils.py index 1e83a319f..8491a1d5e 100644 --- a/litellm/utils.py +++ 
b/litellm/utils.py @@ -983,9 +983,6 @@ class Logging: verbose_logger.debug( f"RAW RESPONSE:\n{self.model_call_details.get('original_response', self.model_call_details)}\n\n" ) - verbose_logger.debug( - f"Logging Details Post-API Call: LiteLLM Params: {self.model_call_details}" - ) if self.logger_fn and callable(self.logger_fn): try: self.logger_fn( From 34fcb3c9845bc74612bc9a85aa68a49c35b72a37 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Tue, 6 Feb 2024 07:26:13 -0800 Subject: [PATCH 14/56] fix(utils.py): use print_verbose for statements, so debug can be seen when running sdk --- litellm/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/litellm/utils.py b/litellm/utils.py index 8491a1d5e..5ccb85ef0 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -980,7 +980,7 @@ class Logging: self.model_call_details["log_event_type"] = "post_api_call" # User Logging -> if you pass in a custom logging function - verbose_logger.debug( + print_verbose( f"RAW RESPONSE:\n{self.model_call_details.get('original_response', self.model_call_details)}\n\n" ) if self.logger_fn and callable(self.logger_fn): From f9b5e9ea62e11b04255d72842ef717528beff6a7 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Tue, 6 Feb 2024 07:43:47 -0800 Subject: [PATCH 15/56] fix(ollama_chat.py): explicitly state if ollama call is streaming or not --- litellm/llms/ollama_chat.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/litellm/llms/ollama_chat.py b/litellm/llms/ollama_chat.py index 3628ae290..d1a439398 100644 --- a/litellm/llms/ollama_chat.py +++ b/litellm/llms/ollama_chat.py @@ -146,7 +146,12 @@ def get_ollama_response( optional_params[k] = v stream = optional_params.pop("stream", False) - data = {"model": model, "messages": messages, "options": optional_params} + data = { + "model": model, + "messages": messages, + "options": optional_params, + "stream": stream, + } ## LOGGING logging_obj.pre_call( input=None, From 26d0f895ff0e4e5edb294d097bb48df27d61ba42 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Tue, 6 Feb 2024 07:44:04 -0800 Subject: [PATCH 16/56] =?UTF-8?q?bump:=20version=201.22.6=20=E2=86=92=201.?= =?UTF-8?q?22.7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 06dedbed6..be8c8966b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "litellm" -version = "1.22.6" +version = "1.22.7" description = "Library to easily interface with LLM API providers" authors = ["BerriAI"] license = "MIT" @@ -69,7 +69,7 @@ requires = ["poetry-core", "wheel"] build-backend = "poetry.core.masonry.api" [tool.commitizen] -version = "1.22.6" +version = "1.22.7" version_files = [ "pyproject.toml:^version" ] From 7b5543b9939c61b3a9afbf1ec0e393d69d1ca4d4 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Tue, 6 Feb 2024 07:35:46 -0800 Subject: [PATCH 17/56] build(requirements.txt): update the proxy requirements.txt --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index c9bd0e511..768e8dff3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,7 +11,7 @@ boto3==1.28.58 # aws bedrock/sagemaker calls redis==4.6.0 # caching prisma==0.11.0 # for db mangum==0.17.0 # for aws lambda functions -google-generativeai==0.1.0 # for vertex ai calls +google-generativeai==0.3.2 # for vertex ai calls 
async_generator==1.10.0 # for async ollama calls traceloop-sdk==0.5.3 # for open telemetry logging langfuse>=2.6.3 # for langfuse self-hosted logging From 4d76af89f3740312c1aa12c1dbd6dcc8ada92fa3 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Tue, 6 Feb 2024 10:11:43 -0800 Subject: [PATCH 18/56] fix(ollama.py): support format for ollama --- litellm/llms/ollama.py | 10 +++++++++- litellm/llms/ollama_chat.py | 3 +++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/litellm/llms/ollama.py b/litellm/llms/ollama.py index d0bc24af4..9339deb78 100644 --- a/litellm/llms/ollama.py +++ b/litellm/llms/ollama.py @@ -146,7 +146,15 @@ def get_ollama_response( optional_params[k] = v stream = optional_params.pop("stream", False) - data = {"model": model, "prompt": prompt, "options": optional_params} + format = optional_params.pop("format", None) + data = { + "model": model, + "prompt": prompt, + "options": optional_params, + "stream": stream, + } + if format is not None: + data["format"] = format ## LOGGING logging_obj.pre_call( diff --git a/litellm/llms/ollama_chat.py b/litellm/llms/ollama_chat.py index d1a439398..0311931b1 100644 --- a/litellm/llms/ollama_chat.py +++ b/litellm/llms/ollama_chat.py @@ -146,12 +146,15 @@ def get_ollama_response( optional_params[k] = v stream = optional_params.pop("stream", False) + format = optional_params.pop("format", None) data = { "model": model, "messages": messages, "options": optional_params, "stream": stream, } + if format is not None: + data["format"] = format ## LOGGING logging_obj.pre_call( input=None, From bf35619b969d7015e978ba90700c3586035e05dd Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Tue, 6 Feb 2024 10:12:13 -0800 Subject: [PATCH 19/56] =?UTF-8?q?bump:=20version=201.22.7=20=E2=86=92=201.?= =?UTF-8?q?22.8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index be8c8966b..17d80ae8e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "litellm" -version = "1.22.7" +version = "1.22.8" description = "Library to easily interface with LLM API providers" authors = ["BerriAI"] license = "MIT" @@ -69,7 +69,7 @@ requires = ["poetry-core", "wheel"] build-backend = "poetry.core.masonry.api" [tool.commitizen] -version = "1.22.7" +version = "1.22.8" version_files = [ "pyproject.toml:^version" ] From 249482b3f75d27a11065fa5832e5ba4560c7e0ef Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 10:57:20 -0800 Subject: [PATCH 20/56] (ci/cd) run in verbose mode --- .circleci/config.yml | 2 +- litellm/tests/test_completion.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index c1224159a..9a29ed07c 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -80,7 +80,7 @@ jobs: command: | pwd ls - python -m pytest -vv litellm/tests/ -x --junitxml=test-results/junit.xml --durations=5 + python -m pytest -vv -s litellm/tests/ -x --junitxml=test-results/junit.xml --durations=5 no_output_timeout: 120m # Store test results diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py index bd0301f20..e0ee05d4f 100644 --- a/litellm/tests/test_completion.py +++ b/litellm/tests/test_completion.py @@ -41,7 +41,7 @@ def test_completion_custom_provider_model_name(): messages=messages, logger_fn=logger_fn, ) - # Add any assertions here to check the, response + # Add any 
assertions here to check the,response print(response) print(response["choices"][0]["finish_reason"]) except litellm.Timeout as e: From ec5b81298917f553b234b3c7f2be495e548cb973 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 11:27:24 -0800 Subject: [PATCH 21/56] (fix) rename proxy startup test --- litellm/tests/{test_proxy_startup.py => test_aproxy_startup.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename litellm/tests/{test_proxy_startup.py => test_aproxy_startup.py} (100%) diff --git a/litellm/tests/test_proxy_startup.py b/litellm/tests/test_aproxy_startup.py similarity index 100% rename from litellm/tests/test_proxy_startup.py rename to litellm/tests/test_aproxy_startup.py From b1b5daf73d3e31d6ba3d8c4c3decdd9f19926c71 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 11:38:57 -0800 Subject: [PATCH 22/56] (fix) proxy_startup test --- litellm/tests/test_aproxy_startup.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/litellm/tests/test_aproxy_startup.py b/litellm/tests/test_aproxy_startup.py index a846c9f4a..024d69b1f 100644 --- a/litellm/tests/test_aproxy_startup.py +++ b/litellm/tests/test_aproxy_startup.py @@ -36,6 +36,11 @@ def test_proxy_gunicorn_startup_direct_config(): from litellm._logging import verbose_proxy_logger, verbose_router_logger import logging + # unset set DATABASE_URL in env for this test + # set prisma client to None + setattr(litellm.proxy.proxy_server, "prisma_client", None) + database_url = os.environ.pop("DATABASE_URL", None) + verbose_proxy_logger.setLevel(level=logging.DEBUG) verbose_router_logger.setLevel(level=logging.DEBUG) filepath = os.path.dirname(os.path.abspath(__file__)) @@ -49,6 +54,10 @@ def test_proxy_gunicorn_startup_direct_config(): pass else: pytest.fail(f"An exception occurred - {str(e)}") + finally: + # restore DATABASE_URL after the test + if database_url is not None: + os.environ["DATABASE_URL"] = database_url def test_proxy_gunicorn_startup_config_dict(): @@ -58,6 +67,11 @@ def test_proxy_gunicorn_startup_config_dict(): verbose_proxy_logger.setLevel(level=logging.DEBUG) verbose_router_logger.setLevel(level=logging.DEBUG) + # unset set DATABASE_URL in env for this test + # set prisma client to None + setattr(litellm.proxy.proxy_server, "prisma_client", None) + database_url = os.environ.pop("DATABASE_URL", None) + filepath = os.path.dirname(os.path.abspath(__file__)) # test with worker_config = config yaml config_fp = f"{filepath}/test_configs/test_config_no_auth.yaml" @@ -71,6 +85,10 @@ def test_proxy_gunicorn_startup_config_dict(): pass else: pytest.fail(f"An exception occurred - {str(e)}") + finally: + # restore DATABASE_URL after the test + if database_url is not None: + os.environ["DATABASE_URL"] = database_url # test_proxy_gunicorn_startup() From a1883a11611beeffbaa0d472a688becebe1054ff Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 12:22:16 -0800 Subject: [PATCH 23/56] (ci/cd) run pytest without -s --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 9a29ed07c..c1224159a 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -80,7 +80,7 @@ jobs: command: | pwd ls - python -m pytest -vv -s litellm/tests/ -x --junitxml=test-results/junit.xml --durations=5 + python -m pytest -vv litellm/tests/ -x --junitxml=test-results/junit.xml --durations=5 no_output_timeout: 120m # Store test results From 43cb836c4f01d3297e3f29a9c1001486c1af890d Mon Sep 17 00:00:00 2001 
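As an aside on the proxy-startup test fix a few commits above (PATCH 22), which pops `DATABASE_URL` by hand and restores it in a `finally` block: the same temporary-unset pattern can be expressed with pytest's built-in `monkeypatch` fixture, which restores the environment automatically after each test. This is only a sketch; the fixture and test names below are illustrative and do not appear in the repository.

```python
import os
import pytest


@pytest.fixture
def no_database_url(monkeypatch):
    # Remove DATABASE_URL for the duration of one test; monkeypatch
    # undoes the change automatically when the test finishes.
    monkeypatch.delenv("DATABASE_URL", raising=False)


def test_proxy_startup_without_db(no_database_url):
    # Hypothetical test body; the real startup checks live in
    # litellm/tests/test_aproxy_startup.py.
    assert os.getenv("DATABASE_URL") is None
```

This keeps the save/restore bookkeeping out of each test body instead of repeating it per test.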
From: ishaan-jaff Date: Tue, 6 Feb 2024 12:22:24 -0800 Subject: [PATCH 24/56] (ci/cd) run again --- litellm/tests/test_completion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py index e0ee05d4f..bd0301f20 100644 --- a/litellm/tests/test_completion.py +++ b/litellm/tests/test_completion.py @@ -41,7 +41,7 @@ def test_completion_custom_provider_model_name(): messages=messages, logger_fn=logger_fn, ) - # Add any assertions here to check the,response + # Add any assertions here to check the, response print(response) print(response["choices"][0]["finish_reason"]) except litellm.Timeout as e: From f8491feebdfa61a1515e21ad54795142ab8d3b70 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 12:43:28 -0800 Subject: [PATCH 25/56] (fix) parallel_request_limiter debug --- litellm/proxy/hooks/parallel_request_limiter.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/litellm/proxy/hooks/parallel_request_limiter.py b/litellm/proxy/hooks/parallel_request_limiter.py index ca60421a5..48cf5b779 100644 --- a/litellm/proxy/hooks/parallel_request_limiter.py +++ b/litellm/proxy/hooks/parallel_request_limiter.py @@ -130,7 +130,9 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger): "current_rpm": current["current_rpm"] + 1, } - self.print_verbose(f"updated_value in success call: {new_val}") + self.print_verbose( + f"updated_value in success call: {new_val}, precise_minute: {precise_minute}" + ) self.user_api_key_cache.set_cache( request_count_api_key, new_val, ttl=60 ) # store in cache for 1 min. From 0f6a9242ec2f96a4c74ddf91b8a8696a1ae6f038 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 12:44:30 -0800 Subject: [PATCH 26/56] (fix) test_normal_router_tpm_limit --- litellm/tests/test_parallel_request_limiter.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/litellm/tests/test_parallel_request_limiter.py b/litellm/tests/test_parallel_request_limiter.py index 528bb19d2..bfac8ddea 100644 --- a/litellm/tests/test_parallel_request_limiter.py +++ b/litellm/tests/test_parallel_request_limiter.py @@ -306,6 +306,10 @@ async def test_normal_router_call(): @pytest.mark.asyncio async def test_normal_router_tpm_limit(): + from litellm._logging import verbose_proxy_logger + import logging + + verbose_proxy_logger.setLevel(level=logging.DEBUG) model_list = [ { "model_name": "azure-model", @@ -353,6 +357,7 @@ async def test_normal_router_tpm_limit(): current_minute = datetime.now().strftime("%M") precise_minute = f"{current_date}-{current_hour}-{current_minute}" request_count_api_key = f"{_api_key}::{precise_minute}::request_count" + print("Test: Checking current_requests for precise_minute=", precise_minute) assert ( parallel_request_handler.user_api_key_cache.get_cache( @@ -366,6 +371,7 @@ async def test_normal_router_tpm_limit(): model="azure-model", messages=[{"role": "user", "content": "Write me a paragraph on the moon"}], metadata={"user_api_key": _api_key}, + mock_response="hello", ) await asyncio.sleep(1) # success is done in a separate thread print(f"response: {response}") From 86c84d72e5cd750397582947efc2410fff42b17d Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 12:47:19 -0800 Subject: [PATCH 27/56] (ci/cd) fix test_config_no_auth --- .../test_configs/test_config_no_auth.yaml | 95 +++++++++++++++++++ 1 file changed, 95 insertions(+) diff --git a/litellm/tests/test_configs/test_config_no_auth.yaml b/litellm/tests/test_configs/test_config_no_auth.yaml 
index ccebe016d..9d7aff570 100644 --- a/litellm/tests/test_configs/test_config_no_auth.yaml +++ b/litellm/tests/test_configs/test_config_no_auth.yaml @@ -9,11 +9,21 @@ model_list: api_key: os.environ/AZURE_CANADA_API_KEY model: azure/gpt-35-turbo model_name: azure-model +- litellm_params: + api_base: https://gateway.ai.cloudflare.com/v1/0399b10e77ac6668c80404a5ff49eb37/litellm-test/azure-openai/openai-gpt-4-test-v-1 + api_key: os.environ/AZURE_API_KEY + model: azure/chatgpt-v-2 + model_name: azure-cloudflare-model - litellm_params: api_base: https://openai-france-1234.openai.azure.com api_key: os.environ/AZURE_FRANCE_API_KEY model: azure/gpt-turbo model_name: azure-model +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + model_name: test_openai_models - litellm_params: model: gpt-3.5-turbo model_info: @@ -26,8 +36,93 @@ model_list: description: this is a test openai model id: 4d1ee26c-abca-450c-8744-8e87fd6755e9 model_name: test_openai_models +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + id: 00e19c0f-b63d-42bb-88e9-016fb0c60764 + model_name: test_openai_models +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + id: 79fc75bf-8e1b-47d5-8d24-9365a854af03 + model_name: test_openai_models +- litellm_params: + api_base: os.environ/AZURE_API_BASE + api_key: os.environ/AZURE_API_KEY + api_version: 2023-07-01-preview + model: azure/azure-embedding-model + model_info: + mode: embedding + model_name: azure-embedding-model +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + id: 55848c55-4162-40f9-a6e2-9a722b9ef404 + model_name: test_openai_models +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + id: 34339b1e-e030-4bcc-a531-c48559f10ce4 + model_name: test_openai_models +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + id: f6f74e14-ac64-4403-9365-319e584dcdc5 + model_name: test_openai_models +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + id: 9b1ef341-322c-410a-8992-903987fef439 + model_name: test_openai_models - litellm_params: model: bedrock/amazon.titan-embed-text-v1 model_info: mode: embedding model_name: amazon-embeddings +- litellm_params: + model: sagemaker/berri-benchmarking-gpt-j-6b-fp16 + model_info: + mode: embedding + model_name: GPT-J 6B - Sagemaker Text Embedding (Internal) +- litellm_params: + model: dall-e-3 + model_info: + mode: image_generation + model_name: dall-e-3 +- litellm_params: + api_base: os.environ/AZURE_SWEDEN_API_BASE + api_key: os.environ/AZURE_SWEDEN_API_KEY + api_version: 2023-12-01-preview + model: azure/dall-e-3-test + model_info: + mode: image_generation + model_name: dall-e-3 +- litellm_params: + api_base: os.environ/AZURE_API_BASE + api_key: os.environ/AZURE_API_KEY + api_version: 2023-06-01-preview + model: azure/ + model_info: + mode: image_generation + model_name: dall-e-2 +- litellm_params: + api_base: os.environ/AZURE_API_BASE + api_key: os.environ/AZURE_API_KEY + api_version: 2023-07-01-preview + model: azure/azure-embedding-model + model_info: + base_model: text-embedding-ada-002 + mode: embedding + model_name: text-embedding-ada-002 +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + id: 34cb2419-7c63-44ae-a189-53f1d1ce5953 + model_name: test_openai_models \ 
No newline at end of file From 1bcd2eafd2be33074fbcd0c95c78b40c62f9c3db Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 12:53:47 -0800 Subject: [PATCH 28/56] (ci/cd) run again --- litellm/tests/test_completion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py index bd0301f20..e0ee05d4f 100644 --- a/litellm/tests/test_completion.py +++ b/litellm/tests/test_completion.py @@ -41,7 +41,7 @@ def test_completion_custom_provider_model_name(): messages=messages, logger_fn=logger_fn, ) - # Add any assertions here to check the, response + # Add any assertions here to check the,response print(response) print(response["choices"][0]["finish_reason"]) except litellm.Timeout as e: From bc6d29f879d484e762d872e0fb9a48e8ca2bc174 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 13:02:36 -0800 Subject: [PATCH 29/56] (ci/cd) run again --- litellm/tests/test_completion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py index e0ee05d4f..bd0301f20 100644 --- a/litellm/tests/test_completion.py +++ b/litellm/tests/test_completion.py @@ -41,7 +41,7 @@ def test_completion_custom_provider_model_name(): messages=messages, logger_fn=logger_fn, ) - # Add any assertions here to check the,response + # Add any assertions here to check the, response print(response) print(response["choices"][0]["finish_reason"]) except litellm.Timeout as e: From 420d2754d706a8379dfe02006fa53eef4c6e2028 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Tue, 6 Feb 2024 13:10:29 -0800 Subject: [PATCH 30/56] fix(utils.py): round max tokens to be int always --- litellm/tests/test_completion.py | 5 +++-- litellm/utils.py | 4 +++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py index bd0301f20..de79c97af 100644 --- a/litellm/tests/test_completion.py +++ b/litellm/tests/test_completion.py @@ -544,13 +544,13 @@ def hf_test_completion_tgi(): def test_completion_openai(): try: litellm.set_verbose = True + litellm.drop_params = True print(f"api key: {os.environ['OPENAI_API_KEY']}") litellm.api_key = os.environ["OPENAI_API_KEY"] response = completion( model="gpt-3.5-turbo", - messages=messages, + messages=[{"role": "user", "content": "Hey"}], max_tokens=10, - request_timeout=1, metadata={"hi": "bye"}, ) print("This is the response object\n", response) @@ -565,6 +565,7 @@ def test_completion_openai(): assert len(response_str) > 1 litellm.api_key = None + raise Exception("it works!") except Timeout as e: pass except Exception as e: diff --git a/litellm/utils.py b/litellm/utils.py index 5ccb85ef0..fdca57e51 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -2348,7 +2348,9 @@ def client(original_function): elif user_max_tokens + input_tokens > max_output_tokens: user_max_tokens = max_output_tokens - input_tokens print_verbose(f"user_max_tokens: {user_max_tokens}") - kwargs["max_tokens"] = user_max_tokens + kwargs["max_tokens"] = int( + round(user_max_tokens) + ) # make sure max tokens is always an int except Exception as e: print_verbose(f"Error while checking max token limit: {str(e)}") # MODEL CALL From 0bd2734520a2771845504087a9a4ba1df5419dca Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Tue, 6 Feb 2024 13:10:49 -0800 Subject: [PATCH 31/56] =?UTF-8?q?bump:=20version=201.22.8=20=E2=86=92=201.?= =?UTF-8?q?22.9?= MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 17d80ae8e..944aad7f8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "litellm" -version = "1.22.8" +version = "1.22.9" description = "Library to easily interface with LLM API providers" authors = ["BerriAI"] license = "MIT" @@ -69,7 +69,7 @@ requires = ["poetry-core", "wheel"] build-backend = "poetry.core.masonry.api" [tool.commitizen] -version = "1.22.8" +version = "1.22.9" version_files = [ "pyproject.toml:^version" ] From dc9cb13e10da63e94602ad11f9afc85c50d05c36 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 13:09:48 -0800 Subject: [PATCH 32/56] (feat) show langfuse logging tags better through proxy --- litellm/integrations/langfuse.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/litellm/integrations/langfuse.py b/litellm/integrations/langfuse.py index 82de33366..3c3e793df 100644 --- a/litellm/integrations/langfuse.py +++ b/litellm/integrations/langfuse.py @@ -252,8 +252,14 @@ class LangFuseLogger: print_verbose(f"trace: {cost}") if supports_tags: for key, value in metadata.items(): - tags.append(f"{key}:{value}") + if key in [ + "user_api_key", + "user_api_key_user_id", + ]: + tags.append(f"{key}:{value}") if "cache_hit" in kwargs: + if kwargs["cache_hit"] is None: + kwargs["cache_hit"] = False tags.append(f"cache_hit:{kwargs['cache_hit']}") trace_params.update({"tags": tags}) From 6a5e96ac1b54a65c7a8be8be65a3b8a757055eeb Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Mon, 5 Feb 2024 12:28:21 -0800 Subject: [PATCH 33/56] (feat )add semantic cache --- litellm/caching.py | 102 +++++++++++++++++++++++++++++++++- litellm/tests/test_caching.py | 25 +++++++++ 2 files changed, 124 insertions(+), 3 deletions(-) diff --git a/litellm/caching.py b/litellm/caching.py index d0721fe9a..e1ef95dc3 100644 --- a/litellm/caching.py +++ b/litellm/caching.py @@ -83,7 +83,6 @@ class InMemoryCache(BaseCache): self.cache_dict.clear() self.ttl_dict.clear() - async def disconnect(self): pass @@ -217,7 +216,6 @@ class RedisCache(BaseCache): def flush_cache(self): self.redis_client.flushall() - async def disconnect(self): pass @@ -225,6 +223,102 @@ class RedisCache(BaseCache): self.redis_client.delete(key) +class RedisSemanticCache(RedisCache): + def __init__(self, host, port, password, **kwargs): + super().__init__() + + # from redis.commands.search.field import TagField, TextField, NumericField, VectorField + # from redis.commands.search.indexDefinition import IndexDefinition, IndexType + # from redis.commands.search.query import Query + + # INDEX_NAME = 'idx:litellm_completion_response_vss' + # DOC_PREFIX = 'bikes:' + + # try: + # # check to see if index exists + # client.ft(INDEX_NAME).info() + # print('Index already exists!') + # except: + # # schema + # schema = ( + # TextField('$.model', no_stem=True, as_name='model'), + # TextField('$.brand', no_stem=True, as_name='brand'), + # NumericField('$.price', as_name='price'), + # TagField('$.type', as_name='type'), + # TextField('$.description', as_name='description'), + # VectorField('$.description_embeddings', + # 'FLAT', { + # 'TYPE': 'FLOAT32', + # 'DIM': VECTOR_DIMENSION, + # 'DISTANCE_METRIC': 'COSINE', + # }, as_name='vector' + # ), + # ) + + # # index Definition + # definition = IndexDefinition(prefix=[DOC_PREFIX], index_type=IndexType.JSON) + + # # create Index + # 
client.ft(INDEX_NAME).create_index(fields=schema, definition=definition) + + def set_cache(self, key, value, **kwargs): + ttl = kwargs.get("ttl", None) + print_verbose(f"Set Redis Cache: key: {key}\nValue {value}\nttl={ttl}") + try: + # get text response + # print("in redis semantic cache: value: ", value) + llm_response = value["response"] + + # if llm_response is a string, convert it to a dictionary + if isinstance(llm_response, str): + llm_response = json.loads(llm_response) + + # print("converted llm_response: ", llm_response) + response = llm_response["choices"][0]["message"]["content"] + + # create embedding response + + embedding_response = litellm.embedding( + model="text-embedding-ada-002", + input=response, + cache={"no-store": True}, + ) + + raw_embedding = embedding_response["data"][0]["embedding"] + raw_embedding_dimension = len(raw_embedding) + + # print("embedding: ", raw_embedding) + key = "litellm-semantic:" + key + self.redis_client.json().set( + name=key, + path="$", + obj=json.dumps( + { + "response": response, + "embedding": raw_embedding, + "dimension": raw_embedding_dimension, + } + ), + ) + + stored_redis_value = self.redis_client.json().get(name=key) + + # print("Stored Redis Value: ", stored_redis_value) + + except Exception as e: + # print("Error occurred: ", e) + # NON blocking - notify users Redis is throwing an exception + logging.debug("LiteLLM Caching: set() - Got exception from REDIS : ", e) + + def get_cache(self, key, **kwargs): + pass + + async def async_set_cache(self, key, value, **kwargs): + pass + + async def async_get_cache(self, key, **kwargs): + pass + class S3Cache(BaseCache): def __init__( @@ -429,7 +523,7 @@ class DualCache(BaseCache): class Cache: def __init__( self, - type: Optional[Literal["local", "redis", "s3"]] = "local", + type: Optional[Literal["local", "redis", "redis-semantic", "s3"]] = "local", host: Optional[str] = None, port: Optional[str] = None, password: Optional[str] = None, @@ -468,6 +562,8 @@ class Cache: """ if type == "redis": self.cache: BaseCache = RedisCache(host, port, password, **kwargs) + elif type == "redis-semantic": + self.cache = RedisSemanticCache(host, port, password, **kwargs) elif type == "local": self.cache = InMemoryCache() elif type == "s3": diff --git a/litellm/tests/test_caching.py b/litellm/tests/test_caching.py index 468ab6f80..32904ab78 100644 --- a/litellm/tests/test_caching.py +++ b/litellm/tests/test_caching.py @@ -987,3 +987,28 @@ def test_cache_context_managers(): # test_cache_context_managers() + + +def test_redis_semantic_cache_completion(): + litellm.set_verbose = False + + random_number = random.randint( + 1, 100000 + ) # add a random number to ensure it's always adding / reading from cache + messages = [ + {"role": "user", "content": f"write a one sentence poem about: {random_number}"} + ] + litellm.cache = Cache( + type="redis-semantic", + host=os.environ["REDIS_HOST"], + port=os.environ["REDIS_PORT"], + password=os.environ["REDIS_PASSWORD"], + ) + print("test2 for Redis Caching - non streaming") + response1 = completion(model="gpt-3.5-turbo", messages=messages, max_tokens=20) + # response2 = completion( + # model="gpt-3.5-turbo", messages=messages,max_tokens=20 + # ) + + +# test_redis_cache_completion() From 8436c0489cdb59c338c10d2a4e80eec230d7fa0f Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Mon, 5 Feb 2024 17:58:12 -0800 Subject: [PATCH 34/56] (feat) working - sync semantic caching --- litellm/caching.py | 227 ++++++++++++++++++++++++++++++--------------- 1 file changed, 152 
insertions(+), 75 deletions(-) diff --git a/litellm/caching.py b/litellm/caching.py index e1ef95dc3..0a1046f0d 100644 --- a/litellm/caching.py +++ b/litellm/caching.py @@ -223,94 +223,161 @@ class RedisCache(BaseCache): self.redis_client.delete(key) -class RedisSemanticCache(RedisCache): - def __init__(self, host, port, password, **kwargs): - super().__init__() +class RedisSemanticCache(BaseCache): + def __init__( + self, + host=None, + port=None, + password=None, + redis_url=None, + similarity_threshold=None, + **kwargs, + ): + from redisvl.index import SearchIndex + from redisvl.query import VectorQuery - # from redis.commands.search.field import TagField, TextField, NumericField, VectorField - # from redis.commands.search.indexDefinition import IndexDefinition, IndexType - # from redis.commands.search.query import Query + print_verbose( + "redis semantic-cache initializing INDEX - litellm_semantic_cache_index" + ) + if similarity_threshold is None: + raise Exception("similarity_threshold must be provided, passed None") + self.similarity_threshold = similarity_threshold + schema = { + "index": { + "name": "litellm_semantic_cache_index", + "prefix": "litellm", + "storage_type": "hash", + }, + "fields": { + "text": [{"name": "response"}], + "text": [{"name": "prompt"}], + "vector": [ + { + "name": "litellm_embedding", + "dims": 1536, + "distance_metric": "cosine", + "algorithm": "flat", + "datatype": "float32", + } + ], + }, + } + self.index = SearchIndex.from_dict(schema) + if redis_url is None: + # if no url passed, check if host, port and password are passed, if not raise an Exception + if host is None or port is None or password is None: + raise Exception(f"Redis host, port, and password must be provided") + redis_url = "redis://:" + password + "@" + host + ":" + port + print_verbose(f"redis semantic-cache redis_url: {redis_url}") + self.index.connect(redis_url=redis_url) + self.index.create(overwrite=False) # don't overwrite existing index - # INDEX_NAME = 'idx:litellm_completion_response_vss' - # DOC_PREFIX = 'bikes:' + def _get_cache_logic(self, cached_response: Any): + """ + Common 'get_cache_logic' across sync + async redis client implementations + """ + if cached_response is None: + return cached_response - # try: - # # check to see if index exists - # client.ft(INDEX_NAME).info() - # print('Index already exists!') - # except: - # # schema - # schema = ( - # TextField('$.model', no_stem=True, as_name='model'), - # TextField('$.brand', no_stem=True, as_name='brand'), - # NumericField('$.price', as_name='price'), - # TagField('$.type', as_name='type'), - # TextField('$.description', as_name='description'), - # VectorField('$.description_embeddings', - # 'FLAT', { - # 'TYPE': 'FLOAT32', - # 'DIM': VECTOR_DIMENSION, - # 'DISTANCE_METRIC': 'COSINE', - # }, as_name='vector' - # ), - # ) + # check if cached_response is bytes + if isinstance(cached_response, bytes): + cached_response = cached_response.decode("utf-8") - # # index Definition - # definition = IndexDefinition(prefix=[DOC_PREFIX], index_type=IndexType.JSON) - - # # create Index - # client.ft(INDEX_NAME).create_index(fields=schema, definition=definition) + try: + cached_response = json.loads( + cached_response + ) # Convert string to dictionary + except: + cached_response = ast.literal_eval(cached_response) + return cached_response def set_cache(self, key, value, **kwargs): - ttl = kwargs.get("ttl", None) - print_verbose(f"Set Redis Cache: key: {key}\nValue {value}\nttl={ttl}") - try: - # get text response - # print("in redis 
semantic cache: value: ", value) - llm_response = value["response"] + import numpy as np - # if llm_response is a string, convert it to a dictionary - if isinstance(llm_response, str): - llm_response = json.loads(llm_response) + print_verbose(f"redis semantic-cache set_cache, kwargs: {kwargs}") - # print("converted llm_response: ", llm_response) - response = llm_response["choices"][0]["message"]["content"] + # get the prompt + messages = kwargs["messages"] + prompt = "" + for message in messages: + prompt += message["content"] - # create embedding response + # create an embedding for prompt + embedding_response = litellm.embedding( + model="text-embedding-ada-002", + input=prompt, + cache={"no-store": True, "no-cache": True}, + ) - embedding_response = litellm.embedding( - model="text-embedding-ada-002", - input=response, - cache={"no-store": True}, - ) + # get the embedding + embedding = embedding_response["data"][0]["embedding"] - raw_embedding = embedding_response["data"][0]["embedding"] - raw_embedding_dimension = len(raw_embedding) + # make the embedding a numpy array, convert to bytes + embedding_bytes = np.array(embedding, dtype=np.float32).tobytes() + value = str(value) + assert isinstance(value, str) - # print("embedding: ", raw_embedding) - key = "litellm-semantic:" + key - self.redis_client.json().set( - name=key, - path="$", - obj=json.dumps( - { - "response": response, - "embedding": raw_embedding, - "dimension": raw_embedding_dimension, - } - ), - ) + new_data = [ + {"response": value, "prompt": prompt, "litellm_embedding": embedding_bytes} + ] - stored_redis_value = self.redis_client.json().get(name=key) + # Add more data + keys = self.index.load(new_data) - # print("Stored Redis Value: ", stored_redis_value) - - except Exception as e: - # print("Error occurred: ", e) - # NON blocking - notify users Redis is throwing an exception - logging.debug("LiteLLM Caching: set() - Got exception from REDIS : ", e) + pass def get_cache(self, key, **kwargs): + print_verbose(f"redis semantic-cache get_cache, kwargs: {kwargs}") + from redisvl.query import VectorQuery + import numpy as np + + # query + + # get the messages + messages = kwargs["messages"] + prompt = "" + for message in messages: + prompt += message["content"] + + # convert to embedding + embedding_response = litellm.embedding( + model="text-embedding-ada-002", + input=prompt, + cache={"no-store": True, "no-cache": True}, + ) + + # get the embedding + embedding = embedding_response["data"][0]["embedding"] + + query = VectorQuery( + vector=embedding, + vector_field_name="litellm_embedding", + return_fields=["response", "prompt", "vector_distance"], + num_results=1, + ) + + results = self.index.query(query) + + vector_distance = results[0]["vector_distance"] + vector_distance = float(vector_distance) + similarity = 1 - vector_distance + cached_prompt = results[0]["prompt"] + + # check similarity, if more than self.similarity_threshold, return results + print_verbose( + f"semantic cache: similarity threshold: {self.similarity_threshold}, similarity: {similarity}, prompt: {prompt}, closest_cached_prompt: {cached_prompt}" + ) + if similarity > self.similarity_threshold: + # cache hit ! + cached_value = results[0]["response"] + print_verbose( + f"got a cache hit, similarity: {similarity}, Current prompt: {prompt}, cached_prompt: {cached_prompt}" + ) + return self._get_cache_logic(cached_response=cached_value) + else: + # cache miss ! 
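As an aside on the hit/miss branch above: redisvl reports a cosine *distance* (0 means identical vectors), so the cache converts it to a similarity and compares it against the configured threshold. A self-contained sketch of that rule, with an illustrative helper name not taken from the patch:

```python
def is_semantic_cache_hit(vector_distance: float, similarity_threshold: float) -> bool:
    # cosine similarity is the complement of the cosine distance
    similarity = 1 - float(vector_distance)
    return similarity > similarity_threshold


# e.g. distance 0.15 -> similarity 0.85, a hit at threshold 0.8;
#      distance 0.35 -> similarity 0.65, a miss at the same threshold
assert is_semantic_cache_hit(0.15, 0.8)
assert not is_semantic_cache_hit(0.35, 0.8)
```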
+ return None + pass async def async_set_cache(self, key, value, **kwargs): @@ -527,6 +594,7 @@ class Cache: host: Optional[str] = None, port: Optional[str] = None, password: Optional[str] = None, + similarity_threshold: Optional[float] = None, supported_call_types: Optional[ List[Literal["completion", "acompletion", "embedding", "aembedding"]] ] = ["completion", "acompletion", "embedding", "aembedding"], @@ -547,10 +615,12 @@ class Cache: Initializes the cache based on the given type. Args: - type (str, optional): The type of cache to initialize. Can be "local" or "redis". Defaults to "local". + type (str, optional): The type of cache to initialize. Can be "local", "redis", "redis-semantic", or "s3". Defaults to "local". host (str, optional): The host address for the Redis cache. Required if type is "redis". port (int, optional): The port number for the Redis cache. Required if type is "redis". password (str, optional): The password for the Redis cache. Required if type is "redis". + similarity_threshold (float, optional): The similarity threshold for semantic-caching, Required if type is "redis-semantic" + supported_call_types (list, optional): List of call types to cache for. Defaults to cache == on for all call types. **kwargs: Additional keyword arguments for redis.Redis() cache @@ -563,7 +633,13 @@ class Cache: if type == "redis": self.cache: BaseCache = RedisCache(host, port, password, **kwargs) elif type == "redis-semantic": - self.cache = RedisSemanticCache(host, port, password, **kwargs) + self.cache = RedisSemanticCache( + host, + port, + password, + similarity_threshold=similarity_threshold, + **kwargs, + ) elif type == "local": self.cache = InMemoryCache() elif type == "s3": @@ -743,6 +819,7 @@ class Cache: The cached result if it exists, otherwise None. 
""" try: # never block execution + messages = kwargs.get("messages", []) if "cache_key" in kwargs: cache_key = kwargs["cache_key"] else: @@ -752,7 +829,7 @@ class Cache: max_age = cache_control_args.get( "s-max-age", cache_control_args.get("s-maxage", float("inf")) ) - cached_result = self.cache.get_cache(cache_key) + cached_result = self.cache.get_cache(cache_key, messages=messages) return self._get_cache_logic( cached_result=cached_result, max_age=max_age ) From 5d345b5b5760c8d2af238c19d2fb646d178ba5c2 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Mon, 5 Feb 2024 17:58:32 -0800 Subject: [PATCH 35/56] (test) semantic cache --- litellm/tests/test_caching.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/litellm/tests/test_caching.py b/litellm/tests/test_caching.py index 32904ab78..3ac812cf3 100644 --- a/litellm/tests/test_caching.py +++ b/litellm/tests/test_caching.py @@ -990,7 +990,7 @@ def test_cache_context_managers(): def test_redis_semantic_cache_completion(): - litellm.set_verbose = False + litellm.set_verbose = True random_number = random.randint( 1, 100000 @@ -1003,6 +1003,7 @@ def test_redis_semantic_cache_completion(): host=os.environ["REDIS_HOST"], port=os.environ["REDIS_PORT"], password=os.environ["REDIS_PASSWORD"], + similarity_threshold=0.5, ) print("test2 for Redis Caching - non streaming") response1 = completion(model="gpt-3.5-turbo", messages=messages, max_tokens=20) From 28676b2e0b72f9fec5eb9ac070f26ce31cb56f34 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Mon, 5 Feb 2024 18:22:50 -0800 Subject: [PATCH 36/56] (test) semantic caching --- litellm/tests/test_caching.py | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/litellm/tests/test_caching.py b/litellm/tests/test_caching.py index 3ac812cf3..4b47614cc 100644 --- a/litellm/tests/test_caching.py +++ b/litellm/tests/test_caching.py @@ -995,21 +995,29 @@ def test_redis_semantic_cache_completion(): random_number = random.randint( 1, 100000 ) # add a random number to ensure it's always adding / reading from cache - messages = [ - {"role": "user", "content": f"write a one sentence poem about: {random_number}"} - ] + + print("testing semantic caching") litellm.cache = Cache( type="redis-semantic", host=os.environ["REDIS_HOST"], port=os.environ["REDIS_PORT"], password=os.environ["REDIS_PASSWORD"], - similarity_threshold=0.5, + similarity_threshold=0.8, ) - print("test2 for Redis Caching - non streaming") - response1 = completion(model="gpt-3.5-turbo", messages=messages, max_tokens=20) - # response2 = completion( - # model="gpt-3.5-turbo", messages=messages,max_tokens=20 - # ) + response1 = completion( + model="gpt-3.5-turbo", + messages=[ + { + "role": "user", + "content": f"write a one sentence poem about: {random_number}", + } + ], + max_tokens=20, + ) + print(f"response1: {response1}") + + assert response1.id == "chatcmpl-8p5GejSWLJ1pDI1lfhc6Idhwd2bDJ" + # assert response1.choices[0].message == 1 # test_redis_cache_completion() From 58c4a29fbcf14467cab95e003dce94ca2a11f271 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Mon, 5 Feb 2024 18:25:22 -0800 Subject: [PATCH 37/56] (fix) semantic cache --- litellm/caching.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/litellm/caching.py b/litellm/caching.py index 0a1046f0d..877f935fa 100644 --- a/litellm/caching.py +++ b/litellm/caching.py @@ -270,7 +270,10 @@ class RedisSemanticCache(BaseCache): redis_url = "redis://:" + password + "@" + host + ":" + port print_verbose(f"redis 
semantic-cache redis_url: {redis_url}") self.index.connect(redis_url=redis_url) - self.index.create(overwrite=False) # don't overwrite existing index + try: + self.index.create(overwrite=False) # don't overwrite existing index + except Exception as e: + print_verbose(f"Got exception creating semantic cache index: {str(e)}") def _get_cache_logic(self, cached_response: Any): """ From f66b6f5cd7899d7111272e9444b39bced122c6eb Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 08:13:12 -0800 Subject: [PATCH 38/56] (feat) RedisSemanticCache - async --- litellm/caching.py | 112 ++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 106 insertions(+), 6 deletions(-) diff --git a/litellm/caching.py b/litellm/caching.py index 877f935fa..ad37f2077 100644 --- a/litellm/caching.py +++ b/litellm/caching.py @@ -231,6 +231,7 @@ class RedisSemanticCache(BaseCache): password=None, redis_url=None, similarity_threshold=None, + use_async=False, **kwargs, ): from redisvl.index import SearchIndex @@ -262,14 +263,19 @@ class RedisSemanticCache(BaseCache): ], }, } - self.index = SearchIndex.from_dict(schema) if redis_url is None: # if no url passed, check if host, port and password are passed, if not raise an Exception if host is None or port is None or password is None: raise Exception(f"Redis host, port, and password must be provided") redis_url = "redis://:" + password + "@" + host + ":" + port print_verbose(f"redis semantic-cache redis_url: {redis_url}") - self.index.connect(redis_url=redis_url) + if use_async == False: + self.index = SearchIndex.from_dict(schema) + self.index.connect(redis_url=redis_url) + elif use_async == True: + schema["index"]["name"] = "litellm_semantic_cache_index_async" + self.index = SearchIndex.from_dict(schema) + self.index.connect(redis_url=redis_url, use_async=True) try: self.index.create(overwrite=False) # don't overwrite existing index except Exception as e: @@ -327,10 +333,10 @@ class RedisSemanticCache(BaseCache): # Add more data keys = self.index.load(new_data) - pass + return def get_cache(self, key, **kwargs): - print_verbose(f"redis semantic-cache get_cache, kwargs: {kwargs}") + print_verbose(f"sync redis semantic-cache get_cache, kwargs: {kwargs}") from redisvl.query import VectorQuery import numpy as np @@ -360,6 +366,11 @@ class RedisSemanticCache(BaseCache): ) results = self.index.query(query) + if results == None: + return None + if isinstance(results, list): + if len(results) == 0: + return None vector_distance = results[0]["vector_distance"] vector_distance = float(vector_distance) @@ -384,9 +395,93 @@ class RedisSemanticCache(BaseCache): pass async def async_set_cache(self, key, value, **kwargs): - pass + import numpy as np + + print_verbose(f"async redis semantic-cache set_cache, kwargs: {kwargs}") + + # get the prompt + messages = kwargs["messages"] + prompt = "" + for message in messages: + prompt += message["content"] + # create an embedding for prompt + + embedding_response = await litellm.aembedding( + model="text-embedding-ada-002", + input=prompt, + cache={"no-store": True, "no-cache": True}, + ) + + # get the embedding + embedding = embedding_response["data"][0]["embedding"] + + # make the embedding a numpy array, convert to bytes + embedding_bytes = np.array(embedding, dtype=np.float32).tobytes() + value = str(value) + assert isinstance(value, str) + + new_data = [ + {"response": value, "prompt": prompt, "litellm_embedding": embedding_bytes} + ] + + # Add more data + keys = await self.index.aload(new_data) + return async def 
async_get_cache(self, key, **kwargs): + print_verbose(f"async redis semantic-cache get_cache, kwargs: {kwargs}") + from redisvl.query import VectorQuery + import numpy as np + + # query + + # get the messages + messages = kwargs["messages"] + prompt = "" + for message in messages: + prompt += message["content"] + + # convert to embedding + embedding_response = await litellm.aembedding( + model="text-embedding-ada-002", + input=prompt, + cache={"no-store": True, "no-cache": True}, + ) + + # get the embedding + embedding = embedding_response["data"][0]["embedding"] + + query = VectorQuery( + vector=embedding, + vector_field_name="litellm_embedding", + return_fields=["response", "prompt", "vector_distance"], + ) + results = await self.index.aquery(query) + if results == None: + return None + if isinstance(results, list): + if len(results) == 0: + return None + + vector_distance = results[0]["vector_distance"] + vector_distance = float(vector_distance) + similarity = 1 - vector_distance + cached_prompt = results[0]["prompt"] + + # check similarity, if more than self.similarity_threshold, return results + print_verbose( + f"semantic cache: similarity threshold: {self.similarity_threshold}, similarity: {similarity}, prompt: {prompt}, closest_cached_prompt: {cached_prompt}" + ) + if similarity > self.similarity_threshold: + # cache hit ! + cached_value = results[0]["response"] + print_verbose( + f"got a cache hit, similarity: {similarity}, Current prompt: {prompt}, cached_prompt: {cached_prompt}" + ) + return self._get_cache_logic(cached_response=cached_value) + else: + # cache miss ! + return None pass @@ -612,6 +707,7 @@ class Cache: s3_aws_secret_access_key: Optional[str] = None, s3_aws_session_token: Optional[str] = None, s3_config: Optional[Any] = None, + redis_semantic_cache_use_async=False, **kwargs, ): """ @@ -641,6 +737,7 @@ class Cache: port, password, similarity_threshold=similarity_threshold, + use_async=redis_semantic_cache_use_async, **kwargs, ) elif type == "local": @@ -847,6 +944,7 @@ class Cache: Used for embedding calls in async wrapper """ try: # never block execution + messages = kwargs.get("messages", []) if "cache_key" in kwargs: cache_key = kwargs["cache_key"] else: @@ -856,7 +954,9 @@ class Cache: max_age = cache_control_args.get( "s-max-age", cache_control_args.get("s-maxage", float("inf")) ) - cached_result = await self.cache.async_get_cache(cache_key) + cached_result = await self.cache.async_get_cache( + cache_key, messages=messages + ) return self._get_cache_logic( cached_result=cached_result, max_age=max_age ) From 8a75cbd3ad7ae42e650d5b3e3173b109aca7e2f5 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 08:14:54 -0800 Subject: [PATCH 39/56] (test) async semantic cache --- litellm/tests/test_caching.py | 38 +++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/litellm/tests/test_caching.py b/litellm/tests/test_caching.py index 4b47614cc..a1a42ff65 100644 --- a/litellm/tests/test_caching.py +++ b/litellm/tests/test_caching.py @@ -991,6 +991,9 @@ def test_cache_context_managers(): def test_redis_semantic_cache_completion(): litellm.set_verbose = True + import logging + + logging.basicConfig(level=logging.DEBUG) random_number = random.randint( 1, 100000 @@ -1021,3 +1024,38 @@ def test_redis_semantic_cache_completion(): # test_redis_cache_completion() + + +@pytest.mark.asyncio +async def test_redis_semantic_cache_acompletion(): + litellm.set_verbose = True + import logging + + logging.basicConfig(level=logging.DEBUG) + + 
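As an aside on the `set_cache` / `async_set_cache` implementations above, which store each prompt embedding in the Redis hash as raw float32 bytes: the snippet below is a small round-trip sketch of that encoding. The three-element vector is only a stand-in for a real 1536-dimensional embedding.

```python
import numpy as np

embedding = [0.1, 0.2, 0.3]  # stand-in for a 1536-dim embedding vector
embedding_bytes = np.array(embedding, dtype=np.float32).tobytes()

# reading the field back out of the hash
recovered = np.frombuffer(embedding_bytes, dtype=np.float32)
assert np.allclose(recovered, embedding)
```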
random_number = random.randint( + 1, 100000 + ) # add a random number to ensure it's always adding / reading from cache + + print("testing semantic caching") + litellm.cache = Cache( + type="redis-semantic", + host=os.environ["REDIS_HOST"], + port=os.environ["REDIS_PORT"], + password=os.environ["REDIS_PASSWORD"], + similarity_threshold=0.8, + redis_semantic_cache_use_async=True, + ) + response1 = await litellm.acompletion( + model="gpt-3.5-turbo", + messages=[ + { + "role": "user", + "content": f"write a one sentence poem about: {random_number}", + } + ], + max_tokens=20, + ) + print(f"response1: {response1}") + + assert response1.id == "chatcmpl-8pI86yvT7fvgLDjngZSKULy1iP1o5" From f1dea5571abd754eb7fef45b414229f4da64f6e3 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 08:52:57 -0800 Subject: [PATCH 40/56] (feat) working semantic-cache on litellm proxy --- litellm/caching.py | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/litellm/caching.py b/litellm/caching.py index ad37f2077..a7958d074 100644 --- a/litellm/caching.py +++ b/litellm/caching.py @@ -266,21 +266,30 @@ class RedisSemanticCache(BaseCache): if redis_url is None: # if no url passed, check if host, port and password are passed, if not raise an Exception if host is None or port is None or password is None: - raise Exception(f"Redis host, port, and password must be provided") + # try checking env for host, port and password + import os + + host = os.getenv("REDIS_HOST") + port = os.getenv("REDIS_PORT") + password = os.getenv("REDIS_PASSWORD") + if host is None or port is None or password is None: + raise Exception("Redis host, port, and password must be provided") + redis_url = "redis://:" + password + "@" + host + ":" + port print_verbose(f"redis semantic-cache redis_url: {redis_url}") if use_async == False: self.index = SearchIndex.from_dict(schema) self.index.connect(redis_url=redis_url) + try: + self.index.create(overwrite=False) # don't overwrite existing index + except Exception as e: + print_verbose(f"Got exception creating semantic cache index: {str(e)}") elif use_async == True: schema["index"]["name"] = "litellm_semantic_cache_index_async" self.index = SearchIndex.from_dict(schema) self.index.connect(redis_url=redis_url, use_async=True) - try: - self.index.create(overwrite=False) # don't overwrite existing index - except Exception as e: - print_verbose(f"Got exception creating semantic cache index: {str(e)}") + # def _get_cache_logic(self, cached_response: Any): """ Common 'get_cache_logic' across sync + async redis client implementations @@ -397,6 +406,10 @@ class RedisSemanticCache(BaseCache): async def async_set_cache(self, key, value, **kwargs): import numpy as np + try: + await self.index.acreate(overwrite=False) # don't overwrite existing index + except Exception as e: + print_verbose(f"Got exception creating semantic cache index: {str(e)}") print_verbose(f"async redis semantic-cache set_cache, kwargs: {kwargs}") # get the prompt From 5f2877e699d1c7d92170a18b86ee33129d678f45 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 08:54:36 -0800 Subject: [PATCH 41/56] (feat) redis-semantic cache --- litellm/utils.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/litellm/utils.py b/litellm/utils.py index fdca57e51..c25572c03 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -55,7 +55,7 @@ from .integrations.litedebugger import LiteDebugger from .proxy._types import KeyManagementSystem from openai import OpenAIError as 
OriginalError from openai._models import BaseModel as OpenAIObject -from .caching import S3Cache +from .caching import S3Cache, RedisSemanticCache from .exceptions import ( AuthenticationError, BadRequestError, @@ -2533,6 +2533,14 @@ def client(original_function): ): if len(cached_result) == 1 and cached_result[0] is None: cached_result = None + elif isinstance(litellm.cache.cache, RedisSemanticCache): + preset_cache_key = litellm.cache.get_cache_key(*args, **kwargs) + kwargs[ + "preset_cache_key" + ] = preset_cache_key # for streaming calls, we need to pass the preset_cache_key + cached_result = await litellm.cache.async_get_cache( + *args, **kwargs + ) else: preset_cache_key = litellm.cache.get_cache_key(*args, **kwargs) kwargs[ From 2881e7b111a74d70768138a463f77bbffe39bf14 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 08:55:25 -0800 Subject: [PATCH 42/56] (feat) working semantic cache on proxy --- litellm/proxy/proxy_config.yaml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/litellm/proxy/proxy_config.yaml b/litellm/proxy/proxy_config.yaml index bd844bd7b..41c3b4182 100644 --- a/litellm/proxy/proxy_config.yaml +++ b/litellm/proxy/proxy_config.yaml @@ -73,10 +73,12 @@ litellm_settings: max_budget: 1.5000 models: ["azure-gpt-3.5"] duration: None - upperbound_key_generate_params: - max_budget: 100 - duration: "30d" - # cache: True + cache: True # set cache responses to True + cache_params: + type: "redis-semantic" + similarity_threshold: 0.8 + redis_semantic_cache_use_async: True + # cache: True # setting callback class # callbacks: custom_callbacks.proxy_handler_instance # sets litellm.callbacks = [proxy_handler_instance] From 6edf675e411c497f8ae59fe0914e5f742700d8c0 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 09:30:45 -0800 Subject: [PATCH 43/56] (fix) add redisvl==0.0.7 --- .circleci/requirements.txt | 3 ++- requirements.txt | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.circleci/requirements.txt b/.circleci/requirements.txt index 85b576bff..4730fc28b 100644 --- a/.circleci/requirements.txt +++ b/.circleci/requirements.txt @@ -10,4 +10,5 @@ anthropic boto3 orjson pydantic -google-cloud-aiplatform \ No newline at end of file +google-cloud-aiplatform +redisvl==0.0.7 # semantic caching \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 768e8dff3..b0a49553d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,6 +9,7 @@ uvicorn==0.22.0 # server dep gunicorn==21.2.0 # server dep boto3==1.28.58 # aws bedrock/sagemaker calls redis==4.6.0 # caching +redisvl==0.0.7 # semantic caching prisma==0.11.0 # for db mangum==0.17.0 # for aws lambda functions google-generativeai==0.3.2 # for vertex ai calls From 80af1c9a5832a2d1a717faafe79f365ac5dc77a0 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 09:31:57 -0800 Subject: [PATCH 44/56] (feat) log semantic_sim to langfuse --- litellm/caching.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/litellm/caching.py b/litellm/caching.py index a7958d074..133d1db6d 100644 --- a/litellm/caching.py +++ b/litellm/caching.py @@ -471,9 +471,11 @@ class RedisSemanticCache(BaseCache): ) results = await self.index.aquery(query) if results == None: + kwargs.setdefault("metadata", {})["semantic-similarity"] = 0.0 return None if isinstance(results, list): if len(results) == 0: + kwargs.setdefault("metadata", {})["semantic-similarity"] = 0.0 return None vector_distance = results[0]["vector_distance"] @@ 
-485,6 +487,10 @@ class RedisSemanticCache(BaseCache): print_verbose( f"semantic cache: similarity threshold: {self.similarity_threshold}, similarity: {similarity}, prompt: {prompt}, closest_cached_prompt: {cached_prompt}" ) + + # update kwargs["metadata"] with similarity, don't rewrite the original metadata + kwargs.setdefault("metadata", {})["semantic-similarity"] = similarity + if similarity > self.similarity_threshold: # cache hit ! cached_value = results[0]["response"] @@ -968,7 +974,7 @@ class Cache: "s-max-age", cache_control_args.get("s-maxage", float("inf")) ) cached_result = await self.cache.async_get_cache( - cache_key, messages=messages + cache_key, *args, **kwargs ) return self._get_cache_logic( cached_result=cached_result, max_age=max_age From 48fa97125d071d0909341d72fb271c6827f89deb Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 10:22:02 -0800 Subject: [PATCH 45/56] allow setting redis_semantic cache_embedding model --- litellm/caching.py | 54 +++++++++++++++++++++++++++++++++++----------- 1 file changed, 42 insertions(+), 12 deletions(-) diff --git a/litellm/caching.py b/litellm/caching.py index 133d1db6d..6bf53ea45 100644 --- a/litellm/caching.py +++ b/litellm/caching.py @@ -232,6 +232,7 @@ class RedisSemanticCache(BaseCache): redis_url=None, similarity_threshold=None, use_async=False, + embedding_model="text-embedding-ada-002", **kwargs, ): from redisvl.index import SearchIndex @@ -243,6 +244,7 @@ class RedisSemanticCache(BaseCache): if similarity_threshold is None: raise Exception("similarity_threshold must be provided, passed None") self.similarity_threshold = similarity_threshold + self.embedding_model = embedding_model schema = { "index": { "name": "litellm_semantic_cache_index", @@ -322,7 +324,7 @@ class RedisSemanticCache(BaseCache): # create an embedding for prompt embedding_response = litellm.embedding( - model="text-embedding-ada-002", + model=self.embedding_model, input=prompt, cache={"no-store": True, "no-cache": True}, ) @@ -359,7 +361,7 @@ class RedisSemanticCache(BaseCache): # convert to embedding embedding_response = litellm.embedding( - model="text-embedding-ada-002", + model=self.embedding_model, input=prompt, cache={"no-store": True, "no-cache": True}, ) @@ -405,6 +407,7 @@ class RedisSemanticCache(BaseCache): async def async_set_cache(self, key, value, **kwargs): import numpy as np + from litellm.proxy.proxy_server import llm_router, llm_model_list try: await self.index.acreate(overwrite=False) # don't overwrite existing index @@ -418,12 +421,24 @@ class RedisSemanticCache(BaseCache): for message in messages: prompt += message["content"] # create an embedding for prompt - - embedding_response = await litellm.aembedding( - model="text-embedding-ada-002", - input=prompt, - cache={"no-store": True, "no-cache": True}, + router_model_names = ( + [m["model_name"] for m in llm_model_list] + if llm_model_list is not None + else [] ) + if llm_router is not None and self.embedding_model in router_model_names: + embedding_response = await llm_router.aembedding( + model=self.embedding_model, + input=prompt, + cache={"no-store": True, "no-cache": True}, + ) + else: + # convert to embedding + embedding_response = await litellm.aembedding( + model=self.embedding_model, + input=prompt, + cache={"no-store": True, "no-cache": True}, + ) # get the embedding embedding = embedding_response["data"][0]["embedding"] @@ -445,6 +460,7 @@ class RedisSemanticCache(BaseCache): print_verbose(f"async redis semantic-cache get_cache, kwargs: {kwargs}") from 
redisvl.query import VectorQuery import numpy as np + from litellm.proxy.proxy_server import llm_router, llm_model_list # query @@ -454,12 +470,24 @@ class RedisSemanticCache(BaseCache): for message in messages: prompt += message["content"] - # convert to embedding - embedding_response = await litellm.aembedding( - model="text-embedding-ada-002", - input=prompt, - cache={"no-store": True, "no-cache": True}, + router_model_names = ( + [m["model_name"] for m in llm_model_list] + if llm_model_list is not None + else [] ) + if llm_router is not None and self.embedding_model in router_model_names: + embedding_response = await llm_router.aembedding( + model=self.embedding_model, + input=prompt, + cache={"no-store": True, "no-cache": True}, + ) + else: + # convert to embedding + embedding_response = await litellm.aembedding( + model=self.embedding_model, + input=prompt, + cache={"no-store": True, "no-cache": True}, + ) # get the embedding embedding = embedding_response["data"][0]["embedding"] @@ -727,6 +755,7 @@ class Cache: s3_aws_session_token: Optional[str] = None, s3_config: Optional[Any] = None, redis_semantic_cache_use_async=False, + redis_semantic_cache_embedding_model="text-embedding-ada-002", **kwargs, ): """ @@ -757,6 +786,7 @@ class Cache: password, similarity_threshold=similarity_threshold, use_async=redis_semantic_cache_use_async, + embedding_model=redis_semantic_cache_embedding_model, **kwargs, ) elif type == "local": From e74363b4808cc37173aa516d1ac4fde9977b1807 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 10:27:33 -0800 Subject: [PATCH 46/56] (fix) use semantic cache on proxy --- litellm/proxy/proxy_config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/litellm/proxy/proxy_config.yaml b/litellm/proxy/proxy_config.yaml index 41c3b4182..326544f41 100644 --- a/litellm/proxy/proxy_config.yaml +++ b/litellm/proxy/proxy_config.yaml @@ -77,7 +77,7 @@ litellm_settings: cache_params: type: "redis-semantic" similarity_threshold: 0.8 - redis_semantic_cache_use_async: True + redis_semantic_cache_embedding_model: azure-embedding-model # cache: True # setting callback class # callbacks: custom_callbacks.proxy_handler_instance # sets litellm.callbacks = [proxy_handler_instance] From c1e9041506388384091729280b15722b3e039297 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 10:32:07 -0800 Subject: [PATCH 47/56] (docs) using semantic caching on proxy --- docs/my-website/docs/proxy/caching.md | 52 ++++++++++++++++++++++++++- 1 file changed, 51 insertions(+), 1 deletion(-) diff --git a/docs/my-website/docs/proxy/caching.md b/docs/my-website/docs/proxy/caching.md index 03bb9fed3..3f2687824 100644 --- a/docs/my-website/docs/proxy/caching.md +++ b/docs/my-website/docs/proxy/caching.md @@ -9,7 +9,7 @@ LiteLLM supports: - Redis Cache - s3 Bucket Cache -## Quick Start - Redis, s3 Cache +## Quick Start - Redis, s3 Cache, Semantic Cache @@ -84,6 +84,56 @@ litellm_settings: $ litellm --config /path/to/config.yaml ``` + + + + +Caching can be enabled by adding the `cache` key in the `config.yaml` + +### Step 1: Add `cache` to the config.yaml +```yaml +model_list: + - model_name: gpt-3.5-turbo + litellm_params: + model: gpt-3.5-turbo + - model_name: azure-embedding-model + litellm_params: + model: azure/azure-embedding-model + api_base: os.environ/AZURE_API_BASE + api_key: os.environ/AZURE_API_KEY + api_version: "2023-07-01-preview" + +litellm_settings: + set_verbose: True + cache: True # set cache responses to True, litellm defaults to using a redis 
cache + cache_params: + type: "redis-semantic" + similarity_threshold: 0.8 # similarity threshold for semantic cache + redis_semantic_cache_embedding_model: azure-embedding-model # set this to a model_name set in model_list +``` + +### Step 2: Add Redis Credentials to .env +Set either `REDIS_URL` or the `REDIS_HOST` in your os environment, to enable caching. + + ```shell + REDIS_URL = "" # REDIS_URL='redis://username:password@hostname:port/database' + ## OR ## + REDIS_HOST = "" # REDIS_HOST='redis-18841.c274.us-east-1-3.ec2.cloud.redislabs.com' + REDIS_PORT = "" # REDIS_PORT='18841' + REDIS_PASSWORD = "" # REDIS_PASSWORD='liteLlmIsAmazing' + ``` + +**Additional kwargs** +You can pass in any additional redis.Redis arg, by storing the variable + value in your os environment, like this: +```shell +REDIS_ = "" +``` + +### Step 3: Run proxy with config +```shell +$ litellm --config /path/to/config.yaml +``` + From 755f44613d0614b771a255093added5ca8533bdf Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 10:35:21 -0800 Subject: [PATCH 48/56] (feat) redis-semantic cache on proxy --- litellm/proxy/proxy_server.py | 5 ++++- requirements.txt | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index c2d3d194a..6f442f1ae 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -1178,7 +1178,7 @@ class ProxyConfig: verbose_proxy_logger.debug(f"passed cache type={cache_type}") - if cache_type == "redis": + if cache_type == "redis" or cache_type == "redis-semantic": cache_host = litellm.get_secret("REDIS_HOST", None) cache_port = litellm.get_secret("REDIS_PORT", None) cache_password = litellm.get_secret("REDIS_PASSWORD", None) @@ -1205,6 +1205,9 @@ class ProxyConfig: f"{blue_color_code}Cache Password:{reset_color_code} {cache_password}" ) print() # noqa + if cache_type == "redis-semantic": + # by default this should always be async + cache_params.update({"redis_semantic_cache_use_async": True}) # users can pass os.environ/ variables on the proxy - we should read them from the env for key, value in cache_params.items(): diff --git a/requirements.txt b/requirements.txt index b0a49553d..3ace5872a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,6 +10,7 @@ gunicorn==21.2.0 # server dep boto3==1.28.58 # aws bedrock/sagemaker calls redis==4.6.0 # caching redisvl==0.0.7 # semantic caching +numpy==1.24.3 # semantic caching prisma==0.11.0 # for db mangum==0.17.0 # for aws lambda functions google-generativeai==0.3.2 # for vertex ai calls From 9f7ec4c9f9bda7d524a637ca42c07a721b47eaa9 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 10:39:44 -0800 Subject: [PATCH 49/56] (fix) test-semantic caching --- litellm/tests/test_caching.py | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/litellm/tests/test_caching.py b/litellm/tests/test_caching.py index a1a42ff65..cc18dda16 100644 --- a/litellm/tests/test_caching.py +++ b/litellm/tests/test_caching.py @@ -1019,8 +1019,20 @@ def test_redis_semantic_cache_completion(): ) print(f"response1: {response1}") - assert response1.id == "chatcmpl-8p5GejSWLJ1pDI1lfhc6Idhwd2bDJ" - # assert response1.choices[0].message == 1 + random_number = random.randint(1, 100000) + + response2 = completion( + model="gpt-3.5-turbo", + messages=[ + { + "role": "user", + "content": f"write a one sentence poem about: {random_number}", + } + ], + max_tokens=20, + ) + print(f"response2: {response1}") + assert 
response1.id == response2.id # test_redis_cache_completion() @@ -1054,8 +1066,20 @@ async def test_redis_semantic_cache_acompletion(): "content": f"write a one sentence poem about: {random_number}", } ], - max_tokens=20, + max_tokens=5, ) print(f"response1: {response1}") - assert response1.id == "chatcmpl-8pI86yvT7fvgLDjngZSKULy1iP1o5" + random_number = random.randint(1, 100000) + response2 = await litellm.acompletion( + model="gpt-3.5-turbo", + messages=[ + { + "role": "user", + "content": f"write a one sentence poem about: {random_number}", + } + ], + max_tokens=5, + ) + print(f"response2: {response2}") + assert response1.id == response2.id From f244e568931959e1e2eeaff1bad298b16249577f Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 10:53:28 -0800 Subject: [PATCH 50/56] (docs) redis cache --- docs/my-website/docs/caching/redis_cache.md | 68 +++++++++++++++++++-- 1 file changed, 64 insertions(+), 4 deletions(-) diff --git a/docs/my-website/docs/caching/redis_cache.md b/docs/my-website/docs/caching/redis_cache.md index 8a580f087..7b21d35b6 100644 --- a/docs/my-website/docs/caching/redis_cache.md +++ b/docs/my-website/docs/caching/redis_cache.md @@ -1,11 +1,11 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -# Caching - In-Memory, Redis, s3 +# Caching - In-Memory, Redis, s3, Redis Semantic Cache [**See Code**](https://github.com/BerriAI/litellm/blob/main/litellm/caching.py) -## Initialize Cache - In Memory, Redis, s3 Bucket +## Initialize Cache - In Memory, Redis, s3 Bucket, Redis Semantic Cache @@ -18,7 +18,7 @@ pip install redis ``` For the hosted version you can setup your own Redis DB here: https://app.redislabs.com/ -### Quick Start + ```python import litellm from litellm import completion @@ -55,7 +55,7 @@ Set AWS environment variables AWS_ACCESS_KEY_ID = "AKI*******" AWS_SECRET_ACCESS_KEY = "WOl*****" ``` -### Quick Start + ```python import litellm from litellm import completion @@ -80,6 +80,66 @@ response2 = completion( + + +Install redis +```shell +pip install redisvl==0.0.7 +``` + +For the hosted version you can setup your own Redis DB here: https://app.redislabs.com/ + +```python +import litellm +from litellm import completion +from litellm.caching import Cache + +random_number = random.randint( + 1, 100000 +) # add a random number to ensure it's always adding / reading from cache + +print("testing semantic caching") +litellm.cache = Cache( + type="redis-semantic", + host=os.environ["REDIS_HOST"], + port=os.environ["REDIS_PORT"], + password=os.environ["REDIS_PASSWORD"], + similarity_threshold=0.8, + redis_semantic_cache_embedding_model="text-embedding-ada-002", # this model is passed to litellm.embedding(), any litellm.embedding() model is supported here +) +response1 = completion( + model="gpt-3.5-turbo", + messages=[ + { + "role": "user", + "content": f"write a one sentence poem about: {random_number}", + } + ], + max_tokens=20, +) +print(f"response1: {response1}") + +random_number = random.randint(1, 100000) + +response2 = completion( + model="gpt-3.5-turbo", + messages=[ + { + "role": "user", + "content": f"write a one sentence poem about: {random_number}", + } + ], + max_tokens=20, +) +print(f"response2: {response1}") +assert response1.id == response2.id +# response1 == response2, response 1 is cached +``` + + + + + ### Quick Start From bf6e3f807b930d9997da08a5da2f576a7b6da503 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 10:54:55 -0800 Subject: [PATCH 51/56] (docs) litellm semantic caching --- 
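Note on `similarity_threshold` (illustration only, added as a patch note): the semantic cache reuses a stored response when a new prompt's embedding is sufficiently close to the embedding of a previously cached prompt. The sketch below shows the idea with a plain cosine-similarity check. The helpers `cosine_similarity` and `is_semantic_cache_hit` are hypothetical names used for illustration; the real comparison happens inside redisvl over embeddings stored in Redis and may use a distance metric rather than this exact computation.

```python
# Conceptual sketch of a semantic-cache hit decision, NOT LiteLLM/redisvl code.
import math


def cosine_similarity(a: list[float], b: list[float]) -> float:
    # Standard cosine similarity between two embedding vectors.
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = math.sqrt(sum(x * x for x in a))
    norm_b = math.sqrt(sum(x * x for x in b))
    return dot / (norm_a * norm_b)


def is_semantic_cache_hit(
    new_embedding: list[float],
    cached_embedding: list[float],
    similarity_threshold: float = 0.8,
) -> bool:
    # Reuse the cached response only if the new prompt's embedding is
    # "close enough" to a cached prompt's embedding.
    return cosine_similarity(new_embedding, cached_embedding) >= similarity_threshold


# Near-duplicate prompts -> hit; unrelated prompts -> miss
print(is_semantic_cache_hit([0.9, 0.1, 0.0], [0.89, 0.12, 0.01]))  # True
print(is_semantic_cache_hit([0.9, 0.1, 0.0], [0.0, 0.2, 0.98]))    # False
```

With `similarity_threshold: 0.8`, near-duplicate prompts behave like the first call above and return the cached response, while unrelated prompts behave like the second call and trigger a fresh LLM request.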
docs/my-website/docs/caching/redis_cache.md | 2 +- docs/my-website/docs/proxy/caching.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/my-website/docs/caching/redis_cache.md b/docs/my-website/docs/caching/redis_cache.md index 7b21d35b6..75e1db955 100644 --- a/docs/my-website/docs/caching/redis_cache.md +++ b/docs/my-website/docs/caching/redis_cache.md @@ -104,7 +104,7 @@ litellm.cache = Cache( host=os.environ["REDIS_HOST"], port=os.environ["REDIS_PORT"], password=os.environ["REDIS_PASSWORD"], - similarity_threshold=0.8, + similarity_threshold=0.8, # similarity threshold for cache hits, 0 == no similarity, 1 = exact matches, 0.5 == 50% similarity redis_semantic_cache_embedding_model="text-embedding-ada-002", # this model is passed to litellm.embedding(), any litellm.embedding() model is supported here ) response1 = completion( diff --git a/docs/my-website/docs/proxy/caching.md b/docs/my-website/docs/proxy/caching.md index 3f2687824..d5b589e5c 100644 --- a/docs/my-website/docs/proxy/caching.md +++ b/docs/my-website/docs/proxy/caching.md @@ -7,6 +7,7 @@ Cache LLM Responses LiteLLM supports: - In Memory Cache - Redis Cache +- Redis Semantic Cache - s3 Bucket Cache ## Quick Start - Redis, s3 Cache, Semantic Cache From ba4ca4d02a27edcf93da42acb369cb3b29a9ee48 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 10:55:15 -0800 Subject: [PATCH 52/56] (fix) semantic caching --- litellm/tests/test_caching.py | 1 + 1 file changed, 1 insertion(+) diff --git a/litellm/tests/test_caching.py b/litellm/tests/test_caching.py index cc18dda16..96fd8eb9d 100644 --- a/litellm/tests/test_caching.py +++ b/litellm/tests/test_caching.py @@ -1006,6 +1006,7 @@ def test_redis_semantic_cache_completion(): port=os.environ["REDIS_PORT"], password=os.environ["REDIS_PASSWORD"], similarity_threshold=0.8, + redis_semantic_cache_embedding_model="text-embedding-ada-002", ) response1 = completion( model="gpt-3.5-turbo", From d2a44de4c9fbc4c14f2f58a247492ab85c0e0e59 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 11:04:19 -0800 Subject: [PATCH 53/56] (fix) mark semantic caching as beta test --- litellm/tests/test_caching.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/litellm/tests/test_caching.py b/litellm/tests/test_caching.py index 96fd8eb9d..6cb5b974a 100644 --- a/litellm/tests/test_caching.py +++ b/litellm/tests/test_caching.py @@ -989,6 +989,7 @@ def test_cache_context_managers(): # test_cache_context_managers() +@pytest.mark.skip(reason="beta test - new redis semantic cache") def test_redis_semantic_cache_completion(): litellm.set_verbose = True import logging @@ -1039,6 +1040,7 @@ def test_redis_semantic_cache_completion(): # test_redis_cache_completion() +@pytest.mark.skip(reason="beta test - new redis semantic cache") @pytest.mark.asyncio async def test_redis_semantic_cache_acompletion(): litellm.set_verbose = True From f0e632ebc8a1fab37b57442a62df4dd7aef88353 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 6 Feb 2024 13:26:48 -0800 Subject: [PATCH 54/56] (ci/cd) run again --- litellm/tests/test_caching.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/litellm/tests/test_caching.py b/litellm/tests/test_caching.py index 6cb5b974a..8433941e9 100644 --- a/litellm/tests/test_caching.py +++ b/litellm/tests/test_caching.py @@ -998,7 +998,7 @@ def test_redis_semantic_cache_completion(): random_number = random.randint( 1, 100000 - ) # add a random number to ensure it's always adding / reading from cache + ) # add a random number to ensure 
it's always adding /reading from cache print("testing semantic caching") litellm.cache = Cache( From f0d4b62b6bcb494e235f8ee7dbbcfb94943ff468 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Tue, 6 Feb 2024 13:29:31 -0800 Subject: [PATCH 55/56] test(test_completion.py): fix test --- docs/my-website/docs/proxy/caching.md | 7 ++++--- litellm/tests/test_completion.py | 1 - 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/my-website/docs/proxy/caching.md b/docs/my-website/docs/proxy/caching.md index d5b589e5c..2b385de8e 100644 --- a/docs/my-website/docs/proxy/caching.md +++ b/docs/my-website/docs/proxy/caching.md @@ -211,9 +211,10 @@ litellm_settings: The proxy support 3 cache-controls: -- `ttl`: Will cache the response for the user-defined amount of time (in seconds). -- `s-maxage`: Will only accept cached responses that are within user-defined range (in seconds). -- `no-cache`: Will not return a cached response, but instead call the actual endpoint. +- `ttl`: *Optional(int)* - Will cache the response for the user-defined amount of time (in seconds). +- `s-maxage`: *Optional(int)* Will only accept cached responses that are within user-defined range (in seconds). +- `no-cache`: *Optional(bool)* Will not return a cached response, but instead call the actual endpoint. +- `no-store`: *Optional(bool)* Will not cache the response. [Let us know if you need more](https://github.com/BerriAI/litellm/issues/1218) diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py index de79c97af..b075e4819 100644 --- a/litellm/tests/test_completion.py +++ b/litellm/tests/test_completion.py @@ -565,7 +565,6 @@ def test_completion_openai(): assert len(response_str) > 1 litellm.api_key = None - raise Exception("it works!") except Timeout as e: pass except Exception as e: From 0874c17a3149fe741884340a30c0524ced63964d Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Tue, 6 Feb 2024 20:10:19 -0800 Subject: [PATCH 56/56] fix: export npm build into proxy --- litellm/proxy/_experimental/out/404.html | 2 +- ...ayout-4d667c133e03c98b.js => layout-ea657eeec2abf062.js} | 2 +- .../out/_next/static/chunks/app/page-7f03ccc8529ada97.js | 1 + .../out/_next/static/chunks/app/page-992f4cdd1053ee86.js | 1 - .../out/_next/static/chunks/main-app-096338c8e1915716.js | 2 +- .../out/_next/static/chunks/main-app-9b4fb13a7db53edf.js | 1 - .../_buildManifest.js | 0 .../_ssgManifest.js | 0 litellm/proxy/_experimental/out/index.html | 2 +- litellm/proxy/_experimental/out/index.txt | 6 +++--- litellm/proxy/proxy_server.py | 6 +++++- ui/litellm-dashboard/out/404.html | 2 +- .../out/_next/static/chunks/app/layout-4d667c133e03c98b.js | 1 - .../out/_next/static/chunks/app/page-992f4cdd1053ee86.js | 1 - .../_next/static/lGjwnJSGwBqa476jHHI8W/_buildManifest.js | 1 - .../out/_next/static/lGjwnJSGwBqa476jHHI8W/_ssgManifest.js | 1 - ui/litellm-dashboard/out/index.html | 2 +- ui/litellm-dashboard/out/index.txt | 6 +++--- ui/litellm-dashboard/src/components/networking.tsx | 2 +- 19 files changed, 19 insertions(+), 20 deletions(-) rename litellm/proxy/_experimental/out/_next/static/chunks/app/{layout-4d667c133e03c98b.js => layout-ea657eeec2abf062.js} (60%) create mode 100644 litellm/proxy/_experimental/out/_next/static/chunks/app/page-7f03ccc8529ada97.js delete mode 100644 litellm/proxy/_experimental/out/_next/static/chunks/app/page-992f4cdd1053ee86.js rename ui/litellm-dashboard/out/_next/static/chunks/main-app-9b4fb13a7db53edf.js => 
litellm/proxy/_experimental/out/_next/static/chunks/main-app-096338c8e1915716.js (54%) delete mode 100644 litellm/proxy/_experimental/out/_next/static/chunks/main-app-9b4fb13a7db53edf.js rename litellm/proxy/_experimental/out/_next/static/{lGjwnJSGwBqa476jHHI8W => p5gDwQBbgW8D3Uz3lgoZg}/_buildManifest.js (100%) rename litellm/proxy/_experimental/out/_next/static/{lGjwnJSGwBqa476jHHI8W => p5gDwQBbgW8D3Uz3lgoZg}/_ssgManifest.js (100%) delete mode 100644 ui/litellm-dashboard/out/_next/static/chunks/app/layout-4d667c133e03c98b.js delete mode 100644 ui/litellm-dashboard/out/_next/static/chunks/app/page-992f4cdd1053ee86.js delete mode 100644 ui/litellm-dashboard/out/_next/static/lGjwnJSGwBqa476jHHI8W/_buildManifest.js delete mode 100644 ui/litellm-dashboard/out/_next/static/lGjwnJSGwBqa476jHHI8W/_ssgManifest.js diff --git a/litellm/proxy/_experimental/out/404.html b/litellm/proxy/_experimental/out/404.html index 1ec6cd9a4..c57eb5193 100644 --- a/litellm/proxy/_experimental/out/404.html +++ b/litellm/proxy/_experimental/out/404.html @@ -1 +1 @@ -404: This page could not be found.Create Next App

\ No newline at end of file +404: This page could not be found.🚅 LiteLLM

\ No newline at end of file diff --git a/litellm/proxy/_experimental/out/_next/static/chunks/app/layout-4d667c133e03c98b.js b/litellm/proxy/_experimental/out/_next/static/chunks/app/layout-ea657eeec2abf062.js similarity index 60% rename from litellm/proxy/_experimental/out/_next/static/chunks/app/layout-4d667c133e03c98b.js rename to litellm/proxy/_experimental/out/_next/static/chunks/app/layout-ea657eeec2abf062.js index e261adc05..fe5260feb 100644 --- a/litellm/proxy/_experimental/out/_next/static/chunks/app/layout-4d667c133e03c98b.js +++ b/litellm/proxy/_experimental/out/_next/static/chunks/app/layout-ea657eeec2abf062.js @@ -1 +1 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[185],{87421:function(n,e,t){Promise.resolve().then(t.t.bind(t,99646,23)),Promise.resolve().then(t.t.bind(t,63385,23))},63385:function(){},99646:function(n){n.exports={style:{fontFamily:"'__Inter_c23dc8', '__Inter_Fallback_c23dc8'",fontStyle:"normal"},className:"__className_c23dc8"}}},function(n){n.O(0,[971,69,744],function(){return n(n.s=87421)}),_N_E=n.O()}]); \ No newline at end of file +(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[185],{11837:function(n,e,t){Promise.resolve().then(t.t.bind(t,99646,23)),Promise.resolve().then(t.t.bind(t,63385,23))},63385:function(){},99646:function(n){n.exports={style:{fontFamily:"'__Inter_c23dc8', '__Inter_Fallback_c23dc8'",fontStyle:"normal"},className:"__className_c23dc8"}}},function(n){n.O(0,[971,69,744],function(){return n(n.s=11837)}),_N_E=n.O()}]); \ No newline at end of file diff --git a/litellm/proxy/_experimental/out/_next/static/chunks/app/page-7f03ccc8529ada97.js b/litellm/proxy/_experimental/out/_next/static/chunks/app/page-7f03ccc8529ada97.js new file mode 100644 index 000000000..c8c53fcee --- /dev/null +++ b/litellm/proxy/_experimental/out/_next/static/chunks/app/page-7f03ccc8529ada97.js @@ -0,0 +1 @@ +(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[931],{27376:function(e,t,l){Promise.resolve().then(l.bind(l,27680))},27680:function(e,t,l){"use strict";l.r(t),l.d(t,{default:function(){return B}});var s=l(3827),r=l(64090),a=l(80588);let n=async(e,t,l)=>{try{if(console.log("Form Values in keyCreateCall:",l),l.description&&(l.metadata||(l.metadata={}),l.metadata.description=l.description,delete l.description,l.metadata=JSON.stringify(l.metadata)),l.metadata){console.log("formValues.metadata:",l.metadata);try{l.metadata=JSON.parse(l.metadata)}catch(e){throw a.ZP.error("Failed to parse metadata: "+e),Error("Failed to parse metadata: "+e)}}console.log("Form Values after check:",l);let s=await fetch("/key/generate",{method:"POST",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"},body:JSON.stringify({user_id:t,...l})});if(!s.ok){let e=await s.text();throw a.ZP.error("Failed to create key: "+e),console.error("Error response from the server:",e),Error("Network response was not ok")}let r=await s.json();return console.log("API Response:",r),r}catch(e){throw console.error("Failed to create key:",e),e}},o=async(e,t)=>{try{console.log("in keyDeleteCall:",t),a.ZP.info("Making key delete request");let l=await fetch("/key/delete",{method:"POST",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"},body:JSON.stringify({keys:[t]})});if(!l.ok){let e=await l.text();throw a.ZP.error("Failed to delete key: "+e),Error("Network response was not ok")}let s=await l.json();return console.log(s),a.ZP.success("API Key Deleted"),s}catch(e){throw console.error("Failed to create 
key:",e),e}},i=async(e,t,l)=>{try{let s="/user/info";"App Owner"==l&&(s="".concat(s,"/?user_id=").concat(t)),a.ZP.info("Requesting user data");let r=await fetch(s,{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!r.ok){let e=await r.text();throw a.ZP.error(e),Error("Network response was not ok")}let n=await r.json();return a.ZP.info("Received user data"),n}catch(e){throw console.error("Failed to create key:",e),e}},c=async(e,t)=>{try{let l="/spend/logs";console.log("in keySpendLogsCall:",l);let s=await fetch("".concat(l,"/?api_key=").concat(t),{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!s.ok){let e=await s.text();throw a.ZP.error(e),Error("Network response was not ok")}let r=await s.json();return console.log(r),r}catch(e){throw console.error("Failed to create key:",e),e}};var d=l(10384),h=l(46453),u=l(2179),m=l(71801),x=l(96776),j=l(2902),p=l(77171),y=l(29714),Z=l(88707),g=l(1861);let{Option:f}=x.default;var k=e=>{let{userID:t,userRole:l,accessToken:o,data:i,setData:c}=e,[x]=j.Z.useForm(),[f,k]=(0,r.useState)(!1),[w,b]=(0,r.useState)(null),S=()=>{k(!1),x.resetFields()},N=()=>{k(!1),b(null),x.resetFields()},_=async e=>{try{a.ZP.info("Making API Call"),e.models&&""!==e.models.trim()?e.models=e.models.split(",").map(e=>e.trim()):e.models=[],k(!0);let l=await n(o,t,e);c(e=>e?[...e,l]:[l]),b(l.key),a.ZP.success("API Key Created"),x.resetFields()}catch(e){console.error("Error creating the key:",e)}};return(0,s.jsxs)("div",{children:[(0,s.jsx)(u.Z,{className:"mx-auto",onClick:()=>k(!0),children:"+ Create New Key"}),(0,s.jsx)(p.Z,{title:"Create Key",visible:f,width:800,footer:null,onOk:S,onCancel:N,children:(0,s.jsxs)(j.Z,{form:x,onFinish:_,labelCol:{span:6},wrapperCol:{span:16},labelAlign:"left",children:["App Owner"===l||"Admin"===l?(0,s.jsxs)(s.Fragment,{children:[(0,s.jsx)(j.Z.Item,{label:"Key Name",name:"key_alias",children:(0,s.jsx)(y.Z,{})}),(0,s.jsx)(j.Z.Item,{label:"Team ID",name:"team_id",children:(0,s.jsx)(y.Z,{placeholder:"ai_team"})}),(0,s.jsx)(j.Z.Item,{label:"Models (Comma Separated). Eg: gpt-3.5-turbo,gpt-4",name:"models",children:(0,s.jsx)(y.Z,{placeholder:"gpt-4,gpt-3.5-turbo"})}),(0,s.jsx)(j.Z.Item,{label:"Max Budget (USD)",name:"max_budget",children:(0,s.jsx)(Z.Z,{step:.01,precision:2,width:200})}),(0,s.jsx)(j.Z.Item,{label:"Duration (eg: 30s, 30h, 30d)",name:"duration",children:(0,s.jsx)(y.Z,{})}),(0,s.jsx)(j.Z.Item,{label:"Metadata",name:"metadata",children:(0,s.jsx)(y.Z.TextArea,{rows:4,placeholder:"Enter metadata as JSON"})})]}):(0,s.jsxs)(s.Fragment,{children:[(0,s.jsx)(j.Z.Item,{label:"Key Name",name:"key_alias",children:(0,s.jsx)(y.Z,{})}),(0,s.jsx)(j.Z.Item,{label:"Team ID (Contact Group)",name:"team_id",children:(0,s.jsx)(y.Z,{placeholder:"ai_team"})}),(0,s.jsx)(j.Z.Item,{label:"Description",name:"description",children:(0,s.jsx)(y.Z.TextArea,{placeholder:"Enter description",rows:4})})]}),(0,s.jsx)("div",{style:{textAlign:"right",marginTop:"10px"},children:(0,s.jsx)(g.ZP,{htmlType:"submit",children:"Create Key"})})]})}),w&&(0,s.jsx)(p.Z,{title:"Save your key",visible:f,onOk:S,onCancel:N,footer:null,children:(0,s.jsxs)(h.Z,{numItems:1,className:"gap-2 w-full",children:[(0,s.jsx)(d.Z,{numColSpan:1,children:(0,s.jsxs)("p",{children:["Please save this secret key somewhere safe and accessible. For security reasons, ",(0,s.jsx)("b",{children:"you will not be able to view it again"})," ","through your LiteLLM account. 
If you lose this secret key, you will need to generate a new one."]})}),(0,s.jsx)(d.Z,{numColSpan:1,children:null!=w?(0,s.jsxs)(m.Z,{children:["API Key: ",w]}):(0,s.jsx)(m.Z,{children:"Key being created, this might take 30s"})})]})})]})},w=l(33393),b=l(13810),S=l(61244),N=l(10827),_=l(3851),D=l(2044),v=l(64167),C=l(74480),I=l(7178),E=l(42440),T=l(9853),A=l(67989),O=l(56863),P=e=>{let{token:t,accessToken:l,keySpend:a,keyBudget:n,keyName:o}=e,[i,d]=(0,r.useState)(!1),[h,m]=(0,r.useState)(null),[x,j]=(0,r.useState)(null),y=async()=>{try{if(null==l||null==t)return;console.log("accessToken: ".concat(l,"; token: ").concat(t));let e=await c(l,t);console.log("Response:",e);let s=Object.values(e).reduce((e,t)=>{let l=new Date(t.startTime),s=new Intl.DateTimeFormat("en-US",{day:"2-digit",month:"short"}).format(l);return e[s]=(e[s]||0)+t.spend,e},{}),r=Object.entries(s);r.sort((e,t)=>{let[l]=e,[s]=t,r=new Date(l),a=new Date(s);return r.getTime()-a.getTime()});let a=Object.fromEntries(r);console.log(a);let n=Object.values(e).reduce((e,t)=>{let l=t.user;return e[l]=(e[l]||0)+t.spend,e},{});console.log(s),console.log(n);let o=[];for(let[e,t]of Object.entries(a))o.push({day:e,spend:t});let i=Object.entries(n).sort((e,t)=>t[1]-e[1]).slice(0,5).map(e=>{let[t,l]=e;return{name:t,value:l}});m(o),j(i),console.log("arrayBarChart:",o)}catch(e){console.error("There was an error fetching the data",e)}};return t?(0,s.jsxs)("div",{children:[(0,s.jsx)(u.Z,{className:"mx-auto",onClick:()=>{console.log("Show Modal triggered"),d(!0),y()},children:"View Spend Report"}),(0,s.jsxs)(p.Z,{visible:i,width:1e3,onOk:()=>{d(!1)},onCancel:()=>{d(!1)},footer:null,children:[(0,s.jsxs)(E.Z,{style:{textAlign:"left"},children:["Key Name: ",o]}),(0,s.jsxs)(O.Z,{children:["Monthly Spend $",a]}),(0,s.jsx)(b.Z,{className:"mt-6 mb-6",children:h&&(0,s.jsx)(T.Z,{className:"mt-6",data:h,colors:["green"],index:"day",categories:["spend"],yAxisWidth:48})}),(0,s.jsx)(E.Z,{className:"mt-6",children:"Top 5 Users Spend (USD)"}),(0,s.jsx)(b.Z,{className:"mb-6",children:x&&(0,s.jsx)(A.Z,{className:"mt-6",data:x,color:"teal"})})]})]}):null},F=e=>{let{userID:t,accessToken:l,data:a,setData:n}=e,[i,c]=(0,r.useState)(!1),d=async e=>{if(null!=a)try{await o(l,e);let t=a.filter(t=>t.token!==e);n(t)}catch(e){console.error("Error deleting the key:",e)}};if(null!=a)return console.log("RERENDER TRIGGERED"),(0,s.jsxs)(b.Z,{className:"w-full mx-auto flex-auto overflow-y-auto max-h-[50vh] mb-4",children:[(0,s.jsx)(E.Z,{children:"API Keys"}),(0,s.jsxs)(N.Z,{className:"mt-5",children:[(0,s.jsx)(v.Z,{children:(0,s.jsxs)(I.Z,{children:[(0,s.jsx)(C.Z,{children:"Key Alias"}),(0,s.jsx)(C.Z,{children:"Secret Key"}),(0,s.jsx)(C.Z,{children:"Spend (USD)"}),(0,s.jsx)(C.Z,{children:"Key Budget (USD)"}),(0,s.jsx)(C.Z,{children:"Team ID"}),(0,s.jsx)(C.Z,{children:"Metadata"}),(0,s.jsx)(C.Z,{children:"Expires"})]})}),(0,s.jsx)(_.Z,{children:a.map(e=>(console.log(e),"litellm-dashboard"===e.team_id)?null:(0,s.jsxs)(I.Z,{children:[(0,s.jsx)(D.Z,{children:null!=e.key_alias?(0,s.jsx)(m.Z,{children:e.key_alias}):(0,s.jsx)(m.Z,{children:"Not Set"})}),(0,s.jsx)(D.Z,{children:(0,s.jsx)(m.Z,{children:e.key_name})}),(0,s.jsx)(D.Z,{children:(0,s.jsx)(m.Z,{children:e.spend})}),(0,s.jsx)(D.Z,{children:null!=e.max_budget?(0,s.jsx)(m.Z,{children:e.max_budget}):(0,s.jsx)(m.Z,{children:"Unlimited 
Budget"})}),(0,s.jsx)(D.Z,{children:(0,s.jsx)(m.Z,{children:e.team_id})}),(0,s.jsx)(D.Z,{children:(0,s.jsx)(m.Z,{children:JSON.stringify(e.metadata)})}),(0,s.jsx)(D.Z,{children:null!=e.expires?(0,s.jsx)(m.Z,{children:e.expires}):(0,s.jsx)(m.Z,{children:"Never expires"})}),(0,s.jsx)(D.Z,{children:(0,s.jsx)(S.Z,{onClick:()=>d(e.token),icon:w.Z,size:"sm"})}),(0,s.jsx)(D.Z,{children:(0,s.jsx)(P,{token:e.token,accessToken:l,keySpend:e.spend,keyBudget:e.max_budget,keyName:e.key_name})})]},e.token))})]})]})},R=e=>{let{userID:t,userSpendData:l}=e;console.log("User SpendData:",l);let r=null==l?void 0:l.spend,a=(null==l?void 0:l.max_budget)||null,n=null!==a?"$".concat(a," limit"):"No limit";return"$".concat(r," / ").concat(n),(0,s.jsx)(s.Fragment,{children:(0,s.jsxs)(b.Z,{className:"mx-auto mb-4",children:[(0,s.jsxs)(O.Z,{children:["$",r]}),(0,s.jsxs)(E.Z,{children:["/ ",n]})]})})},U=l(8792),K=e=>{let{userID:t,userRole:l,userEmail:r}=e;return console.log("User ID:",t),console.log("userEmail:",r),(0,s.jsxs)("nav",{className:"left-0 right-0 top-0 flex justify-between items-center h-12 mb-4",children:[(0,s.jsx)("div",{className:"text-left mx-4 my-2 absolute top-0 left-0",children:(0,s.jsx)("div",{className:"flex flex-col items-center",children:(0,s.jsx)(U.default,{href:"/",children:(0,s.jsx)("button",{className:"text-gray-800 text-2xl px-4 py-1 rounded text-center",children:"\uD83D\uDE85 LiteLLM"})})})}),(0,s.jsx)("div",{className:"text-right mx-4 my-2 absolute top-0 right-0",children:(0,s.jsxs)(u.Z,{variant:"secondary",children:[r,(0,s.jsxs)("p",{children:["Role: ",l]}),(0,s.jsxs)("p",{children:["ID: ",t]})]})})]})},L=l(47907),M=l(37963);console.log("isLocal:",!1);var B=()=>{let[e,t]=(0,r.useState)(null),[l,a]=(0,r.useState)(null),n=(0,L.useSearchParams)(),o=n.get("userID");n.get("viewSpend");let c=n.get("token"),[u,m]=(0,r.useState)(null),[x,j]=(0,r.useState)(null),[p,y]=(0,r.useState)(null);if((0,r.useEffect)(()=>{if(c){let e=(0,M.o)(c);if(e){if(console.log("Decoded token:",e),console.log("Decoded key:",e.key),m(e.key),e.user_role){let t=function(e){if(!e)return"Undefined Role";switch(console.log("Received user role: ".concat(e)),e.toLowerCase()){case"app_owner":case"demo_app_owner":return"App Owner";case"app_admin":return"Admin";case"app_user":return"App User";default:return"Unknown Role"}}(e.user_role);console.log("Decoded user_role:",t),j(t)}else console.log("User role not defined");e.user_email?y(e.user_email):console.log("User Email is not set ".concat(e))}}o&&u&&x&&!e&&(async()=>{try{let e=await i(u,o,x);a(e.user_info),t(e.keys)}catch(e){console.error("There was an error fetching the data",e)}})()},[o,c,u,e]),null==o||null==c){let e="/sso/key/generate";return console.log("Full URL:",e),window.location.href=e,null}return null==u?null:(null==x&&j("App Owner"),(0,s.jsxs)("div",{children:[(0,s.jsx)(K,{userID:o,userRole:x,userEmail:p}),(0,s.jsx)(h.Z,{numItems:1,className:"gap-0 p-10 h-[75vh] w-full",children:(0,s.jsxs)(d.Z,{numColSpan:1,children:[(0,s.jsx)(R,{userID:o,userSpendData:l}),(0,s.jsx)(F,{userID:o,accessToken:u,data:e,setData:t}),(0,s.jsx)(k,{userID:o,userRole:x,accessToken:u,data:e,setData:t})]})})]}))}}},function(e){e.O(0,[787,971,69,744],function(){return e(e.s=27376)}),_N_E=e.O()}]); \ No newline at end of file diff --git a/litellm/proxy/_experimental/out/_next/static/chunks/app/page-992f4cdd1053ee86.js b/litellm/proxy/_experimental/out/_next/static/chunks/app/page-992f4cdd1053ee86.js deleted file mode 100644 index cd4ccb43c..000000000 --- 
a/litellm/proxy/_experimental/out/_next/static/chunks/app/page-992f4cdd1053ee86.js +++ /dev/null @@ -1 +0,0 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[931],{88102:function(e,t,l){Promise.resolve().then(l.bind(l,27680))},27680:function(e,t,l){"use strict";l.r(t),l.d(t,{default:function(){return M}});var r=l(3827),s=l(64090),n=l(80588);let a=async(e,t,l)=>{try{if(console.log("Form Values in keyCreateCall:",l),l.description&&(l.metadata||(l.metadata={}),l.metadata.description=l.description,delete l.description,l.metadata=JSON.stringify(l.metadata)),l.metadata){console.log("formValues.metadata:",l.metadata);try{l.metadata=JSON.parse(l.metadata)}catch(e){throw n.ZP.error("Failed to parse metadata: "+e),Error("Failed to parse metadata: "+e)}}console.log("Form Values after check:",l);let r=await fetch("/key/generate",{method:"POST",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"},body:JSON.stringify({user_id:t,...l})});if(!r.ok){let e=await r.text();throw n.ZP.error("Failed to create key: "+e),console.error("Error response from the server:",e),Error("Network response was not ok")}let s=await r.json();return console.log("API Response:",s),s}catch(e){throw console.error("Failed to create key:",e),e}},o=async(e,t)=>{try{console.log("in keyDeleteCall:",t);let l=await fetch("/key/delete",{method:"POST",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"},body:JSON.stringify({keys:[t]})});if(!l.ok){let e=await l.text();throw n.ZP.error("Failed to delete key: "+e),Error("Network response was not ok")}let r=await l.json();return console.log(r),n.ZP.success("API Key Deleted"),r}catch(e){throw console.error("Failed to create key:",e),e}},i=async(e,t)=>{try{let l="/user/info";console.log("in userInfoCall:",l);let r=await fetch("".concat(l,"/?user_id=").concat(t),{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!r.ok){let e=await r.text();throw n.ZP.error(e),Error("Network response was not ok")}let s=await r.json();return console.log(s),s}catch(e){throw console.error("Failed to create key:",e),e}},c=async(e,t)=>{try{let l="/spend/logs";console.log("in keySpendLogsCall:",l);let r=await fetch("".concat(l,"/?api_key=").concat(t),{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!r.ok){let e=await r.text();throw n.ZP.error(e),Error("Network response was not ok")}let s=await r.json();return console.log(s),s}catch(e){throw console.error("Failed to create key:",e),e}};var d=l(10384),h=l(46453),u=l(2179),m=l(71801),x=l(96776),j=l(2902),p=l(77171),y=l(29714),Z=l(88707),g=l(1861);let{Option:f}=x.default;var w=e=>{let{userID:t,userRole:l,accessToken:o,data:i,setData:c}=e,[x]=j.Z.useForm(),[f,w]=(0,s.useState)(!1),[k,b]=(0,s.useState)(null),N=()=>{w(!1),x.resetFields()},S=()=>{w(!1),b(null),x.resetFields()},_=async e=>{try{n.ZP.info("Making API Call"),e.models&&""!==e.models.trim()?e.models=e.models.split(",").map(e=>e.trim()):e.models=[],w(!0);let l=await a(o,t,e);c(e=>e?[...e,l]:[l]),b(l.key),n.ZP.success("API Key Created"),x.resetFields()}catch(e){console.error("Error creating the key:",e)}};return(0,r.jsxs)("div",{children:[(0,r.jsx)(u.Z,{className:"mx-auto",onClick:()=>w(!0),children:"+ Create New Key"}),(0,r.jsx)(p.Z,{title:"Create Key",visible:f,width:800,footer:null,onOk:N,onCancel:S,children:(0,r.jsxs)(j.Z,{form:x,onFinish:_,labelCol:{span:6},wrapperCol:{span:16},labelAlign:"left",children:["App 
Owner"===l||"Admin"===l?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)(j.Z.Item,{label:"Key Name",name:"key_alias",children:(0,r.jsx)(y.Z,{})}),(0,r.jsx)(j.Z.Item,{label:"Team ID",name:"team_id",children:(0,r.jsx)(y.Z,{placeholder:"ai_team"})}),(0,r.jsx)(j.Z.Item,{label:"Models (Comma Separated). Eg: gpt-3.5-turbo,gpt-4",name:"models",children:(0,r.jsx)(y.Z,{placeholder:"gpt-4,gpt-3.5-turbo"})}),(0,r.jsx)(j.Z.Item,{label:"Max Budget (USD)",name:"max_budget",children:(0,r.jsx)(Z.Z,{step:.01,precision:2,width:200})}),(0,r.jsx)(j.Z.Item,{label:"Duration (eg: 30s, 30h, 30d)",name:"duration",children:(0,r.jsx)(y.Z,{})}),(0,r.jsx)(j.Z.Item,{label:"Metadata",name:"metadata",children:(0,r.jsx)(y.Z.TextArea,{rows:4,placeholder:"Enter metadata as JSON"})})]}):(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)(j.Z.Item,{label:"Key Name",name:"key_alias",children:(0,r.jsx)(y.Z,{})}),(0,r.jsx)(j.Z.Item,{label:"Team ID (Contact Group)",name:"team_id",children:(0,r.jsx)(y.Z,{placeholder:"ai_team"})}),(0,r.jsx)(j.Z.Item,{label:"Description",name:"description",children:(0,r.jsx)(y.Z.TextArea,{placeholder:"Enter description",rows:4})})]}),(0,r.jsx)("div",{style:{textAlign:"right",marginTop:"10px"},children:(0,r.jsx)(g.ZP,{htmlType:"submit",children:"Create Key"})})]})}),k&&(0,r.jsx)(p.Z,{title:"Save your key",visible:f,onOk:N,onCancel:S,footer:null,children:(0,r.jsxs)(h.Z,{numItems:1,className:"gap-2 w-full",children:[(0,r.jsx)(d.Z,{numColSpan:1,children:(0,r.jsxs)("p",{children:["Please save this secret key somewhere safe and accessible. For security reasons, ",(0,r.jsx)("b",{children:"you will not be able to view it again"})," ","through your LiteLLM account. If you lose this secret key, you will need to generate a new one."]})}),(0,r.jsx)(d.Z,{numColSpan:1,children:null!=k?(0,r.jsxs)(m.Z,{children:["API Key: ",k]}):(0,r.jsx)(m.Z,{children:"Key being created, this might take 30s"})})]})})]})},k=l(33393),b=l(13810),N=l(61244),S=l(10827),_=l(3851),D=l(2044),C=l(64167),v=l(74480),I=l(7178),E=l(42440),T=l(9853),A=l(67989),O=l(56863),F=e=>{let{token:t,accessToken:l,keySpend:n,keyBudget:a,keyName:o}=e,[i,d]=(0,s.useState)(!1),[h,m]=(0,s.useState)(null),[x,j]=(0,s.useState)(null),y=async()=>{try{if(null==l||null==t)return;let e=await c(l,t);console.log("Response:",e);let r=Object.values(e).reduce((e,t)=>{let l=new Date(t.startTime),r=new Intl.DateTimeFormat("en-US",{day:"2-digit",month:"short"}).format(l);return e[r]=(e[r]||0)+t.spend,e},{}),s=Object.entries(r);s.sort((e,t)=>{let[l]=e,[r]=t,s=new Date(l),n=new Date(r);return s.getTime()-n.getTime()});let n=Object.fromEntries(s);console.log(n);let a=Object.values(e).reduce((e,t)=>{let l=t.user;return e[l]=(e[l]||0)+t.spend,e},{});console.log(r),console.log(a);let o=[];for(let[e,t]of Object.entries(n))o.push({day:e,spend:t});let i=Object.entries(a).sort((e,t)=>t[1]-e[1]).slice(0,5).map(e=>{let[t,l]=e;return{name:t,value:l}});m(o),j(i),console.log("arrayBarChart:",o)}catch(e){console.error("There was an error fetching the data",e)}};return((0,s.useEffect)(()=>{y()},[t]),t)?(0,r.jsxs)("div",{children:[(0,r.jsx)(u.Z,{className:"mx-auto",onClick:()=>{d(!0)},children:"View Spend Report"}),(0,r.jsxs)(p.Z,{visible:i,width:1e3,onOk:()=>{d(!1)},onCancel:()=>{d(!1)},footer:null,children:[(0,r.jsxs)(E.Z,{style:{textAlign:"left"},children:["Key Name: ",o]}),(0,r.jsxs)(O.Z,{children:["Monthly Spend $",n]}),(0,r.jsx)(b.Z,{className:"mt-6 
mb-6",children:h&&(0,r.jsx)(T.Z,{className:"mt-6",data:h,colors:["green"],index:"day",categories:["spend"],yAxisWidth:48})}),(0,r.jsx)(E.Z,{className:"mt-6",children:"Top 5 Users Spend (USD)"}),(0,r.jsx)(b.Z,{className:"mb-6",children:x&&(0,r.jsx)(A.Z,{className:"mt-6",data:x,color:"teal"})})]})]}):null},P=e=>{let{userID:t,accessToken:l,data:s,setData:n}=e,a=async e=>{if(null!=s)try{await o(l,e);let t=s.filter(t=>t.token!==e);n(t)}catch(e){console.error("Error deleting the key:",e)}};if(null!=s)return console.log("RERENDER TRIGGERED"),(0,r.jsxs)(b.Z,{className:"w-full mx-auto flex-auto overflow-y-auto max-h-[50vh] mb-4",children:[(0,r.jsx)(E.Z,{children:"API Keys"}),(0,r.jsxs)(S.Z,{className:"mt-5",children:[(0,r.jsx)(C.Z,{children:(0,r.jsxs)(I.Z,{children:[(0,r.jsx)(v.Z,{children:"Key Alias"}),(0,r.jsx)(v.Z,{children:"Secret Key"}),(0,r.jsx)(v.Z,{children:"Spend (USD)"}),(0,r.jsx)(v.Z,{children:"Key Budget (USD)"}),(0,r.jsx)(v.Z,{children:"Team ID"}),(0,r.jsx)(v.Z,{children:"Metadata"}),(0,r.jsx)(v.Z,{children:"Expires"})]})}),(0,r.jsx)(_.Z,{children:s.map(e=>(console.log(e),"litellm-dashboard"===e.team_id)?null:(0,r.jsxs)(I.Z,{children:[(0,r.jsx)(D.Z,{children:null!=e.key_alias?(0,r.jsx)(m.Z,{children:e.key_alias}):(0,r.jsx)(m.Z,{children:"Not Set"})}),(0,r.jsx)(D.Z,{children:(0,r.jsx)(m.Z,{children:e.key_name})}),(0,r.jsx)(D.Z,{children:(0,r.jsx)(m.Z,{children:e.spend})}),(0,r.jsx)(D.Z,{children:null!=e.max_budget?(0,r.jsx)(m.Z,{children:e.max_budget}):(0,r.jsx)(m.Z,{children:"Unlimited Budget"})}),(0,r.jsx)(D.Z,{children:(0,r.jsx)(m.Z,{children:e.team_id})}),(0,r.jsx)(D.Z,{children:(0,r.jsx)(m.Z,{children:JSON.stringify(e.metadata)})}),(0,r.jsx)(D.Z,{children:null!=e.expires?(0,r.jsx)(m.Z,{children:e.expires}):(0,r.jsx)(m.Z,{children:"Never expires"})}),(0,r.jsx)(D.Z,{children:(0,r.jsx)(N.Z,{onClick:()=>a(e.token),icon:k.Z,size:"sm"})}),(0,r.jsx)(D.Z,{children:(0,r.jsx)(F,{token:e.token,accessToken:l,keySpend:e.spend,keyBudget:e.max_budget,keyName:e.key_name})})]},e.token))})]})]})},R=e=>{let{userID:t,userSpendData:l}=e;console.log("User SpendData:",l);let s=null==l?void 0:l.spend,n=(null==l?void 0:l.max_budget)||null,a=null!==n?"$".concat(n," limit"):"No limit";return"$".concat(s," / ").concat(a),(0,r.jsx)(r.Fragment,{children:(0,r.jsxs)(b.Z,{className:"mx-auto mb-4",children:[(0,r.jsxs)(O.Z,{children:["$",s]}),(0,r.jsxs)(E.Z,{children:["/ ",a]})]})})},K=l(8792),U=e=>{let{userID:t,userRole:l}=e;return console.log("User ID:",t),(0,r.jsxs)("nav",{className:"left-0 right-0 top-0 flex justify-between items-center h-12 mb-4",children:[(0,r.jsx)("div",{className:"text-left mx-4 my-2 absolute top-0 left-0",children:(0,r.jsx)("div",{className:"flex flex-col items-center",children:(0,r.jsx)(K.default,{href:"/",children:(0,r.jsx)("button",{className:"text-gray-800 text-2xl px-4 py-1 rounded text-center",children:"\uD83D\uDE85 LiteLLM"})})})}),(0,r.jsx)("div",{className:"text-right mx-4 my-2 absolute top-0 right-0",children:(0,r.jsxs)(u.Z,{variant:"secondary",children:[t,(0,r.jsxs)("p",{children:["Role: ",l]})]})})]})},B=l(47907),L=l(37963),M=()=>{let[e,t]=(0,s.useState)(null),[l,n]=(0,s.useState)(null),a=(0,B.useSearchParams)(),o=a.get("userID");a.get("viewSpend");let c=a.get("token"),[u,m]=(0,s.useState)(null),[x,j]=(0,s.useState)(null);if((0,s.useEffect)(()=>{if(c){let e=(0,L.o)(c);if(e){if(console.log("Decoded token:",e),console.log("Decoded key:",e.key),m(e.key),e.user_role){let t=function(e){if(!e)return"Undefined Role";switch(e.toLowerCase()){case"app_owner":return"App 
Owner";case"demo_app_owner":return"AppOwner";case"admin":return"Admin";case"app_user":return"App User";default:return"Unknown Role"}}(e.user_role);console.log("Decoded user_role:",t),j(t)}else console.log("User role not defined")}}o&&u&&!e&&(async()=>{try{let e=await i(u,o);console.log("Response:",e),n(e.user_info),t(e.keys)}catch(e){console.error("There was an error fetching the data",e)}})()},[o,c,u,e]),null==o||null==c){let e="/sso/key/generate";return console.log("Full URL:",e),window.location.href=e,null}return null==u?null:(null==x&&j("App Owner"),(0,r.jsxs)("div",{children:[(0,r.jsx)(U,{userID:o,userRole:x}),(0,r.jsx)(h.Z,{numItems:1,className:"gap-0 p-10 h-[75vh] w-full",children:(0,r.jsxs)(d.Z,{numColSpan:1,children:[(0,r.jsx)(R,{userID:o,userSpendData:l}),(0,r.jsx)(P,{userID:o,accessToken:u,data:e,setData:t}),(0,r.jsx)(w,{userID:o,userRole:x,accessToken:u,data:e,setData:t})]})})]}))}}},function(e){e.O(0,[787,971,69,744],function(){return e(e.s=88102)}),_N_E=e.O()}]); \ No newline at end of file diff --git a/ui/litellm-dashboard/out/_next/static/chunks/main-app-9b4fb13a7db53edf.js b/litellm/proxy/_experimental/out/_next/static/chunks/main-app-096338c8e1915716.js similarity index 54% rename from ui/litellm-dashboard/out/_next/static/chunks/main-app-9b4fb13a7db53edf.js rename to litellm/proxy/_experimental/out/_next/static/chunks/main-app-096338c8e1915716.js index 440df3cb3..421ae3e2c 100644 --- a/ui/litellm-dashboard/out/_next/static/chunks/main-app-9b4fb13a7db53edf.js +++ b/litellm/proxy/_experimental/out/_next/static/chunks/main-app-096338c8e1915716.js @@ -1 +1 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[744],{32028:function(e,n,t){Promise.resolve().then(t.t.bind(t,47690,23)),Promise.resolve().then(t.t.bind(t,48955,23)),Promise.resolve().then(t.t.bind(t,5613,23)),Promise.resolve().then(t.t.bind(t,11902,23)),Promise.resolve().then(t.t.bind(t,31778,23)),Promise.resolve().then(t.t.bind(t,77831,23))}},function(e){var n=function(n){return e(e.s=n)};e.O(0,[971,69],function(){return n(35317),n(32028)}),_N_E=e.O()}]); \ No newline at end of file +(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[744],{70377:function(e,n,t){Promise.resolve().then(t.t.bind(t,47690,23)),Promise.resolve().then(t.t.bind(t,48955,23)),Promise.resolve().then(t.t.bind(t,5613,23)),Promise.resolve().then(t.t.bind(t,11902,23)),Promise.resolve().then(t.t.bind(t,31778,23)),Promise.resolve().then(t.t.bind(t,77831,23))}},function(e){var n=function(n){return e(e.s=n)};e.O(0,[971,69],function(){return n(35317),n(70377)}),_N_E=e.O()}]); \ No newline at end of file diff --git a/litellm/proxy/_experimental/out/_next/static/chunks/main-app-9b4fb13a7db53edf.js b/litellm/proxy/_experimental/out/_next/static/chunks/main-app-9b4fb13a7db53edf.js deleted file mode 100644 index 440df3cb3..000000000 --- a/litellm/proxy/_experimental/out/_next/static/chunks/main-app-9b4fb13a7db53edf.js +++ /dev/null @@ -1 +0,0 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[744],{32028:function(e,n,t){Promise.resolve().then(t.t.bind(t,47690,23)),Promise.resolve().then(t.t.bind(t,48955,23)),Promise.resolve().then(t.t.bind(t,5613,23)),Promise.resolve().then(t.t.bind(t,11902,23)),Promise.resolve().then(t.t.bind(t,31778,23)),Promise.resolve().then(t.t.bind(t,77831,23))}},function(e){var n=function(n){return e(e.s=n)};e.O(0,[971,69],function(){return n(35317),n(32028)}),_N_E=e.O()}]); \ No newline at end of file diff --git a/litellm/proxy/_experimental/out/_next/static/lGjwnJSGwBqa476jHHI8W/_buildManifest.js 
b/litellm/proxy/_experimental/out/_next/static/p5gDwQBbgW8D3Uz3lgoZg/_buildManifest.js similarity index 100% rename from litellm/proxy/_experimental/out/_next/static/lGjwnJSGwBqa476jHHI8W/_buildManifest.js rename to litellm/proxy/_experimental/out/_next/static/p5gDwQBbgW8D3Uz3lgoZg/_buildManifest.js diff --git a/litellm/proxy/_experimental/out/_next/static/lGjwnJSGwBqa476jHHI8W/_ssgManifest.js b/litellm/proxy/_experimental/out/_next/static/p5gDwQBbgW8D3Uz3lgoZg/_ssgManifest.js similarity index 100% rename from litellm/proxy/_experimental/out/_next/static/lGjwnJSGwBqa476jHHI8W/_ssgManifest.js rename to litellm/proxy/_experimental/out/_next/static/p5gDwQBbgW8D3Uz3lgoZg/_ssgManifest.js diff --git a/litellm/proxy/_experimental/out/index.html b/litellm/proxy/_experimental/out/index.html index f0fb6f14c..9537fb723 100644 --- a/litellm/proxy/_experimental/out/index.html +++ b/litellm/proxy/_experimental/out/index.html @@ -1 +1 @@ -Create Next App
\ No newline at end of file +🚅 LiteLLM
\ No newline at end of file diff --git a/litellm/proxy/_experimental/out/index.txt b/litellm/proxy/_experimental/out/index.txt index db2a9c631..f48954f2c 100644 --- a/litellm/proxy/_experimental/out/index.txt +++ b/litellm/proxy/_experimental/out/index.txt @@ -1,7 +1,7 @@ 2:"$Sreact.suspense" -3:I[27680,["787","static/chunks/787-5bb33960644f5c7c.js","931","static/chunks/app/page-992f4cdd1053ee86.js"],""] +3:I[27680,["787","static/chunks/787-5bb33960644f5c7c.js","931","static/chunks/app/page-7f03ccc8529ada97.js"],""] 4:I[5613,[],""] 5:I[31778,[],""] -0:["lGjwnJSGwBqa476jHHI8W",[[["",{"children":["__PAGE__",{}]},"$undefined","$undefined",true],["",{"children":["__PAGE__",{},["$L1",["$","$2",null,{"fallback":["$","div",null,{"children":"Loading..."}],"children":["$","div",null,{"className":"flex min-h-screen flex-col ","children":["$","$L3",null,{}]}]}],null]]},[null,["$","html",null,{"lang":"en","children":["$","body",null,{"className":"__className_c23dc8","children":["$","$L4",null,{"parallelRouterKey":"children","segmentPath":["children"],"loading":"$undefined","loadingStyles":"$undefined","loadingScripts":"$undefined","hasLoading":false,"error":"$undefined","errorStyles":"$undefined","errorScripts":"$undefined","template":["$","$L5",null,{}],"templateStyles":"$undefined","templateScripts":"$undefined","notFound":[["$","title",null,{"children":"404: This page could not be found."}],["$","div",null,{"style":{"fontFamily":"system-ui,\"Segoe UI\",Roboto,Helvetica,Arial,sans-serif,\"Apple Color Emoji\",\"Segoe UI Emoji\"","height":"100vh","textAlign":"center","display":"flex","flexDirection":"column","alignItems":"center","justifyContent":"center"},"children":["$","div",null,{"children":[["$","style",null,{"dangerouslySetInnerHTML":{"__html":"body{color:#000;background:#fff;margin:0}.next-error-h1{border-right:1px solid rgba(0,0,0,.3)}@media (prefers-color-scheme:dark){body{color:#fff;background:#000}.next-error-h1{border-right:1px solid rgba(255,255,255,.3)}}"}}],["$","h1",null,{"className":"next-error-h1","style":{"display":"inline-block","margin":"0 20px 0 0","padding":"0 23px 0 0","fontSize":24,"fontWeight":500,"verticalAlign":"top","lineHeight":"49px"},"children":"404"}],["$","div",null,{"style":{"display":"inline-block"},"children":["$","h2",null,{"style":{"fontSize":14,"fontWeight":400,"lineHeight":"49px","margin":0},"children":"This page could not be found."}]}]]}]}]],"notFoundStyles":[],"styles":null}]}]}],null]],[[["$","link","0",{"rel":"stylesheet","href":"/ui/_next/static/css/a6a9860a7fe022a9.css","precedence":"next","crossOrigin":""}]],"$L6"]]]] -6:[["$","meta","0",{"name":"viewport","content":"width=device-width, initial-scale=1"}],["$","meta","1",{"charSet":"utf-8"}],["$","title","2",{"children":"Create Next App"}],["$","meta","3",{"name":"description","content":"Generated by create next app"}],["$","link","4",{"rel":"icon","href":"/ui/favicon.ico","type":"image/x-icon","sizes":"16x16"}],["$","meta","5",{"name":"next-size-adjust"}]] +0:["p5gDwQBbgW8D3Uz3lgoZg",[[["",{"children":["__PAGE__",{}]},"$undefined","$undefined",true],["",{"children":["__PAGE__",{},["$L1",["$","$2",null,{"fallback":["$","div",null,{"children":"Loading..."}],"children":["$","div",null,{"className":"flex min-h-screen flex-col 
","children":["$","$L3",null,{}]}]}],null]]},[null,["$","html",null,{"lang":"en","children":["$","body",null,{"className":"__className_c23dc8","children":["$","$L4",null,{"parallelRouterKey":"children","segmentPath":["children"],"loading":"$undefined","loadingStyles":"$undefined","loadingScripts":"$undefined","hasLoading":false,"error":"$undefined","errorStyles":"$undefined","errorScripts":"$undefined","template":["$","$L5",null,{}],"templateStyles":"$undefined","templateScripts":"$undefined","notFound":[["$","title",null,{"children":"404: This page could not be found."}],["$","div",null,{"style":{"fontFamily":"system-ui,\"Segoe UI\",Roboto,Helvetica,Arial,sans-serif,\"Apple Color Emoji\",\"Segoe UI Emoji\"","height":"100vh","textAlign":"center","display":"flex","flexDirection":"column","alignItems":"center","justifyContent":"center"},"children":["$","div",null,{"children":[["$","style",null,{"dangerouslySetInnerHTML":{"__html":"body{color:#000;background:#fff;margin:0}.next-error-h1{border-right:1px solid rgba(0,0,0,.3)}@media (prefers-color-scheme:dark){body{color:#fff;background:#000}.next-error-h1{border-right:1px solid rgba(255,255,255,.3)}}"}}],["$","h1",null,{"className":"next-error-h1","style":{"display":"inline-block","margin":"0 20px 0 0","padding":"0 23px 0 0","fontSize":24,"fontWeight":500,"verticalAlign":"top","lineHeight":"49px"},"children":"404"}],["$","div",null,{"style":{"display":"inline-block"},"children":["$","h2",null,{"style":{"fontSize":14,"fontWeight":400,"lineHeight":"49px","margin":0},"children":"This page could not be found."}]}]]}]}]],"notFoundStyles":[],"styles":null}]}]}],null]],[[["$","link","0",{"rel":"stylesheet","href":"/ui/_next/static/css/a6a9860a7fe022a9.css","precedence":"next","crossOrigin":""}]],"$L6"]]]] +6:[["$","meta","0",{"name":"viewport","content":"width=device-width, initial-scale=1"}],["$","meta","1",{"charSet":"utf-8"}],["$","title","2",{"children":"🚅 LiteLLM"}],["$","meta","3",{"name":"description","content":"LiteLLM Proxy Admin UI"}],["$","link","4",{"rel":"icon","href":"/ui/favicon.ico","type":"image/x-icon","sizes":"16x16"}],["$","meta","5",{"name":"next-size-adjust"}]] 1:null diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 3b8b5a3b3..95c2f2ccb 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -1889,6 +1889,7 @@ async def startup_event(): user_id = "default_user_id" if os.getenv("PROXY_ADMIN_ID", None) is not None: user_id = os.getenv("PROXY_ADMIN_ID") + asyncio.create_task( generate_key_helper_fn( duration=None, @@ -1899,6 +1900,10 @@ async def startup_event(): token=master_key, user_id=user_id, user_role="proxy_admin", + query_type="update_data", + update_key_values={ + "user_role": "proxy_admin", + }, ) ) @@ -3461,7 +3466,6 @@ async def auth_callback(request: Request): response = await generate_key_helper_fn( **{"duration": "1hr", "key_max_budget": 0, "models": [], "aliases": {}, "config": {}, "spend": 0, "user_id": user_id, "team_id": "litellm-dashboard", "user_email": user_email} # type: ignore ) - key = response["token"] # type: ignore user_id = response["user_id"] # type: ignore diff --git a/ui/litellm-dashboard/out/404.html b/ui/litellm-dashboard/out/404.html index 1ec6cd9a4..c57eb5193 100644 --- a/ui/litellm-dashboard/out/404.html +++ b/ui/litellm-dashboard/out/404.html @@ -1 +1 @@ -404: This page could not be found.Create Next App

\ No newline at end of file +404: This page could not be found.🚅 LiteLLM

\ No newline at end of file diff --git a/ui/litellm-dashboard/out/_next/static/chunks/app/layout-4d667c133e03c98b.js b/ui/litellm-dashboard/out/_next/static/chunks/app/layout-4d667c133e03c98b.js deleted file mode 100644 index e261adc05..000000000 --- a/ui/litellm-dashboard/out/_next/static/chunks/app/layout-4d667c133e03c98b.js +++ /dev/null @@ -1 +0,0 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[185],{87421:function(n,e,t){Promise.resolve().then(t.t.bind(t,99646,23)),Promise.resolve().then(t.t.bind(t,63385,23))},63385:function(){},99646:function(n){n.exports={style:{fontFamily:"'__Inter_c23dc8', '__Inter_Fallback_c23dc8'",fontStyle:"normal"},className:"__className_c23dc8"}}},function(n){n.O(0,[971,69,744],function(){return n(n.s=87421)}),_N_E=n.O()}]); \ No newline at end of file diff --git a/ui/litellm-dashboard/out/_next/static/chunks/app/page-992f4cdd1053ee86.js b/ui/litellm-dashboard/out/_next/static/chunks/app/page-992f4cdd1053ee86.js deleted file mode 100644 index cd4ccb43c..000000000 --- a/ui/litellm-dashboard/out/_next/static/chunks/app/page-992f4cdd1053ee86.js +++ /dev/null @@ -1 +0,0 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[931],{88102:function(e,t,l){Promise.resolve().then(l.bind(l,27680))},27680:function(e,t,l){"use strict";l.r(t),l.d(t,{default:function(){return M}});var r=l(3827),s=l(64090),n=l(80588);let a=async(e,t,l)=>{try{if(console.log("Form Values in keyCreateCall:",l),l.description&&(l.metadata||(l.metadata={}),l.metadata.description=l.description,delete l.description,l.metadata=JSON.stringify(l.metadata)),l.metadata){console.log("formValues.metadata:",l.metadata);try{l.metadata=JSON.parse(l.metadata)}catch(e){throw n.ZP.error("Failed to parse metadata: "+e),Error("Failed to parse metadata: "+e)}}console.log("Form Values after check:",l);let r=await fetch("/key/generate",{method:"POST",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"},body:JSON.stringify({user_id:t,...l})});if(!r.ok){let e=await r.text();throw n.ZP.error("Failed to create key: "+e),console.error("Error response from the server:",e),Error("Network response was not ok")}let s=await r.json();return console.log("API Response:",s),s}catch(e){throw console.error("Failed to create key:",e),e}},o=async(e,t)=>{try{console.log("in keyDeleteCall:",t);let l=await fetch("/key/delete",{method:"POST",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"},body:JSON.stringify({keys:[t]})});if(!l.ok){let e=await l.text();throw n.ZP.error("Failed to delete key: "+e),Error("Network response was not ok")}let r=await l.json();return console.log(r),n.ZP.success("API Key Deleted"),r}catch(e){throw console.error("Failed to create key:",e),e}},i=async(e,t)=>{try{let l="/user/info";console.log("in userInfoCall:",l);let r=await fetch("".concat(l,"/?user_id=").concat(t),{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!r.ok){let e=await r.text();throw n.ZP.error(e),Error("Network response was not ok")}let s=await r.json();return console.log(s),s}catch(e){throw console.error("Failed to create key:",e),e}},c=async(e,t)=>{try{let l="/spend/logs";console.log("in keySpendLogsCall:",l);let r=await fetch("".concat(l,"/?api_key=").concat(t),{method:"GET",headers:{Authorization:"Bearer ".concat(e),"Content-Type":"application/json"}});if(!r.ok){let e=await r.text();throw n.ZP.error(e),Error("Network response was not ok")}let s=await r.json();return console.log(s),s}catch(e){throw console.error("Failed to 
create key:",e),e}};var d=l(10384),h=l(46453),u=l(2179),m=l(71801),x=l(96776),j=l(2902),p=l(77171),y=l(29714),Z=l(88707),g=l(1861);let{Option:f}=x.default;var w=e=>{let{userID:t,userRole:l,accessToken:o,data:i,setData:c}=e,[x]=j.Z.useForm(),[f,w]=(0,s.useState)(!1),[k,b]=(0,s.useState)(null),N=()=>{w(!1),x.resetFields()},S=()=>{w(!1),b(null),x.resetFields()},_=async e=>{try{n.ZP.info("Making API Call"),e.models&&""!==e.models.trim()?e.models=e.models.split(",").map(e=>e.trim()):e.models=[],w(!0);let l=await a(o,t,e);c(e=>e?[...e,l]:[l]),b(l.key),n.ZP.success("API Key Created"),x.resetFields()}catch(e){console.error("Error creating the key:",e)}};return(0,r.jsxs)("div",{children:[(0,r.jsx)(u.Z,{className:"mx-auto",onClick:()=>w(!0),children:"+ Create New Key"}),(0,r.jsx)(p.Z,{title:"Create Key",visible:f,width:800,footer:null,onOk:N,onCancel:S,children:(0,r.jsxs)(j.Z,{form:x,onFinish:_,labelCol:{span:6},wrapperCol:{span:16},labelAlign:"left",children:["App Owner"===l||"Admin"===l?(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)(j.Z.Item,{label:"Key Name",name:"key_alias",children:(0,r.jsx)(y.Z,{})}),(0,r.jsx)(j.Z.Item,{label:"Team ID",name:"team_id",children:(0,r.jsx)(y.Z,{placeholder:"ai_team"})}),(0,r.jsx)(j.Z.Item,{label:"Models (Comma Separated). Eg: gpt-3.5-turbo,gpt-4",name:"models",children:(0,r.jsx)(y.Z,{placeholder:"gpt-4,gpt-3.5-turbo"})}),(0,r.jsx)(j.Z.Item,{label:"Max Budget (USD)",name:"max_budget",children:(0,r.jsx)(Z.Z,{step:.01,precision:2,width:200})}),(0,r.jsx)(j.Z.Item,{label:"Duration (eg: 30s, 30h, 30d)",name:"duration",children:(0,r.jsx)(y.Z,{})}),(0,r.jsx)(j.Z.Item,{label:"Metadata",name:"metadata",children:(0,r.jsx)(y.Z.TextArea,{rows:4,placeholder:"Enter metadata as JSON"})})]}):(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)(j.Z.Item,{label:"Key Name",name:"key_alias",children:(0,r.jsx)(y.Z,{})}),(0,r.jsx)(j.Z.Item,{label:"Team ID (Contact Group)",name:"team_id",children:(0,r.jsx)(y.Z,{placeholder:"ai_team"})}),(0,r.jsx)(j.Z.Item,{label:"Description",name:"description",children:(0,r.jsx)(y.Z.TextArea,{placeholder:"Enter description",rows:4})})]}),(0,r.jsx)("div",{style:{textAlign:"right",marginTop:"10px"},children:(0,r.jsx)(g.ZP,{htmlType:"submit",children:"Create Key"})})]})}),k&&(0,r.jsx)(p.Z,{title:"Save your key",visible:f,onOk:N,onCancel:S,footer:null,children:(0,r.jsxs)(h.Z,{numItems:1,className:"gap-2 w-full",children:[(0,r.jsx)(d.Z,{numColSpan:1,children:(0,r.jsxs)("p",{children:["Please save this secret key somewhere safe and accessible. For security reasons, ",(0,r.jsx)("b",{children:"you will not be able to view it again"})," ","through your LiteLLM account. 
If you lose this secret key, you will need to generate a new one."]})}),(0,r.jsx)(d.Z,{numColSpan:1,children:null!=k?(0,r.jsxs)(m.Z,{children:["API Key: ",k]}):(0,r.jsx)(m.Z,{children:"Key being created, this might take 30s"})})]})})]})},k=l(33393),b=l(13810),N=l(61244),S=l(10827),_=l(3851),D=l(2044),C=l(64167),v=l(74480),I=l(7178),E=l(42440),T=l(9853),A=l(67989),O=l(56863),F=e=>{let{token:t,accessToken:l,keySpend:n,keyBudget:a,keyName:o}=e,[i,d]=(0,s.useState)(!1),[h,m]=(0,s.useState)(null),[x,j]=(0,s.useState)(null),y=async()=>{try{if(null==l||null==t)return;let e=await c(l,t);console.log("Response:",e);let r=Object.values(e).reduce((e,t)=>{let l=new Date(t.startTime),r=new Intl.DateTimeFormat("en-US",{day:"2-digit",month:"short"}).format(l);return e[r]=(e[r]||0)+t.spend,e},{}),s=Object.entries(r);s.sort((e,t)=>{let[l]=e,[r]=t,s=new Date(l),n=new Date(r);return s.getTime()-n.getTime()});let n=Object.fromEntries(s);console.log(n);let a=Object.values(e).reduce((e,t)=>{let l=t.user;return e[l]=(e[l]||0)+t.spend,e},{});console.log(r),console.log(a);let o=[];for(let[e,t]of Object.entries(n))o.push({day:e,spend:t});let i=Object.entries(a).sort((e,t)=>t[1]-e[1]).slice(0,5).map(e=>{let[t,l]=e;return{name:t,value:l}});m(o),j(i),console.log("arrayBarChart:",o)}catch(e){console.error("There was an error fetching the data",e)}};return((0,s.useEffect)(()=>{y()},[t]),t)?(0,r.jsxs)("div",{children:[(0,r.jsx)(u.Z,{className:"mx-auto",onClick:()=>{d(!0)},children:"View Spend Report"}),(0,r.jsxs)(p.Z,{visible:i,width:1e3,onOk:()=>{d(!1)},onCancel:()=>{d(!1)},footer:null,children:[(0,r.jsxs)(E.Z,{style:{textAlign:"left"},children:["Key Name: ",o]}),(0,r.jsxs)(O.Z,{children:["Monthly Spend $",n]}),(0,r.jsx)(b.Z,{className:"mt-6 mb-6",children:h&&(0,r.jsx)(T.Z,{className:"mt-6",data:h,colors:["green"],index:"day",categories:["spend"],yAxisWidth:48})}),(0,r.jsx)(E.Z,{className:"mt-6",children:"Top 5 Users Spend (USD)"}),(0,r.jsx)(b.Z,{className:"mb-6",children:x&&(0,r.jsx)(A.Z,{className:"mt-6",data:x,color:"teal"})})]})]}):null},P=e=>{let{userID:t,accessToken:l,data:s,setData:n}=e,a=async e=>{if(null!=s)try{await o(l,e);let t=s.filter(t=>t.token!==e);n(t)}catch(e){console.error("Error deleting the key:",e)}};if(null!=s)return console.log("RERENDER TRIGGERED"),(0,r.jsxs)(b.Z,{className:"w-full mx-auto flex-auto overflow-y-auto max-h-[50vh] mb-4",children:[(0,r.jsx)(E.Z,{children:"API Keys"}),(0,r.jsxs)(S.Z,{className:"mt-5",children:[(0,r.jsx)(C.Z,{children:(0,r.jsxs)(I.Z,{children:[(0,r.jsx)(v.Z,{children:"Key Alias"}),(0,r.jsx)(v.Z,{children:"Secret Key"}),(0,r.jsx)(v.Z,{children:"Spend (USD)"}),(0,r.jsx)(v.Z,{children:"Key Budget (USD)"}),(0,r.jsx)(v.Z,{children:"Team ID"}),(0,r.jsx)(v.Z,{children:"Metadata"}),(0,r.jsx)(v.Z,{children:"Expires"})]})}),(0,r.jsx)(_.Z,{children:s.map(e=>(console.log(e),"litellm-dashboard"===e.team_id)?null:(0,r.jsxs)(I.Z,{children:[(0,r.jsx)(D.Z,{children:null!=e.key_alias?(0,r.jsx)(m.Z,{children:e.key_alias}):(0,r.jsx)(m.Z,{children:"Not Set"})}),(0,r.jsx)(D.Z,{children:(0,r.jsx)(m.Z,{children:e.key_name})}),(0,r.jsx)(D.Z,{children:(0,r.jsx)(m.Z,{children:e.spend})}),(0,r.jsx)(D.Z,{children:null!=e.max_budget?(0,r.jsx)(m.Z,{children:e.max_budget}):(0,r.jsx)(m.Z,{children:"Unlimited Budget"})}),(0,r.jsx)(D.Z,{children:(0,r.jsx)(m.Z,{children:e.team_id})}),(0,r.jsx)(D.Z,{children:(0,r.jsx)(m.Z,{children:JSON.stringify(e.metadata)})}),(0,r.jsx)(D.Z,{children:null!=e.expires?(0,r.jsx)(m.Z,{children:e.expires}):(0,r.jsx)(m.Z,{children:"Never 
expires"})}),(0,r.jsx)(D.Z,{children:(0,r.jsx)(N.Z,{onClick:()=>a(e.token),icon:k.Z,size:"sm"})}),(0,r.jsx)(D.Z,{children:(0,r.jsx)(F,{token:e.token,accessToken:l,keySpend:e.spend,keyBudget:e.max_budget,keyName:e.key_name})})]},e.token))})]})]})},R=e=>{let{userID:t,userSpendData:l}=e;console.log("User SpendData:",l);let s=null==l?void 0:l.spend,n=(null==l?void 0:l.max_budget)||null,a=null!==n?"$".concat(n," limit"):"No limit";return"$".concat(s," / ").concat(a),(0,r.jsx)(r.Fragment,{children:(0,r.jsxs)(b.Z,{className:"mx-auto mb-4",children:[(0,r.jsxs)(O.Z,{children:["$",s]}),(0,r.jsxs)(E.Z,{children:["/ ",a]})]})})},K=l(8792),U=e=>{let{userID:t,userRole:l}=e;return console.log("User ID:",t),(0,r.jsxs)("nav",{className:"left-0 right-0 top-0 flex justify-between items-center h-12 mb-4",children:[(0,r.jsx)("div",{className:"text-left mx-4 my-2 absolute top-0 left-0",children:(0,r.jsx)("div",{className:"flex flex-col items-center",children:(0,r.jsx)(K.default,{href:"/",children:(0,r.jsx)("button",{className:"text-gray-800 text-2xl px-4 py-1 rounded text-center",children:"\uD83D\uDE85 LiteLLM"})})})}),(0,r.jsx)("div",{className:"text-right mx-4 my-2 absolute top-0 right-0",children:(0,r.jsxs)(u.Z,{variant:"secondary",children:[t,(0,r.jsxs)("p",{children:["Role: ",l]})]})})]})},B=l(47907),L=l(37963),M=()=>{let[e,t]=(0,s.useState)(null),[l,n]=(0,s.useState)(null),a=(0,B.useSearchParams)(),o=a.get("userID");a.get("viewSpend");let c=a.get("token"),[u,m]=(0,s.useState)(null),[x,j]=(0,s.useState)(null);if((0,s.useEffect)(()=>{if(c){let e=(0,L.o)(c);if(e){if(console.log("Decoded token:",e),console.log("Decoded key:",e.key),m(e.key),e.user_role){let t=function(e){if(!e)return"Undefined Role";switch(e.toLowerCase()){case"app_owner":return"App Owner";case"demo_app_owner":return"AppOwner";case"admin":return"Admin";case"app_user":return"App User";default:return"Unknown Role"}}(e.user_role);console.log("Decoded user_role:",t),j(t)}else console.log("User role not defined")}}o&&u&&!e&&(async()=>{try{let e=await i(u,o);console.log("Response:",e),n(e.user_info),t(e.keys)}catch(e){console.error("There was an error fetching the data",e)}})()},[o,c,u,e]),null==o||null==c){let e="/sso/key/generate";return console.log("Full URL:",e),window.location.href=e,null}return null==u?null:(null==x&&j("App Owner"),(0,r.jsxs)("div",{children:[(0,r.jsx)(U,{userID:o,userRole:x}),(0,r.jsx)(h.Z,{numItems:1,className:"gap-0 p-10 h-[75vh] w-full",children:(0,r.jsxs)(d.Z,{numColSpan:1,children:[(0,r.jsx)(R,{userID:o,userSpendData:l}),(0,r.jsx)(P,{userID:o,accessToken:u,data:e,setData:t}),(0,r.jsx)(w,{userID:o,userRole:x,accessToken:u,data:e,setData:t})]})})]}))}}},function(e){e.O(0,[787,971,69,744],function(){return e(e.s=88102)}),_N_E=e.O()}]); \ No newline at end of file diff --git a/ui/litellm-dashboard/out/_next/static/lGjwnJSGwBqa476jHHI8W/_buildManifest.js b/ui/litellm-dashboard/out/_next/static/lGjwnJSGwBqa476jHHI8W/_buildManifest.js deleted file mode 100644 index f779caa02..000000000 --- a/ui/litellm-dashboard/out/_next/static/lGjwnJSGwBqa476jHHI8W/_buildManifest.js +++ /dev/null @@ -1 +0,0 @@ -self.__BUILD_MANIFEST={__rewrites:{afterFiles:[],beforeFiles:[],fallback:[]},"/_error":["static/chunks/pages/_error-d6107f1aac0c574c.js"],sortedPages:["/_app","/_error"]},self.__BUILD_MANIFEST_CB&&self.__BUILD_MANIFEST_CB(); \ No newline at end of file diff --git a/ui/litellm-dashboard/out/_next/static/lGjwnJSGwBqa476jHHI8W/_ssgManifest.js b/ui/litellm-dashboard/out/_next/static/lGjwnJSGwBqa476jHHI8W/_ssgManifest.js deleted file mode 
100644 index 5b3ff592f..000000000 --- a/ui/litellm-dashboard/out/_next/static/lGjwnJSGwBqa476jHHI8W/_ssgManifest.js +++ /dev/null @@ -1 +0,0 @@ -self.__SSG_MANIFEST=new Set([]);self.__SSG_MANIFEST_CB&&self.__SSG_MANIFEST_CB() \ No newline at end of file diff --git a/ui/litellm-dashboard/out/index.html b/ui/litellm-dashboard/out/index.html index f0fb6f14c..9537fb723 100644 --- a/ui/litellm-dashboard/out/index.html +++ b/ui/litellm-dashboard/out/index.html @@ -1 +1 @@ -Create Next App
Loading...
\ No newline at end of file
+🚅 LiteLLM
Loading...
\ No newline at end of file diff --git a/ui/litellm-dashboard/out/index.txt b/ui/litellm-dashboard/out/index.txt index db2a9c631..f48954f2c 100644 --- a/ui/litellm-dashboard/out/index.txt +++ b/ui/litellm-dashboard/out/index.txt @@ -1,7 +1,7 @@ 2:"$Sreact.suspense" -3:I[27680,["787","static/chunks/787-5bb33960644f5c7c.js","931","static/chunks/app/page-992f4cdd1053ee86.js"],""] +3:I[27680,["787","static/chunks/787-5bb33960644f5c7c.js","931","static/chunks/app/page-7f03ccc8529ada97.js"],""] 4:I[5613,[],""] 5:I[31778,[],""] -0:["lGjwnJSGwBqa476jHHI8W",[[["",{"children":["__PAGE__",{}]},"$undefined","$undefined",true],["",{"children":["__PAGE__",{},["$L1",["$","$2",null,{"fallback":["$","div",null,{"children":"Loading..."}],"children":["$","div",null,{"className":"flex min-h-screen flex-col ","children":["$","$L3",null,{}]}]}],null]]},[null,["$","html",null,{"lang":"en","children":["$","body",null,{"className":"__className_c23dc8","children":["$","$L4",null,{"parallelRouterKey":"children","segmentPath":["children"],"loading":"$undefined","loadingStyles":"$undefined","loadingScripts":"$undefined","hasLoading":false,"error":"$undefined","errorStyles":"$undefined","errorScripts":"$undefined","template":["$","$L5",null,{}],"templateStyles":"$undefined","templateScripts":"$undefined","notFound":[["$","title",null,{"children":"404: This page could not be found."}],["$","div",null,{"style":{"fontFamily":"system-ui,\"Segoe UI\",Roboto,Helvetica,Arial,sans-serif,\"Apple Color Emoji\",\"Segoe UI Emoji\"","height":"100vh","textAlign":"center","display":"flex","flexDirection":"column","alignItems":"center","justifyContent":"center"},"children":["$","div",null,{"children":[["$","style",null,{"dangerouslySetInnerHTML":{"__html":"body{color:#000;background:#fff;margin:0}.next-error-h1{border-right:1px solid rgba(0,0,0,.3)}@media (prefers-color-scheme:dark){body{color:#fff;background:#000}.next-error-h1{border-right:1px solid rgba(255,255,255,.3)}}"}}],["$","h1",null,{"className":"next-error-h1","style":{"display":"inline-block","margin":"0 20px 0 0","padding":"0 23px 0 0","fontSize":24,"fontWeight":500,"verticalAlign":"top","lineHeight":"49px"},"children":"404"}],["$","div",null,{"style":{"display":"inline-block"},"children":["$","h2",null,{"style":{"fontSize":14,"fontWeight":400,"lineHeight":"49px","margin":0},"children":"This page could not be found."}]}]]}]}]],"notFoundStyles":[],"styles":null}]}]}],null]],[[["$","link","0",{"rel":"stylesheet","href":"/ui/_next/static/css/a6a9860a7fe022a9.css","precedence":"next","crossOrigin":""}]],"$L6"]]]] -6:[["$","meta","0",{"name":"viewport","content":"width=device-width, initial-scale=1"}],["$","meta","1",{"charSet":"utf-8"}],["$","title","2",{"children":"Create Next App"}],["$","meta","3",{"name":"description","content":"Generated by create next app"}],["$","link","4",{"rel":"icon","href":"/ui/favicon.ico","type":"image/x-icon","sizes":"16x16"}],["$","meta","5",{"name":"next-size-adjust"}]] +0:["p5gDwQBbgW8D3Uz3lgoZg",[[["",{"children":["__PAGE__",{}]},"$undefined","$undefined",true],["",{"children":["__PAGE__",{},["$L1",["$","$2",null,{"fallback":["$","div",null,{"children":"Loading..."}],"children":["$","div",null,{"className":"flex min-h-screen flex-col 
","children":["$","$L3",null,{}]}]}],null]]},[null,["$","html",null,{"lang":"en","children":["$","body",null,{"className":"__className_c23dc8","children":["$","$L4",null,{"parallelRouterKey":"children","segmentPath":["children"],"loading":"$undefined","loadingStyles":"$undefined","loadingScripts":"$undefined","hasLoading":false,"error":"$undefined","errorStyles":"$undefined","errorScripts":"$undefined","template":["$","$L5",null,{}],"templateStyles":"$undefined","templateScripts":"$undefined","notFound":[["$","title",null,{"children":"404: This page could not be found."}],["$","div",null,{"style":{"fontFamily":"system-ui,\"Segoe UI\",Roboto,Helvetica,Arial,sans-serif,\"Apple Color Emoji\",\"Segoe UI Emoji\"","height":"100vh","textAlign":"center","display":"flex","flexDirection":"column","alignItems":"center","justifyContent":"center"},"children":["$","div",null,{"children":[["$","style",null,{"dangerouslySetInnerHTML":{"__html":"body{color:#000;background:#fff;margin:0}.next-error-h1{border-right:1px solid rgba(0,0,0,.3)}@media (prefers-color-scheme:dark){body{color:#fff;background:#000}.next-error-h1{border-right:1px solid rgba(255,255,255,.3)}}"}}],["$","h1",null,{"className":"next-error-h1","style":{"display":"inline-block","margin":"0 20px 0 0","padding":"0 23px 0 0","fontSize":24,"fontWeight":500,"verticalAlign":"top","lineHeight":"49px"},"children":"404"}],["$","div",null,{"style":{"display":"inline-block"},"children":["$","h2",null,{"style":{"fontSize":14,"fontWeight":400,"lineHeight":"49px","margin":0},"children":"This page could not be found."}]}]]}]}]],"notFoundStyles":[],"styles":null}]}]}],null]],[[["$","link","0",{"rel":"stylesheet","href":"/ui/_next/static/css/a6a9860a7fe022a9.css","precedence":"next","crossOrigin":""}]],"$L6"]]]] +6:[["$","meta","0",{"name":"viewport","content":"width=device-width, initial-scale=1"}],["$","meta","1",{"charSet":"utf-8"}],["$","title","2",{"children":"🚅 LiteLLM"}],["$","meta","3",{"name":"description","content":"LiteLLM Proxy Admin UI"}],["$","link","4",{"rel":"icon","href":"/ui/favicon.ico","type":"image/x-icon","sizes":"16x16"}],["$","meta","5",{"name":"next-size-adjust"}]] 1:null diff --git a/ui/litellm-dashboard/src/components/networking.tsx b/ui/litellm-dashboard/src/components/networking.tsx index 5b8e42286..12eea2dd8 100644 --- a/ui/litellm-dashboard/src/components/networking.tsx +++ b/ui/litellm-dashboard/src/components/networking.tsx @@ -73,7 +73,7 @@ export const keyDeleteCall = async (accessToken: String, user_key: String) => { try { const url = proxyBaseUrl ? `${proxyBaseUrl}/key/delete` : `/key/delete`; console.log("in keyDeleteCall:", user_key); - + message.info("Making key delete request"); const response = await fetch(url, { method: "POST", headers: {