From a0ae1d6a18e524e6b5dd909f331a18ca63329eb8 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Sat, 5 Aug 2023 16:11:45 -0700
Subject: [PATCH] adding berrispend integration

Adds BerriSpend (and AISpend) cost-tracking integrations: per-model pricing
tables, a price calculator, and loggers wired into litellm's success/failure
callbacks, plus an integration test and a version bump.

---
 litellm/__pycache__/main.cpython-311.pyc      | Bin 14286 -> 14268 bytes
 litellm/__pycache__/utils.cpython-311.pyc     | Bin 20794 -> 24502 bytes
 .../__pycache__/aispend.cpython-311.pyc       | Bin 0 -> 4923 bytes
 .../__pycache__/berrispend.cpython-311.pyc    | Bin 0 -> 4682 bytes
 .../__pycache__/helicone.cpython-311.pyc      | Bin 0 -> 4567 bytes
 litellm/integrations/aispend.py               |  95 ++++++++++++++++++
 litellm/integrations/berrispend.py            |  99 ++++++++++++++++++
 litellm/tests/test_berrispend_integration.py  |  25 +++++
 litellm/utils.py                              |  82 +++++++++++++--
 pyproject.toml                                |   2 +-
 10 files changed, 293 insertions(+), 10 deletions(-)
 create mode 100644 litellm/integrations/__pycache__/aispend.cpython-311.pyc
 create mode 100644 litellm/integrations/__pycache__/berrispend.cpython-311.pyc
 create mode 100644 litellm/integrations/__pycache__/helicone.cpython-311.pyc
 create mode 100644 litellm/integrations/aispend.py
 create mode 100644 litellm/integrations/berrispend.py
 create mode 100644 litellm/tests/test_berrispend_integration.py

diff --git a/litellm/__pycache__/main.cpython-311.pyc b/litellm/__pycache__/main.cpython-311.pyc
index 06cc9494ee01e9df7b04f0ec468267626e33bdc6..e1b1f2e550d0dd88b0b39aec11a88a18f4cf293e 100644
Binary files a/litellm/__pycache__/main.cpython-311.pyc and b/litellm/__pycache__/main.cpython-311.pyc differ
diff --git a/litellm/__pycache__/utils.cpython-311.pyc b/litellm/__pycache__/utils.cpython-311.pyc
index 93c419413cda155c856d5c242ea9264212326ff9..5f984ca5341fef0b25244bd7f2e34dc2a8c1680b 100644
Binary files a/litellm/__pycache__/utils.cpython-311.pyc and b/litellm/__pycache__/utils.cpython-311.pyc differ
diff --git a/litellm/integrations/__pycache__/aispend.cpython-311.pyc b/litellm/integrations/__pycache__/aispend.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a8231afd79716b17db0915965b831719713ce8fc
Binary files /dev/null and b/litellm/integrations/__pycache__/aispend.cpython-311.pyc differ
diff --git a/litellm/integrations/__pycache__/berrispend.cpython-311.pyc b/litellm/integrations/__pycache__/berrispend.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b89fd0a31985c2f461cb7e241f618b67070cef00
Binary files /dev/null and b/litellm/integrations/__pycache__/berrispend.cpython-311.pyc differ
diff --git a/litellm/integrations/__pycache__/helicone.cpython-311.pyc b/litellm/integrations/__pycache__/helicone.cpython-311.pyc
new file mode 100644
Binary files /dev/null and b/litellm/integrations/__pycache__/helicone.cpython-311.pyc differ
diff --git a/litellm/integrations/aispend.py b/litellm/integrations/aispend.py
new file mode 100644
index 000000000..6723a6227
--- /dev/null
+++ b/litellm/integrations/aispend.py
@@ -0,0 +1,95 @@
+#### What this does ####
+# On success + failure, log events to aispend.io
+import dotenv, os
+import requests
+dotenv.load_dotenv() # Loading env variables using dotenv
+import traceback
+import datetime
+
+model_cost = {
+    "gpt-3.5-turbo": {"max_tokens": 4000, "input_cost_per_token": 0.0000015, "output_cost_per_token": 0.000002},
+    "gpt-35-turbo": {"max_tokens": 4000, "input_cost_per_token": 0.0000015, "output_cost_per_token": 0.000002}, # azure model name
+    "gpt-3.5-turbo-0613": {"max_tokens": 4000, "input_cost_per_token": 0.0000015, "output_cost_per_token": 0.000002},
+    "gpt-3.5-turbo-0301": {"max_tokens": 4000, "input_cost_per_token": 0.0000015, "output_cost_per_token": 0.000002},
+    "gpt-3.5-turbo-16k": {"max_tokens": 16000, "input_cost_per_token": 0.000003, "output_cost_per_token": 0.000004},
+    "gpt-35-turbo-16k": {"max_tokens": 16000, "input_cost_per_token": 0.000003, "output_cost_per_token": 0.000004}, # azure model name
+    "gpt-3.5-turbo-16k-0613": {"max_tokens": 16000, "input_cost_per_token": 0.000003, "output_cost_per_token": 0.000004},
+    "gpt-4": {"max_tokens": 8000, "input_cost_per_token": 0.00003, "output_cost_per_token": 0.00006},
+    "gpt-4-0613": {"max_tokens": 8000, "input_cost_per_token": 0.00003, "output_cost_per_token": 0.00006},
+    "gpt-4-32k": {"max_tokens": 32000, "input_cost_per_token": 0.00006, "output_cost_per_token": 0.00012},
+    "claude-instant-1": {"max_tokens": 100000, "input_cost_per_token": 0.00000163, "output_cost_per_token": 0.00000551},
+    "claude-2": {"max_tokens": 100000, "input_cost_per_token": 0.00001102, "output_cost_per_token": 0.00003268},
+    "text-bison-001": {"max_tokens": 8192, "input_cost_per_token": 0.000004, "output_cost_per_token": 0.000004},
+    "chat-bison-001": {"max_tokens": 4096, "input_cost_per_token": 0.000002, "output_cost_per_token": 0.000002},
+    "command-nightly": {"max_tokens": 4096, "input_cost_per_token": 0.000015, "output_cost_per_token": 0.000015},
+}
+
+class AISpendLogger:
+    # Class variables or attributes
+    def __init__(self):
+        # Instance variables
+        self.account_id = os.getenv("AISPEND_ACCOUNT_ID")
+        self.api_key = os.getenv("AISPEND_API_KEY")
+
+    def price_calculator(self, model, response_obj, start_time, end_time):
+        # try and find if the model is in the model_cost map
+        # else default to the average of the costs
+        prompt_tokens_cost_usd_dollar = 0
+        completion_tokens_cost_usd_dollar = 0
+        if model in model_cost:
+            prompt_tokens_cost_usd_dollar = model_cost[model]["input_cost_per_token"] * response_obj["usage"]["prompt_tokens"]
+            completion_tokens_cost_usd_dollar = model_cost[model]["output_cost_per_token"] * response_obj["usage"]["completion_tokens"]
+        elif "replicate" in model:
+            # replicate models are charged based on time
+            # llama 2 runs on an nvidia a100 which costs $0.0032 per second - https://replicate.com/replicate/llama-2-70b-chat
+            model_run_time = (end_time - start_time).total_seconds() # run time in seconds
+            cost_usd_dollar = model_run_time * 0.0032
+            prompt_tokens_cost_usd_dollar = cost_usd_dollar / 2
+            completion_tokens_cost_usd_dollar = cost_usd_dollar / 2
+        else:
+            # default to the average input/output cost across the known models
+            input_cost_sum = 0
+            output_cost_sum = 0
+            for model_name in model_cost:
+                input_cost_sum += model_cost[model_name]["input_cost_per_token"]
+                output_cost_sum += model_cost[model_name]["output_cost_per_token"]
+            avg_input_cost = input_cost_sum / len(model_cost.keys())
+            avg_output_cost = output_cost_sum / len(model_cost.keys())
+            prompt_tokens_cost_usd_dollar = avg_input_cost * response_obj["usage"]["prompt_tokens"]
+            completion_tokens_cost_usd_dollar = avg_output_cost * response_obj["usage"]["completion_tokens"]
+        return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
+
+    def log_event(self, model, response_obj, start_time, end_time, print_verbose):
+        # Method definition
+        try:
+            print_verbose(f"AISpend Logging - Enters logging function for model {model}")
+
+            url = f"https://aispend.io/api/v1/accounts/{self.account_id}/data"
+            headers = {
+                'Authorization': f'Bearer {self.api_key}',
+                'Content-Type': 'application/json'
+            }
+
+            response_timestamp = datetime.datetime.fromtimestamp(int(response_obj["created"])).strftime('%Y-%m-%d')
+
+            prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar = self.price_calculator(model, response_obj, start_time, end_time)
+            prompt_tokens_cost_usd_cent = prompt_tokens_cost_usd_dollar * 100
+            completion_tokens_cost_usd_cent = completion_tokens_cost_usd_dollar * 100
+            data = [{
+                "requests": 1,
+                "requests_context": 1,
+                "context_tokens": response_obj["usage"]["prompt_tokens"],
+                "requests_generated": 1,
+                "generated_tokens": response_obj["usage"]["completion_tokens"],
+                "recorded_date": response_timestamp,
+                "model_id": response_obj["model"],
+                "generated_tokens_cost_usd_cent": completion_tokens_cost_usd_cent,
+                "context_tokens_cost_usd_cent": prompt_tokens_cost_usd_cent
+            }]
+
+            print_verbose(f"AISpend Logging - final data object: {data}")
+            response = requests.post(url, headers=headers, json=data) # ship the usage record
+        except:
+            # traceback.print_exc()
+            print_verbose(f"AISpend Logging Error - {traceback.format_exc()}")
+            pass
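A quick sanity check of the price_calculator logic above, using the table's
gpt-3.5-turbo rates. The response_obj here is a hypothetical, minimal
OpenAI-style object and is not part of the patch:

    # hypothetical usage sketch of AISpendLogger.price_calculator
    import datetime
    from litellm.integrations.aispend import AISpendLogger

    logger = AISpendLogger()  # reads AISPEND_ACCOUNT_ID / AISPEND_API_KEY from the env
    response_obj = {"usage": {"prompt_tokens": 100, "completion_tokens": 50}}
    now = datetime.datetime.now()
    prompt_cost, completion_cost = logger.price_calculator("gpt-3.5-turbo", response_obj, now, now)
    # prompt: 100 * 0.0000015 = $0.00015; completion: 50 * 0.000002 = $0.0001

For models missing from the table, the else-branch prices tokens at the average
of all known per-token costs; replicate models are priced by wall-clock seconds.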
0.0000015, "output_cost_per_token": 0.000002}, # azure model name + "gpt-3.5-turbo-0613": {"max_tokens": 4000, "input_cost_per_token": 0.0000015, "output_cost_per_token": 0.000002}, + "gpt-3.5-turbo-0301": {"max_tokens": 4000, "input_cost_per_token": 0.0000015, "output_cost_per_token": 0.000002}, + "gpt-3.5-turbo-16k": {"max_tokens": 16000, "input_cost_per_token": 0.000003, "output_cost_per_token": 0.000004}, + "gpt-35-turbo-16k": {"max_tokens": 16000, "input_cost_per_token": 0.000003, "output_cost_per_token": 0.000004}, # azure model name + "gpt-3.5-turbo-16k-0613": {"max_tokens": 16000, "input_cost_per_token": 0.000003, "output_cost_per_token": 0.000004}, + "gpt-4": {"max_tokens": 8000, "input_cost_per_token": 0.000003, "output_cost_per_token": 0.00006}, + "gpt-4-0613": {"max_tokens": 8000, "input_cost_per_token": 0.000003, "output_cost_per_token": 0.00006}, + "gpt-4-32k": {"max_tokens": 8000, "input_cost_per_token": 0.00006, "output_cost_per_token": 0.00012}, + "claude-instant-1": {"max_tokens": 100000, "input_cost_per_token": 0.00000163, "output_cost_per_token": 0.00000551}, + "claude-2": {"max_tokens": 100000, "input_cost_per_token": 0.00001102, "output_cost_per_token": 0.00003268}, + "text-bison-001": {"max_tokens": 8192, "input_cost_per_token": 0.000004, "output_cost_per_token": 0.000004}, + "chat-bison-001": {"max_tokens": 4096, "input_cost_per_token": 0.000002, "output_cost_per_token": 0.000002}, + "command-nightly": {"max_tokens": 4096, "input_cost_per_token": 0.000015, "output_cost_per_token": 0.000015}, +} + +class BerriSpendLogger: + # Class variables or attributes + def __init__(self): + # Instance variables + self.account_id = os.getenv("BERRISPEND_ACCOUNT_ID") + + def price_calculator(self, model, response_obj, start_time, end_time): + # try and find if the model is in the model_cost map + # else default to the average of the costs + prompt_tokens_cost_usd_dollar = 0 + completion_tokens_cost_usd_dollar = 0 + if model in model_cost: + prompt_tokens_cost_usd_dollar = model_cost[model]["input_cost_per_token"] * response_obj["usage"]["prompt_tokens"] + completion_tokens_cost_usd_dollar = model_cost[model]["output_cost_per_token"] * response_obj["usage"]["completion_tokens"] + elif "replicate" in model: + # replicate models are charged based on time + # llama 2 runs on an nvidia a100 which costs $0.0032 per second - https://replicate.com/replicate/llama-2-70b-chat + model_run_time = end_time - start_time # assuming time in seconds + cost_usd_dollar = model_run_time * 0.0032 + prompt_tokens_cost_usd_dollar = cost_usd_dollar / 2 + completion_tokens_cost_usd_dollar = cost_usd_dollar / 2 + else: + # calculate average input cost + input_cost_sum = 0 + output_cost_sum = 0 + for model in model_cost: + input_cost_sum += model_cost[model]["input_cost_per_token"] + output_cost_sum += model_cost[model]["output_cost_per_token"] + avg_input_cost = input_cost_sum / len(model_cost.keys()) + avg_output_cost = output_cost_sum / len(model_cost.keys()) + prompt_tokens_cost_usd_dollar = model_cost[model]["input_cost_per_token"] * response_obj["usage"]["prompt_tokens"] + completion_tokens_cost_usd_dollar = model_cost[model]["output_cost_per_token"] * response_obj["usage"]["completion_tokens"] + return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar + + def log_event(self, model, messages, response_obj, start_time, end_time, print_verbose): + # Method definition + try: + print_verbose(f"BerriSpend Logging - Enters logging function for model {model}") + + url = 
f"https://berrispend.berri.ai/spend" + headers = { + 'Content-Type': 'application/json' + } + + prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar = self.price_calculator(model, response_obj, start_time, end_time) + total_cost = prompt_tokens_cost_usd_dollar + completion_tokens_cost_usd_dollar + + response_time = (end_time-start_time).total_seconds() + if "response" in response_obj: + data = [{ + "response_time": response_time, + "model_id": response_obj["model"], + "total_cost": total_cost, + "messages": messages, + "response": response_obj['choices'][0]['message']['content'], + "account_id": self.account_id + }] + elif "error" in response_obj: + data = [{ + "response_time": response_time, + "model_id": response_obj["model"], + "total_cost": total_cost, + "messages": messages, + "error": response_obj['error'], + "account_id": self.account_id + }] + + print_verbose(f"BerriSpend Logging - final data object: {data}") + response = requests.post(url, headers=headers, json=data) + except: + # traceback.print_exc() + print_verbose(f"BerriSpend Logging Error - {traceback.format_exc()}") + pass diff --git a/litellm/tests/test_berrispend_integration.py b/litellm/tests/test_berrispend_integration.py new file mode 100644 index 000000000..ac937e5fc --- /dev/null +++ b/litellm/tests/test_berrispend_integration.py @@ -0,0 +1,25 @@ +#### What this tests #### +# This tests if logging to the helicone integration actually works + +import sys, os +import traceback +import pytest + +sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path +import litellm +from litellm import embedding, completion + +litellm.success_callback = ["berrispend"] +litellm.failure_callback = ["berrispend"] + +litellm.set_verbose = True + +user_message = "Hello, how are you?" 
diff --git a/litellm/utils.py b/litellm/utils.py
index 4c9fe5463..076378b1d 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -2,7 +2,10 @@ import dotenv, json, traceback, threading
 import subprocess, os
 import litellm, openai
 import random, uuid, requests
-import datetime
+import datetime, time
+from anthropic import Anthropic
+import tiktoken
+encoding = tiktoken.get_encoding("cl100k_base")
 from openai.error import AuthenticationError, InvalidRequestError, RateLimitError, ServiceUnavailableError, OpenAIError
 ####### ENVIRONMENT VARIABLES ###################
 dotenv.load_dotenv() # Loading env variables using dotenv
@@ -13,6 +16,8 @@ posthog = None
 slack_app = None
 alerts_channel = None
 heliconeLogger = None
+aispendLogger = None
+berrispendLogger = None
 callback_list = []
 user_logger_fn = None
 additional_details = {}
@@ -89,6 +94,7 @@ def client(original_function):
         pass
 
     def wrapper(*args, **kwargs):
+        start_time = None
        try:
            function_setup(*args, **kwargs)
            ## MODEL CALL
@@ -101,7 +107,8 @@ def client(original_function):
            return result
        except Exception as e:
            traceback_exception = traceback.format_exc()
-            my_thread = threading.Thread(target=handle_failure, args=(e, traceback_exception, args, kwargs)) # don't interrupt execution of main thread
+            end_time = datetime.datetime.now()
+            my_thread = threading.Thread(target=handle_failure, args=(e, traceback_exception, start_time, end_time, args, kwargs)) # don't interrupt execution of main thread
            my_thread.start()
            raise e
    return wrapper
@@ -153,7 +160,7 @@ def get_optional_params(
    return optional_params
 
 def set_callbacks(callback_list):
-    global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, heliconeLogger
+    global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, heliconeLogger, aispendLogger, berrispendLogger
    try:
        for callback in callback_list:
            if callback == "sentry":
@@ -193,14 +200,20 @@ def set_callbacks(callback_list):
                print_verbose(f"Initialized Slack App: {slack_app}")
            elif callback == "helicone":
                from .integrations.helicone import HeliconeLogger
                heliconeLogger = HeliconeLogger()
+            elif callback == "aispend":
+                from .integrations.aispend import AISpendLogger
+                aispendLogger = AISpendLogger()
+            elif callback == "berrispend":
+                from .integrations.berrispend import BerriSpendLogger
+                berrispendLogger = BerriSpendLogger()
    except:
        pass
 
 
-def handle_failure(exception, traceback_exception, args, kwargs):
-    global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel
+def handle_failure(exception, traceback_exception, start_time, end_time, args, kwargs):
+    global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, aispendLogger, berrispendLogger
    try:
        # print_verbose(f"handle_failure args: {args}")
        # print_verbose(f"handle_failure kwargs: {kwargs}")
@@ -248,6 +260,33 @@ def handle_failure(exception, traceback_exception, args, kwargs):
            unique_id = str(uuid.uuid4())
            posthog.capture(unique_id, event_name)
            print_verbose(f"successfully logged to PostHog!")
+        elif callback == "berrispend":
+            print_verbose("reaches berrispend for logging!")
+            model = args[0] if len(args) > 0 else kwargs["model"]
+            messages = args[1] if len(args) > 1 else kwargs["messages"]
+            result = {
+                "model": model,
+                "created": time.time(),
+                "error": traceback_exception,
+                "usage": {
+                    "prompt_tokens": prompt_token_calculator(model, messages=messages),
+                    "completion_tokens": 0
+                }
+            }
+            berrispendLogger.log_event(model=model, messages=messages, response_obj=result, start_time=start_time, end_time=end_time, print_verbose=print_verbose)
+        elif callback == "aispend":
+            print_verbose("reaches aispend for logging!")
+            model = args[0] if len(args) > 0 else kwargs["model"]
+            messages = args[1] if len(args) > 1 else kwargs["messages"]
+            result = {
+                "model": model,
+                "created": time.time(),
+                "usage": {
+                    "prompt_tokens": prompt_token_calculator(model, messages=messages),
+                    "completion_tokens": 0
+                }
+            }
+            aispendLogger.log_event(model=model, response_obj=result, start_time=start_time, end_time=end_time, print_verbose=print_verbose)
    except:
        print_verbose(f"Error Occurred while logging failure: {traceback.format_exc()}")
        pass
@@ -264,8 +303,21 @@ def handle_failure(exception, traceback_exception, args, kwargs):
            logging(logger_fn=user_logger_fn, exception=e)
        pass
 
+def prompt_token_calculator(model, messages):
+    # use tiktoken or anthropic's tokenizer depending on the model
+    text = " ".join(message["content"] for message in messages)
+    num_tokens = 0
+    if "claude" in model:
+        anthropic = Anthropic()
+        num_tokens = anthropic.count_tokens(text)
+    else:
+        num_tokens = len(encoding.encode(text))
+    return num_tokens
+
+
+
 def handle_success(args, kwargs, result, start_time, end_time):
-    global heliconeLogger
+    global heliconeLogger, aispendLogger, berrispendLogger
    try:
        success_handler = additional_details.pop("success_handler", None)
        failure_handler = additional_details.pop("failure_handler", None)
@@ -293,8 +345,19 @@ def handle_success(args, kwargs, result, start_time, end_time):
                model = args[0] if len(args) > 0 else kwargs["model"]
                messages = args[1] if len(args) > 1 else kwargs["messages"]
                heliconeLogger.log_success(model=model, messages=messages, response_obj=result, start_time=start_time, end_time=end_time, print_verbose=print_verbose)
-        except:
-            print_verbose(f"Success Callback Error - {traceback.format_exc()}")
+            elif callback == "aispend":
+                print_verbose("reaches aispend for logging!")
+                model = args[0] if len(args) > 0 else kwargs["model"]
+                aispendLogger.log_event(model=model, response_obj=result, start_time=start_time, end_time=end_time, print_verbose=print_verbose)
+            elif callback == "berrispend":
+                print_verbose("reaches berrispend for logging!")
+                model = args[0] if len(args) > 0 else kwargs["model"]
+                messages = args[1] if len(args) > 1 else kwargs["messages"]
+                berrispendLogger.log_event(model=model, messages=messages, response_obj=result, start_time=start_time, end_time=end_time, print_verbose=print_verbose)
+        except Exception as e:
+            ## LOGGING
+            logging(logger_fn=user_logger_fn, exception=e)
+            print_verbose(f"[Non-Blocking] Success Callback Error - {traceback.format_exc()}")
            pass
 
    if success_handler and callable(success_handler):
@@ -303,7 +366,7 @@ def handle_success(args, kwargs, result, start_time, end_time):
    except Exception as e:
        ## LOGGING
        logging(logger_fn=user_logger_fn, exception=e)
-        print_verbose(f"Success Callback Error - {traceback.format_exc()}")
+        print_verbose(f"[Non-Blocking] Success Callback Error - {traceback.format_exc()}")
        pass
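The new prompt_token_calculator helper above counts claude-* prompts with
Anthropic's tokenizer and everything else with tiktoken's cl100k_base encoding.
A standalone sketch of the tiktoken path:

    # standalone sketch of the non-claude token-count fallback
    import tiktoken

    encoding = tiktoken.get_encoding("cl100k_base")
    messages = [{"role": "user", "content": "Hello, how are you?"}]
    text = " ".join(message["content"] for message in messages)
    num_tokens = len(encoding.encode(text))  # used as prompt_tokens on the failure path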
diff --git a/pyproject.toml b/pyproject.toml
index 642da1ee7..c71bb1579 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.343"
+version = "0.1.344"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"
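End-to-end, enabling the new integration from user code mirrors the test file.
A minimal sketch, assuming OPENAI_API_KEY and BERRISPEND_ACCOUNT_ID are
exported in the environment:

    # minimal usage sketch for the berrispend callbacks
    import litellm
    from litellm import completion

    litellm.success_callback = ["berrispend"]
    litellm.failure_callback = ["berrispend"]

    response = completion(model="gpt-3.5-turbo",
                          messages=[{"role": "user", "content": "Hello, how are you?"}])

Logging runs on a separate thread in both the success and failure paths, so a
callback error never blocks or fails the underlying completion call.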