From d792be89de85eb9dbd84429a2525418260b9480a Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Sat, 19 Aug 2023 20:03:31 -0700
Subject: [PATCH] making logging a class - adding input-callbacks

---
 .../observability/supabase_integration.md    |   4 +-
 litellm/__init__.py                          |   4 +-
 litellm/__pycache__/__init__.cpython-311.pyc | Bin 5519 -> 5586 bytes
 litellm/__pycache__/main.cpython-311.pyc     | Bin 26630 -> 28094 bytes
 litellm/__pycache__/utils.cpython-311.pyc    | Bin 49909 -> 52578 bytes
 .../__pycache__/supabase.cpython-311.pyc     | Bin 5733 -> 6648 bytes
 litellm/integrations/supabase.py             |  33 ++-
 litellm/llms/anthropic.py                    |  16 +-
 litellm/llms/huggingface_restapi.py          |  28 +-
 litellm/main.py                              | 267 +++++-------------
 litellm/tests/test_supabase_integration.py   |  39 +--
 litellm/utils.py                             | 161 +++++----
 12 files changed, 237 insertions(+), 315 deletions(-)

diff --git a/docs/my-website/docs/observability/supabase_integration.md b/docs/my-website/docs/observability/supabase_integration.md
index 6ae4f65da..d9fbc2b5a 100644
--- a/docs/my-website/docs/observability/supabase_integration.md
+++ b/docs/my-website/docs/observability/supabase_integration.md
@@ -22,11 +22,13 @@ create table
     messages json null default '{}'::json,
     response json null default '{}'::json,
     end_user text null default ''::text,
+    status text null default ''::text,
     error json null default '{}'::json,
     response_time real null default '0'::real,
     total_cost real null,
     additional_details json null default '{}'::json,
-    constraint request_logs_pkey primary key (id)
+    litellm_call_id text unique,
+    primary key (id)
   ) tablespace pg_default;
 ```

diff --git a/litellm/__init__.py b/litellm/__init__.py
index 688cd084f..7cbb0e996 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -1,6 +1,6 @@
 import threading
 from typing import Callable, List, Optional
-
+input_callback: List[str] = []
 success_callback: List[str] = []
 failure_callback: List[str] = []
 set_verbose = False
@@ -216,7 +216,6 @@ from .timeout import timeout
 from .testing import *
 from .utils import (
     client,
-    logging,
     exception_type,
     get_optional_params,
     modify_integration,
@@ -224,6 +223,7 @@ from .utils import (
     cost_per_token,
     completion_cost,
     get_litellm_params,
+    Logging
 )
 from .main import *  # type: ignore
 from .integrations import *
diff --git a/litellm/__pycache__/__init__.cpython-311.pyc b/litellm/__pycache__/__init__.cpython-311.pyc
index 480251bd565f14a8e15d43b411f5d9c807062199..c998bff4ac6cd91de57b2024a44a884cb573a619 100644
Binary files a/litellm/__pycache__/__init__.cpython-311.pyc and b/litellm/__pycache__/__init__.cpython-311.pyc differ
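The `litellm/__init__.py` hunk above introduces the new `input_callback` hook alongside the existing `success_callback` / `failure_callback` lists, and exports the new `Logging` class. A minimal usage sketch, assuming this patch is applied and the Supabase env vars are set; the custom `my_logger` function is hypothetical, but per `litellm/utils.py` below, any `logger_fn` is expected to accept a single dict:

```python
import litellm
from litellm import completion

# fire the supabase integration when a call is attempted, succeeds, or fails
litellm.input_callback = ["supabase"]
litellm.success_callback = ["supabase"]
litellm.failure_callback = ["supabase"]

# hypothetical custom logger: pre_call/post_call pass it the model_call_details dict
def my_logger(model_call_details: dict):
    print(model_call_details["model"], model_call_details.get("input"))

response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello, how are you?"}],
    logger_fn=my_logger,
)
```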
diff --git a/litellm/__pycache__/main.cpython-311.pyc b/litellm/__pycache__/main.cpython-311.pyc
index da220e0b9ba2ed202b74d8fc9b0f538fcf80368f..e35b110bc194545e0ae20ce38d9c983f87b34e66 100644
Binary files a/litellm/__pycache__/main.cpython-311.pyc and b/litellm/__pycache__/main.cpython-311.pyc differ
diff --git a/litellm/__pycache__/utils.cpython-311.pyc b/litellm/__pycache__/utils.cpython-311.pyc
Binary files a/litellm/__pycache__/utils.cpython-311.pyc and b/litellm/__pycache__/utils.cpython-311.pyc differ
diff --git a/litellm/integrations/__pycache__/supabase.cpython-311.pyc b/litellm/integrations/__pycache__/supabase.cpython-311.pyc
index 3a77f3a0393e6b1298cd9a1aa315cf827b1849bf..43b7b234c0f2b2cff24a3475ade34adf23bc62f4 100644
Binary files a/litellm/integrations/__pycache__/supabase.cpython-311.pyc and b/litellm/integrations/__pycache__/supabase.cpython-311.pyc differ
zx(u!(LTPxNk$#zHAz*Q&ZRUy82G7VX6MrlNnuIt3ks&&*hU?fcdR3LfId{l&MHHVn%yy%ECe{TQZ6(!BATf5~ zSuZ~O<*T={+ea4Kanik1dh_HDnSwW=ZDqG73SK(rsZ)ay4j&-(dN>7QT01(`>*i26 zKAi~9OW<3nbQu49C#B{K%qe^;r{kFs@0qq;zjC3G?hpv&k&I!0r^}1#T~}?)-a8ytn)Wv;v4GdX_~?!!+F=A-+92QulhGD C?qLT2 delta 703 zcmexi{8Wc;IWI340}wbpxu0@PW+UHsCPtOXY|N@G!3>&gn@yP|I2AcS@=(AIq?v*E z^C>0#u3;%- zD5|UBu3<@Gk%St?z)-_A*+9UIFNI?cTP+XBs1(i`p2-yg>5M#+9|)K`^49XDFa$HC zaDlbwU}~>n0vgDLWFT`2H+Hi`VP*l%Q)XZUJ3NJFvWAc=Bk$xqp?oGyw#hGq6clc8 zpO&?7}^Ai@$v zSb+#z5MeWUwXo&nSG;<0oFzqx$*D<+$=SEK((;RP6HDS#E0Q(&i=;s6zh}{YxqC#va1SxU;q*q1Qb6AOwJL_n*3T+28(zcFcgI}F9>S<_@FYmMa)cu z!^Mu_Fpr}ikUC None: + def __init__(self, encoding, logging_obj, api_key=None) -> None: self.encoding = encoding + self.logging_obj = logging_obj self.validate_environment(api_key=api_key) def validate_environment( @@ -74,18 +74,10 @@ class HuggingfaceRestAPILLM: optional_params["max_new_tokens"] = value data = { "inputs": prompt, - # "parameters": optional_params + "parameters": optional_params } ## LOGGING - logging( - model=model, - input=prompt, - additional_args={ - "litellm_params": litellm_params, - "optional_params": optional_params, - }, - logger_fn=logger_fn, - ) + logging.pre_call(input=prompt, api_key=self.api_key, additional_args={"complete_input_dict": data}) ## COMPLETION CALL response = requests.post( completion_url, headers=self.headers, data=json.dumps(data) @@ -94,17 +86,7 @@ class HuggingfaceRestAPILLM: return response.iter_lines() else: ## LOGGING - logging( - model=model, - input=prompt, - additional_args={ - "litellm_params": litellm_params, - "optional_params": optional_params, - "original_response": response.text, - }, - logger_fn=logger_fn, - ) - print_verbose(f"raw model_response: {response.text}") + logging.post_call(input=prompt, api_key=self.api_key, original_response=response.text, additional_args={"complete_input_dict": data}) ## RESPONSE OBJECT completion_response = response.json() print_verbose(f"response: {completion_response}") diff --git a/litellm/main.py b/litellm/main.py index d3e53f4c1..a01bea4ad 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -6,11 +6,11 @@ from copy import deepcopy import litellm from litellm import ( # type: ignore client, - logging, exception_type, timeout, get_optional_params, get_litellm_params, + Logging ) from litellm.utils import ( get_secret, @@ -85,6 +85,7 @@ def completion( azure=False, custom_llm_provider=None, custom_api_base=None, + litellm_call_id=None, # model specific optional params # used by text-bison only top_k=40, @@ -129,8 +130,9 @@ def completion( verbose=verbose, custom_llm_provider=custom_llm_provider, custom_api_base=custom_api_base, + litellm_call_id=litellm_call_id ) - + logging = Logging(model=model, messages=messages, optional_params=optional_params, litellm_params=litellm_params) if custom_llm_provider == "azure": # azure configs openai.api_type = "azure" @@ -144,16 +146,14 @@ def completion( if litellm.api_version is not None else get_secret("AZURE_API_VERSION") ) + if not api_key and litellm.azure_key: + api_key = litellm.azure_key + elif not api_key and get_secret("AZURE_API_KEY"): + api_key = get_secret("AZURE_API_KEY") # set key - openai.api_key = api_key or litellm.azure_key or get_secret("AZURE_API_KEY") + openai.api_key = api_key ## LOGGING - logging( - model=model, - input=messages, - additional_args=optional_params, - custom_llm_provider=custom_llm_provider, - logger_fn=logger_fn, - ) + logging.pre_call(input=messages, api_key=openai.api_key, additional_args={"headers": litellm.headers, 
"api_version": openai.api_version, "api_base": openai.api_base}) ## COMPLETION CALL if litellm.headers: response = openai.ChatCompletion.create( @@ -166,6 +166,8 @@ def completion( response = openai.ChatCompletion.create( model=model, messages=messages, **optional_params ) + ## LOGGING + logging.post_call(input=messages, api_key=openai.api_key, original_response=response, additional_args={"headers": litellm.headers, "api_version": openai.api_version, "api_base": openai.api_base}) elif ( model in litellm.open_ai_chat_completion_models or custom_llm_provider == "custom_openai" @@ -182,18 +184,15 @@ def completion( if litellm.organization: openai.organization = litellm.organization # set API KEY - openai.api_key = ( - api_key or litellm.openai_key or get_secret("OPENAI_API_KEY") - ) + if not api_key and litellm.openai_key: + api_key = litellm.openai_key + elif not api_key and get_secret("AZURE_API_KEY"): + api_key = get_secret("OPENAI_API_KEY") + + openai.api_key = api_key ## LOGGING - logging( - model=model, - input=messages, - additional_args=args, - custom_llm_provider=custom_llm_provider, - logger_fn=logger_fn, - ) + logging.pre_call(input=messages, api_key=api_key, additional_args={"headers": litellm.headers, "api_base": api_base}) ## COMPLETION CALL if litellm.headers: response = openai.ChatCompletion.create( @@ -206,6 +205,8 @@ def completion( response = openai.ChatCompletion.create( model=model, messages=messages, **optional_params ) + ## LOGGING + logging.post_call(input=messages, api_key=api_key, original_response=response, additional_args={"headers": litellm.headers}) elif model in litellm.open_ai_text_completion_models: openai.api_type = "openai" openai.api_base = ( @@ -214,20 +215,19 @@ def completion( else "https://api.openai.com/v1" ) openai.api_version = None - openai.api_key = ( - api_key or litellm.openai_key or get_secret("OPENAI_API_KEY") - ) + # set API KEY + if not api_key and litellm.openai_key: + api_key = litellm.openai_key + elif not api_key and get_secret("AZURE_API_KEY"): + api_key = get_secret("OPENAI_API_KEY") + + openai.api_key = api_key + if litellm.organization: openai.organization = litellm.organization prompt = " ".join([message["content"] for message in messages]) ## LOGGING - logging( - model=model, - input=prompt, - additional_args=optional_params, - custom_llm_provider=custom_llm_provider, - logger_fn=logger_fn, - ) + logging.pre_call(input=prompt, api_key=api_key, additional_args={"openai_organization": litellm.organization, "headers": litellm.headers, "api_base": openai.api_base, "api_type": openai.api_type}) ## COMPLETION CALL if litellm.headers: response = openai.Completion.create( @@ -237,19 +237,10 @@ def completion( ) else: response = openai.Completion.create(model=model, prompt=prompt) - completion_response = response["choices"][0]["text"] ## LOGGING - logging( - model=model, - input=prompt, - custom_llm_provider=custom_llm_provider, - additional_args={ - "max_tokens": max_tokens, - "original_response": completion_response, - }, - logger_fn=logger_fn, - ) + logging.post_call(input=prompt, api_key=api_key, original_response=response, additional_args={"openai_organization": litellm.organization, "headers": litellm.headers, "api_base": openai.api_base, "api_type": openai.api_type}) ## RESPONSE OBJECT + completion_response = response["choices"][0]["text"] model_response["choices"][0]["message"]["content"] = completion_response model_response["created"] = response["created"] model_response["model"] = model @@ -278,13 +269,7 @@ def completion( 
input["max_length"] = max_tokens # for t5 models input["max_new_tokens"] = max_tokens # for llama2 models ## LOGGING - logging( - model=model, - input=input, - custom_llm_provider=custom_llm_provider, - additional_args={"max_tokens": max_tokens}, - logger_fn=logger_fn, - ) + logging.pre_call(input=prompt, api_key=replicate_key, additional_args={"complete_input_dict": input, "max_tokens": max_tokens}) ## COMPLETION CALL output = replicate.run(model, input=input) if "stream" in optional_params and optional_params["stream"] == True: @@ -297,16 +282,8 @@ def completion( response += item completion_response = response ## LOGGING - logging( - model=model, - input=prompt, - custom_llm_provider=custom_llm_provider, - additional_args={ - "max_tokens": max_tokens, - "original_response": completion_response, - }, - logger_fn=logger_fn, - ) + logging.post_call(input=prompt, api_key=replicate_key, original_response=completion_response, additional_args={"complete_input_dict": input, "max_tokens": max_tokens}) + ## USAGE prompt_tokens = len(encoding.encode(prompt)) completion_tokens = len(encoding.encode(completion_response)) ## RESPONSE OBJECT @@ -327,6 +304,7 @@ def completion( encoding=encoding, default_max_tokens_to_sample=litellm.max_tokens, api_key=anthropic_key, + logging_obj = logging # model call logging done inside the class as we make need to modify I/O to fit anthropic's requirements ) model_response = anthropic_client.completion( model=model, @@ -362,13 +340,7 @@ def completion( "OR_API_KEY" ) ## LOGGING - logging( - model=model, - input=messages, - additional_args=optional_params, - custom_llm_provider=custom_llm_provider, - logger_fn=logger_fn, - ) + logging.pre_call(input=messages, api_key=openai.api_key) ## COMPLETION CALL if litellm.headers: response = openai.ChatCompletion.create( @@ -395,6 +367,8 @@ def completion( }, **optional_params, ) + ## LOGGING + logging.post_call(input=messages, api_key=openai.api_key, original_response=response) elif model in litellm.cohere_models: # import cohere/if it fails then pip install cohere install_and_import("cohere") @@ -409,31 +383,17 @@ def completion( co = cohere.Client(cohere_key) prompt = " ".join([message["content"] for message in messages]) ## LOGGING - logging( - model=model, - input=prompt, - custom_llm_provider=custom_llm_provider, - logger_fn=logger_fn, - ) + logging.pre_call(input=prompt, api_key=cohere_key) ## COMPLETION CALL response = co.generate(model=model, prompt=prompt, **optional_params) if "stream" in optional_params and optional_params["stream"] == True: # don't try to access stream object, response = CustomStreamWrapper(response, model) return response - - completion_response = response[0].text ## LOGGING - logging( - model=model, - input=prompt, - custom_llm_provider=custom_llm_provider, - additional_args={ - "max_tokens": max_tokens, - "original_response": completion_response, - }, - logger_fn=logger_fn, - ) + logging.post_call(input=prompt, api_key=cohere_key, original_response=response) + ## USAGE + completion_response = response[0].text prompt_tokens = len(encoding.encode(prompt)) completion_tokens = len(encoding.encode(completion_response)) ## RESPONSE OBJECT @@ -457,7 +417,7 @@ def completion( or os.environ.get("HUGGINGFACE_API_KEY") ) huggingface_client = HuggingfaceRestAPILLM( - encoding=encoding, api_key=huggingface_key + encoding=encoding, api_key=huggingface_key, logging_obj=logging ) model_response = huggingface_client.completion( model=model, @@ -492,12 +452,7 @@ def completion( ) # TODO: Add chat support for 
together AI ## LOGGING - logging( - model=model, - input=prompt, - custom_llm_provider=custom_llm_provider, - logger_fn=logger_fn, - ) + logging.pre_call(input=prompt, api_key=TOGETHER_AI_TOKEN) if stream == True: return together_ai_completion_streaming( { @@ -519,17 +474,7 @@ def completion( headers=headers, ) ## LOGGING - logging( - model=model, - input=prompt, - custom_llm_provider=custom_llm_provider, - additional_args={ - "max_tokens": max_tokens, - "original_response": res.text, - }, - logger_fn=logger_fn, - ) - + logging.post_call(input=prompt, api_key=TOGETHER_AI_TOKEN, original_response=res.text) # make this safe for reading, if output does not exist raise an error json_response = res.json() if "output" not in json_response: @@ -562,16 +507,7 @@ def completion( prompt = " ".join([message["content"] for message in messages]) ## LOGGING - logging( - model=model, - input=prompt, - custom_llm_provider=custom_llm_provider, - additional_args={ - "litellm_params": litellm_params, - "optional_params": optional_params, - }, - logger_fn=logger_fn, - ) + logging.pre_call(input=prompt, api_key=None) chat_model = ChatModel.from_pretrained(model) @@ -579,16 +515,7 @@ def completion( completion_response = chat.send_message(prompt, **optional_params) ## LOGGING - logging( - model=model, - input=prompt, - custom_llm_provider=custom_llm_provider, - additional_args={ - "max_tokens": max_tokens, - "original_response": completion_response, - }, - logger_fn=logger_fn, - ) + logging.post_call(input=prompt, api_key=None, original_response=completion_response) ## RESPONSE OBJECT model_response["choices"][0]["message"]["content"] = completion_response @@ -607,27 +534,13 @@ def completion( prompt = " ".join([message["content"] for message in messages]) ## LOGGING - logging( - model=model, - input=prompt, - custom_llm_provider=custom_llm_provider, - logger_fn=logger_fn, - ) + logging.pre_call(input=prompt, api_key=None) + vertex_model = TextGenerationModel.from_pretrained(model) completion_response = vertex_model.predict(prompt, **optional_params) ## LOGGING - logging( - model=model, - input=prompt, - custom_llm_provider=custom_llm_provider, - additional_args={ - "max_tokens": max_tokens, - "original_response": completion_response, - }, - logger_fn=logger_fn, - ) - + logging.post_call(input=prompt, api_key=None, original_response=completion_response) ## RESPONSE OBJECT model_response["choices"][0]["message"]["content"] = completion_response model_response["created"] = time.time() @@ -641,12 +554,7 @@ def completion( prompt = " ".join([message["content"] for message in messages]) ## LOGGING - logging( - model=model, - input=prompt, - custom_llm_provider=custom_llm_provider, - logger_fn=logger_fn, - ) + logging.pre_call(input=prompt, api_key=ai21.api_key) ai21_response = ai21.Completion.execute( model=model, @@ -655,16 +563,7 @@ def completion( completion_response = ai21_response["completions"][0]["data"]["text"] ## LOGGING - logging( - model=model, - input=prompt, - custom_llm_provider=custom_llm_provider, - additional_args={ - "max_tokens": max_tokens, - "original_response": completion_response, - }, - logger_fn=logger_fn, - ) + logging.post_call(input=prompt, api_key=ai21.api_key, original_response=completion_response) ## RESPONSE OBJECT model_response["choices"][0]["message"]["content"] = completion_response @@ -678,7 +577,8 @@ def completion( prompt = " ".join([message["content"] for message in messages]) ## LOGGING - logging(model=model, input=prompt, azure=azure, logger_fn=logger_fn) + 
logging.pre_call(input=prompt, api_key=None, additional_args={"endpoint": endpoint}) + generator = get_ollama_response_stream(endpoint, model, prompt) # assume all responses are streamed return generator @@ -693,12 +593,7 @@ def completion( prompt = " ".join([message["content"] for message in messages]) ## LOGGING - logging( - model=model, - input=prompt, - custom_llm_provider=custom_llm_provider, - logger_fn=logger_fn, - ) + logging.pre_call(input=prompt, api_key=base_ten_key) base_ten__model = baseten.deployed_model_version_id(model) @@ -708,16 +603,8 @@ def completion( if type(completion_response) == dict: completion_response = completion_response["generated_text"] - logging( - model=model, - input=prompt, - custom_llm_provider=custom_llm_provider, - additional_args={ - "max_tokens": max_tokens, - "original_response": completion_response, - }, - logger_fn=logger_fn, - ) + ## LOGGING + logging.post_call(input=prompt, api_key=base_ten_key, original_response=completion_response) ## RESPONSE OBJECT model_response["choices"][0]["message"]["content"] = completion_response @@ -734,26 +621,14 @@ def completion( prompt = " ".join([message["content"] for message in messages]) ## LOGGING - logging( - model=model, - input=prompt, - custom_llm_provider=custom_llm_provider, - logger_fn=logger_fn, - ) + logging.pre_call(input=prompt, api_key=None, additional_args={"url": url, "max_new_tokens": 100}) + response = requests.post( url, data={"inputs": prompt, "max_new_tokens": 100, "model": model} ) ## LOGGING - logging( - model=model, - input=prompt, - custom_llm_provider=custom_llm_provider, - additional_args={ - "max_tokens": max_tokens, - "original_response": response, - }, - logger_fn=logger_fn, - ) + logging.post_call(input=prompt, api_key=None, original_response=response.text, additional_args={"url": url, "max_new_tokens": 100}) + completion_response = response.json()["outputs"] # RESPONSE OBJECT @@ -762,13 +637,6 @@ def completion( model_response["model"] = model response = model_response else: - ## LOGGING - logging( - model=model, - input=messages, - custom_llm_provider=custom_llm_provider, - logger_fn=logger_fn, - ) args = locals() raise ValueError( f"Unable to map your input to a model. 
Check your input - {args}"
             )
         return response
     except Exception as e:
         ## LOGGING
-        logging(
-            model=model,
-            input=messages,
-            custom_llm_provider=custom_llm_provider,
-            additional_args={"max_tokens": max_tokens},
-            logger_fn=logger_fn,
-            exception=e,
-        )
+        logging.post_call(input=messages, api_key=api_key, original_response=e)
         ## Map to OpenAI Exception
         raise exception_type(
             model=model, custom_llm_provider=custom_llm_provider, original_exception=e
@@ -825,7 +686,8 @@ def embedding(model, input=[], azure=False, force_timeout=60, logger_fn=None):
         openai.api_version = get_secret("AZURE_API_VERSION")
         openai.api_key = get_secret("AZURE_API_KEY")
         ## LOGGING
-        logging(model=model, input=input, azure=azure, logger_fn=logger_fn)
+        logging = Logging(model=model, messages=input, optional_params={}, litellm_params={"logger_fn": logger_fn, "litellm_call_id": None})
+        logging.pre_call(input=input, api_key=openai.api_key)
         ## EMBEDDING CALL
         response = openai.Embedding.create(input=input, engine=model)
         print_verbose(f"response_value: {str(response)[:50]}")
diff --git a/litellm/tests/test_supabase_integration.py b/litellm/tests/test_supabase_integration.py
index 882d0bbc6..2326bcfdf 100644
--- a/litellm/tests/test_supabase_integration.py
+++ b/litellm/tests/test_supabase_integration.py
@@ -1,27 +1,28 @@
-# #### What this tests ####
-# # This tests if logging to the helicone integration actually works
-# # pytest mistakes intentional bad calls as failed tests -> [TODO] fix this
-# import sys, os
-# import traceback
-# import pytest
+#### What this tests ####
+# This tests if logging to the supabase integration actually works
+# pytest mistakes intentional bad calls as failed tests -> [TODO] fix this
+import sys, os
+import traceback
+import pytest

-# sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
-# import litellm
-# from litellm import embedding, completion
+sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
+import litellm
+from litellm import embedding, completion

-# litellm.success_callback = ["supabase"]
-# litellm.failure_callback = ["supabase"]
+litellm.input_callback = ["supabase"]
+litellm.success_callback = ["supabase"]
+litellm.failure_callback = ["supabase"]

-# litellm.modify_integration("supabase",{"table_name": "litellm_logs"})
+litellm.modify_integration("supabase",{"table_name": "test_table"})

-# litellm.set_verbose = True
+litellm.set_verbose = True

-# user_message = "Hello, how are you?"
-# messages = [{ "content": user_message,"role": "user"}]
+user_message = "Hello, how are you?"
+messages = [{ "content": user_message,"role": "user"}] -# #openai call -# response = completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}]) +#openai call +response = completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}]) -# #bad request call -# response = completion(model="chatgpt-test", messages=[{"role": "user", "content": "Hi 👋 - i'm a bad request"}]) +#bad request call +response = completion(model="chatgpt-test", messages=[{"role": "user", "content": "Hi 👋 - i'm a bad request"}]) diff --git a/litellm/utils.py b/litellm/utils.py index 5346ce62a..d340c2df3 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -135,48 +135,105 @@ def install_and_import(package: str): ####### LOGGING ################### # Logging function -> log the exact model details + what's being sent | Non-Blocking -def logging( - model=None, - input=None, - custom_llm_provider=None, - azure=False, +class Logging: + def __init__(self, model, messages, optional_params, litellm_params): + self.model = model + self.messages = messages + self.optional_params = optional_params + self.litellm_params = litellm_params + self.logger_fn = litellm_params["logger_fn"] + self.model_call_details = { + "model": model, + "messages": messages, + "optional_params": self.optional_params, + "litellm_params": self.litellm_params, + } + + def pre_call(self, input, api_key, additional_args={}): + try: + print(f"logging pre call for model: {self.model}") + self.model_call_details["input"] = input + self.model_call_details["api_key"] = api_key + self.model_call_details["additional_args"] = additional_args + + ## User Logging -> if you pass in a custom logging function + print_verbose( + f"Logging Details: logger_fn - {self.logger_fn} | callable(logger_fn) - {callable(self.logger_fn)}" + ) + if self.logger_fn and callable(self.logger_fn): + try: + self.logger_fn( + self.model_call_details + ) # Expectation: any logger function passed in by the user should accept a dict object + except Exception as e: + print_verbose( + f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}" + ) + + ## Input Integration Logging -> If you want to log the fact that an attempt to call the model was made + for callback in litellm.input_callback: + try: + if callback == "supabase": + print_verbose("reaches supabase for logging!") + model = self.model + messages = self.messages + print(f"litellm._thread_context: {litellm._thread_context}") + supabaseClient.input_log_event( + model=model, + messages=messages, + end_user=litellm._thread_context.user, + litellm_call_id=self.litellm_params["litellm_call_id"], + print_verbose=print_verbose, + ) + pass + except: + pass + except: + print_verbose( + f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}" + ) + pass + + def post_call(self, input, api_key, original_response, additional_args={}): + # Do something here + try: + self.model_call_details["input"] = input + self.model_call_details["api_key"] = api_key + self.model_call_details["original_response"] = original_response + self.model_call_details["additional_args"] = additional_args + + ## User Logging -> if you pass in a custom logging function + print_verbose( + f"Logging Details: logger_fn - {self.logger_fn} | callable(logger_fn) - {callable(self.logger_fn)}" + ) + if self.logger_fn and callable(self.logger_fn): + try: + self.logger_fn( + self.model_call_details + ) # Expectation: any logger function 
passed in by the user should accept a dict object + except Exception as e: + print_verbose( + f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}" + ) + except: + print_verbose( + f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}" + ) + pass + + # Add more methods as needed + + +def exception_logging( additional_args={}, logger_fn=None, exception=None, ): try: model_call_details = {} - if model: - model_call_details["model"] = model - if azure: - model_call_details["azure"] = azure - if custom_llm_provider: - model_call_details["custom_llm_provider"] = custom_llm_provider if exception: model_call_details["exception"] = exception - if input: - model_call_details["input"] = input - - if len(additional_args): - model_call_details["additional_args"] = additional_args - # log additional call details -> api key, etc. - if model: - if ( - azure == True - or model in litellm.open_ai_chat_completion_models - or model in litellm.open_ai_chat_completion_models - or model in litellm.open_ai_embedding_models - ): - model_call_details["api_type"] = openai.api_type - model_call_details["api_base"] = openai.api_base - model_call_details["api_version"] = openai.api_version - model_call_details["api_key"] = openai.api_key - elif "replicate" in model: - model_call_details["api_key"] = os.environ.get("REPLICATE_API_TOKEN") - elif model in litellm.anthropic_models: - model_call_details["api_key"] = os.environ.get("ANTHROPIC_API_KEY") - elif model in litellm.cohere_models: - model_call_details["api_key"] = os.environ.get("COHERE_API_KEY") + model_call_details["additional_args"] = additional_args ## User Logging -> if you pass in a custom logging function or want to use sentry breadcrumbs print_verbose( f"Logging Details: logger_fn - {logger_fn} | callable(logger_fn) - {callable(logger_fn)}" @@ -206,10 +263,10 @@ def client(original_function): try: global callback_list, add_breadcrumb, user_logger_fn if ( - len(litellm.success_callback) > 0 or len(litellm.failure_callback) > 0 + len(litellm.input_callback) > 0 or len(litellm.success_callback) > 0 or len(litellm.failure_callback) > 0 ) and len(callback_list) == 0: callback_list = list( - set(litellm.success_callback + litellm.failure_callback) + set(litellm.input_callback + litellm.success_callback + litellm.failure_callback) ) set_callbacks( callback_list=callback_list, @@ -299,13 +356,16 @@ def client(original_function): result = None try: function_setup(*args, **kwargs) - ## MODEL CALL + litellm_call_id = str(uuid.uuid4()) + kwargs["litellm_call_id"] = litellm_call_id + ## [OPTIONAL] CHECK CACHE start_time = datetime.datetime.now() if (litellm.caching or litellm.caching_with_models) and ( cached_result := check_cache(*args, **kwargs) ) is not None: result = cached_result else: + ## MODEL CALL result = original_function(*args, **kwargs) end_time = datetime.datetime.now() ## Add response to CACHE @@ -399,6 +459,7 @@ def get_litellm_params( together_ai=False, custom_llm_provider=None, custom_api_base=None, + litellm_call_id=None, ): litellm_params = { "return_async": return_async, @@ -408,6 +469,7 @@ def get_litellm_params( "verbose": verbose, "custom_llm_provider": custom_llm_provider, "custom_api_base": custom_api_base, + "litellm_call_id": litellm_call_id } return litellm_params @@ -567,7 +629,8 @@ def set_callbacks(callback_list): global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, heliconeLogger, aispendLogger, 
berrispendLogger, supabaseClient try: for callback in callback_list: - if callback == "sentry" or "SENTRY_API_URL" in os.environ: + print(f"callback: {callback}") + if callback == "sentry": try: import sentry_sdk except ImportError: @@ -623,6 +686,7 @@ def set_callbacks(callback_list): elif callback == "berrispend": berrispendLogger = BerriSpendLogger() elif callback == "supabase": + print(f"instantiating supabase") supabaseClient = Supabase() except Exception as e: raise e @@ -743,7 +807,6 @@ def handle_failure(exception, traceback_exception, start_time, end_time, args, k "completion_tokens": 0, }, } - print(f"litellm._thread_context: {litellm._thread_context}") supabaseClient.log_event( model=model, messages=messages, @@ -751,9 +814,9 @@ def handle_failure(exception, traceback_exception, start_time, end_time, args, k response_obj=result, start_time=start_time, end_time=end_time, + litellm_call_id=kwargs["litellm_call_id"], print_verbose=print_verbose, ) - except: print_verbose( f"Error Occurred while logging failure: {traceback.format_exc()}" @@ -769,7 +832,7 @@ def handle_failure(exception, traceback_exception, start_time, end_time, args, k pass except Exception as e: ## LOGGING - logging(logger_fn=user_logger_fn, exception=e) + exception_logging(logger_fn=user_logger_fn, exception=e) pass @@ -849,11 +912,12 @@ def handle_success(args, kwargs, result, start_time, end_time): response_obj=result, start_time=start_time, end_time=end_time, + litellm_call_id=kwargs["litellm_call_id"], print_verbose=print_verbose, ) except Exception as e: ## LOGGING - logging(logger_fn=user_logger_fn, exception=e) + exception_logging(logger_fn=user_logger_fn, exception=e) print_verbose( f"[Non-Blocking] Success Callback Error - {traceback.format_exc()}" ) @@ -864,7 +928,7 @@ def handle_success(args, kwargs, result, start_time, end_time): pass except Exception as e: ## LOGGING - logging(logger_fn=user_logger_fn, exception=e) + exception_logging(logger_fn=user_logger_fn, exception=e) print_verbose( f"[Non-Blocking] Success Callback Error - {traceback.format_exc()}" ) @@ -912,15 +976,6 @@ def exception_type(model, original_exception, custom_llm_provider): exception_type = type(original_exception).__name__ else: exception_type = "" - logging( - model=model, - additional_args={ - "error_str": error_str, - "exception_type": exception_type, - "original_exception": original_exception, - }, - logger_fn=user_logger_fn, - ) if "claude" in model: # one of the anthropics if hasattr(original_exception, "status_code"): print_verbose(f"status_code: {original_exception.status_code}") @@ -1030,7 +1085,7 @@ def exception_type(model, original_exception, custom_llm_provider): raise original_exception except Exception as e: ## LOGGING - logging( + exception_logging( logger_fn=user_logger_fn, additional_args={ "exception_mapping_worked": exception_mapping_worked,
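Taken together: the `client()` wrapper stamps every request with a `litellm_call_id`, `completion()` builds one `Logging` object per call, and each provider branch calls `pre_call` before and `post_call` after the model request, which is what fires `litellm.input_callback`. A condensed sketch of that lifecycle, assuming this patch is applied; the API key, call id, and fake response below are placeholders, and `completion()` normally does all of this internally:

```python
import litellm
from litellm.utils import Logging

litellm.input_callback = []  # add "supabase" here to exercise input_log_event
messages = [{"role": "user", "content": "Hello, how are you?"}]

# completion() builds one Logging object per call (see litellm/main.py above)
logging_obj = Logging(
    model="gpt-3.5-turbo",
    messages=messages,
    optional_params={},
    litellm_params={
        "logger_fn": None,
        "litellm_call_id": "placeholder-uuid",  # normally str(uuid.uuid4()) set by the client() wrapper
    },
)

# before the provider request: fires logger_fn and litellm.input_callback
logging_obj.pre_call(input=messages, api_key="sk-placeholder")

# after the provider request: records the raw response on the same call record
logging_obj.post_call(
    input=messages,
    api_key="sk-placeholder",
    original_response={"choices": [{"message": {"content": "Hi!"}}]},
)
```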