From ec5e7aa4a9b8a8e5afcce2d42418b4ea8883db04 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Fri, 13 Oct 2023 21:56:43 -0700
Subject: [PATCH] fix(openai.py): add support for exception mapping for
 openai-compatible apis via http calls

---
 litellm/__pycache__/main.cpython-311.pyc  |  Bin 52044 -> 52044 bytes
 litellm/__pycache__/utils.cpython-311.pyc |  Bin 160913 -> 163527 bytes
 litellm/llms/openai.py                    |   77 +-
 litellm/main.py                           |    2 +-
 litellm/proxy/api_log.json                | 4797 +++++++++++++++++++++
 litellm/proxy/proxy_server.py             |    3 -
 litellm/tests/test_completion.py          |   22 +
 litellm/utils.py                          |   74 +-
 8 files changed, 4943 insertions(+), 32 deletions(-)

diff --git a/litellm/__pycache__/main.cpython-311.pyc b/litellm/__pycache__/main.cpython-311.pyc
index f25446d36e4017801e793acd4652bfaeb20d014a..6da6a6e021e77565fe0bdca00a6bc0e331b2554c 100644
Binary files a/litellm/__pycache__/main.cpython-311.pyc and b/litellm/__pycache__/main.cpython-311.pyc differ
diff --git a/litellm/__pycache__/utils.cpython-311.pyc b/litellm/__pycache__/utils.cpython-311.pyc
Binary files a/litellm/__pycache__/utils.cpython-311.pyc and b/litellm/__pycache__/utils.cpython-311.pyc differ
zh7TE}X;SlFxhRJpG@XmDGRVo7E|D{l&b)sx)ZyDjqCLETfg)O3e`nTJOwAE-87fD} zf`oDOWI3w5y4*jy*h5QR%_=VSR?n((a}dwfhl~{&MmiD8CyE|zD0#9tMMv8wEV+kaFXlQ$I_$#|r`X*~MsOqZ<>+n= zEYQYZ?5lLrds>;>!;h1<$m-_O*0+^1ZXGIkFzFE}ks z@lpxc=-yAmc5(-*^qO=SFM~h`ua zTbf+!FR-sa#eK!%J}}|P646c^iPU{1;t`{X1?Z9Gt^Tok>`YM)^z8I$IXS)-e=Zk& zWwp8s`&NiFO6BgU5J|8f@2jAdxI}-uLO7uXyZ2XH_;F0HqV@3-PN)*S;cHx4C6euO zr;)p+d{{-5OK9o?q2-hZ`@P;OpDGVm`QR+i_4C|LzDiys zYmddG)%9)GIMbz78v`iUfg1~3KJmA%T zQ15||5}F)&^s0Q4%JVL`e&5G1AGzD6XZpl(Q{E_msAdD>GlPTQH)n43ZLSW|jT5Ig zI!g{tEDaWa2dC>J7SI9EdY~2BHXHr}hsEN!T2at-p^>Y9A1_M&l{;$(>L1rqvhd$d CU;ShN diff --git a/litellm/llms/openai.py b/litellm/llms/openai.py index 257238a544..bafb6e3770 100644 --- a/litellm/llms/openai.py +++ b/litellm/llms/openai.py @@ -246,30 +246,53 @@ class OpenAIChatCompletion(BaseLLM): logger_fn=None): super().completion() headers = self.validate_environment(api_key=api_key) - data = { - "messages": messages, - **optional_params - } - if "stream" in optional_params and optional_params["stream"] == True: - response = self._client_session.post( - url=f"{api_base}/chat/completions", - json=data, - headers=headers, - stream=optional_params["stream"] - ) - if response.status_code != 200: - raise CustomOpenAIError(status_code=response.status_code, message=response.text) - - ## RESPONSE OBJECT - return response.iter_lines() - else: - response = self._client_session.post( - url=f"{api_base}/chat/completions", - json=data, - headers=headers, - ) - if response.status_code != 200: - raise CustomOpenAIError(status_code=response.status_code, message=response.text) - - ## RESPONSE OBJECT - return self.convert_to_model_response_object(response_object=response.json(), model_response_object=model_response) \ No newline at end of file + + for _ in range(2): # if call fails due to alternating messages, retry with reformatted message + data = { + "model": model, + "messages": messages, + **optional_params + } + try: + if "stream" in optional_params and optional_params["stream"] == True: + response = self._client_session.post( + url=f"{api_base}/chat/completions", + json=data, + headers=headers, + stream=optional_params["stream"] + ) + if response.status_code != 200: + raise CustomOpenAIError(status_code=response.status_code, message=response.text) + + ## RESPONSE OBJECT + return response.iter_lines() + else: + response = self._client_session.post( + url=f"{api_base}/chat/completions", + json=data, + headers=headers, + ) + if response.status_code != 200: + raise CustomOpenAIError(status_code=response.status_code, message=response.text) + + ## RESPONSE OBJECT + return self.convert_to_model_response_object(response_object=response.json(), model_response_object=model_response) + except Exception as e: + if "Conversation roles must alternate user/assistant" in str(e) or "user and assistant roles should be alternating" in str(e): + # reformat messages to ensure user/assistant are alternating, if there's either 2 consecutive 'user' messages or 2 consecutive 'assistant' message, add a blank 'user' or 'assistant' message to ensure compatibility + new_messages = [] + for i in range(len(messages)-1): + new_messages.append(messages[i]) + if messages[i]["role"] == messages[i+1]["role"]: + if messages[i]["role"] == "user": + new_messages.append({"role": "assistant", "content": ""}) + else: + new_messages.append({"role": "user", "content": ""}) + new_messages.append(messages[-1]) + messages = new_messages + elif "Last message must have role `user`" in str(e): + new_messages = messages + new_messages.append({"role": 
"user", "content": ""}) + messages = new_messages + else: + raise e diff --git a/litellm/main.py b/litellm/main.py index 137b5b0d83..b02e8cebd8 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -445,7 +445,7 @@ def completion( raise e if "stream" in optional_params and optional_params["stream"] == True: - response = CustomStreamWrapper(response, model, custom_llm_provider="openai", logging_obj=logging) + response = CustomStreamWrapper(response, model, custom_llm_provider=custom_llm_provider, logging_obj=logging) return response ## LOGGING logging.post_call( diff --git a/litellm/proxy/api_log.json b/litellm/proxy/api_log.json index fb29cac248..85520a421d 100644 --- a/litellm/proxy/api_log.json +++ b/litellm/proxy/api_log.json @@ -1449,5 +1449,4802 @@ "stop_reason": "stop_sequence" } } + }, + "20231013211247627627": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "user", + "content": "this is a test request, acknowledge that you got it" + } + ], + "optional_params": {}, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "6e64f6cb-70b5-4e05-a192-6d4c80ff7614", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "user", + "content": "this is a test request, acknowledge that you got it" + } + ], + "api_key": "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", + "additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + }, + "post_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "user", + "content": "this is a test request, acknowledge that you got it" + } + ], + "optional_params": {}, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "6e64f6cb-70b5-4e05-a192-6d4c80ff7614", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "user", + "content": "this is a test request, acknowledge that you got it" + } + ], + "api_key": "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", + "additional_args": { + "headers": null + }, + "log_event_type": "post_api_call", + "original_response": { + "id": "1644e83e-38ac-4f2f-88eb-bd7fbebf6be1", + "object": "chat.completion", + "choices": [ + { + "finish_reason": "stop", + "index": 0, + "message": { + "content": "Acknowledged! 
I have received the test request.", + "role": "assistant", + "logprobs": null + } + } + ], + "created": 1697256767, + "model": "mistral-7b-instruct", + "usage": { + "prompt_tokens": 19, + "completion_tokens": 13, + "total_tokens": 32 + } + } + } + }, + "20231013211248238166": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "user", + "content": "this is a test request, acknowledge that you got it" + } + ], + "optional_params": { + "stream": true + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "91531fff-101f-4b61-9c50-f61b36e68455", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "user", + "content": "this is a test request, acknowledge that you got it" + } + ], + "api_key": "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", + "additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + } + }, + "20231013211426403031": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "user", + "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" + } + ], + "optional_params": { + "temperature": 0.5, + "stream": true, + "max_tokens": 1024 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "735637ff-cbe4-4a14-81c1-738d35370072", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "user", + "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" + } + ], + "api_key": "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", + "additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + } + }, + "20231013211444781151": { + "pre_api_call": { + "model": "anthropic.claude-v2", + "messages": [ + { + "role": "user", + "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" + }, + { + "role": "user", + "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" 
+ } + ], + "optional_params": { + "max_tokens_to_sample": 1024, + "temperature": 0.5, + "stream": true + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "bedrock", + "api_base": null, + "litellm_call_id": "8b2c9e5e-535c-4d37-b556-98c71d83f86d", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": "\n\nHuman: \n\nHuman: Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n\nHuman: Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n\nAssistant: ", + "api_key": "", + "additional_args": { + "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: Traceback (most recent call last):\\n File \\\"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\\\", line 761, in _update_chunk_length\\n self.chunk_left = int(line, 16)\\n ^^^^^^^^^^^^^\\nValueError: invalid literal for int() with base 16: b''\\n\\n---\\n\\nwhat is happening?\\n\\nHuman: Traceback (most recent call last):\\n File \\\"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\\\", line 761, in _update_chunk_length\\n self.chunk_left = int(line, 16)\\n ^^^^^^^^^^^^^\\nValueError: invalid literal for int() with base 16: b''\\n\\n---\\n\\nwhat is happening?\\n\\nAssistant: \", \"max_tokens_to_sample\": 1024, \"temperature\": 0.5}" + }, + "log_event_type": "pre_api_call" + }, + "post_api_call": { + "model": "anthropic.claude-v2", + "messages": [ + { + "role": "user", + "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" + }, + { + "role": "user", + "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" 
+ } + ], + "optional_params": { + "max_tokens_to_sample": 1024, + "temperature": 0.5, + "stream": true + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "bedrock", + "api_base": null, + "litellm_call_id": "8b2c9e5e-535c-4d37-b556-98c71d83f86d", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": null, + "api_key": null, + "additional_args": {}, + "log_event_type": "post_api_call", + "original_response": "", + "complete_streaming_response": { + "id": "chatcmpl-e4c0a2d9-ed36-4f97-9a0a-f4380785d2d1", + "object": "chat.completion.chunk", + "created": 1697256885, + "model": "anthropic.claude-v2", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": " This traceback indicates an error occurred while urllib3 was trying to parse the chunked response from a server. Specifically:\n\n- In urllib3's response.py module, the _update_chunk_length method is trying to parse the length of the next chunk from the response. \n\n- It is calling int(line, 16) to parse the chunk length, which is represented in hex. \n\n- But the line variable is empty (b''), so it cannot be parsed as a hex integer, resulting in a ValueError.\n\nSo most likely the server returned an invalid chunked response, with an empty chunk size rather than a valid hex value. This is causing urllib3 to error while trying to parse the response.\n\nThe root cause could be a server-side issue returning invalid chunked responses, or possibly a network issue resulting in corrupted response data. Some things to try:\n\n- Check the server logs for errors\n- Try reproducing against a different endpoint or server\n- Capture the raw response with a tool like Wireshark to inspect it\n- Check for network issues like packet loss\n- Consider setting retries, backoff, or other retry handling in urllib3\n\nBut the key point is an invalid chunked response caused urllib3 to throw this error while trying to parse it." + }, + "finish_reason": "stop_sequence" + } + ] + } + } + }, + "20231013211456208210": { + "pre_api_call": { + "model": "anthropic.claude-v2", + "messages": [ + { + "role": "system", + "content": "\" This traceback indicates an error occurred while urllib3 was trying to parse the chunked response from a server. Specifically:\n\n- In urllib3's response.py module, the _update_chunk_length method is trying to parse the length of the next chunk from the response. \n\n- It is calling int(line, 16) to parse the chunk length, which is represented in hex. \n\n- But the line variable is empty (b''), so it cannot be parsed as a hex integer, resulting in a ValueError.\n\nSo most likely the server returned an invalid chunked response, with an empty chunk size rather than a valid hex value. This is causing urllib3 to error while trying to parse the response.\n\nThe root cause could be a server-side issue returning invalid chunked responses, or possibly a network issue resulting in corrupted response data. 
Some things to try:\n\n- Check the server logs for errors\n- Try reproducing against a different endpoint or server\n- Capture the raw response with a tool like Wireshark to inspect it\n- Check for network issues like packet loss\n- Consider setting retries, backoff, or other retry handling in urllib3\n\nBut the key point is an invalid chunked response caused urllib3 to throw this error while trying to parse it.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + } + ], + "optional_params": { + "max_tokens_to_sample": 20, + "temperature": 0.5 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "bedrock", + "api_base": null, + "litellm_call_id": "7a948a2c-0846-4476-b5f3-2ce0ff52b5a3", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": "\n\nHuman: \n\nHuman: \" This traceback indicates an error occurred while urllib3 was trying to parse the chunked response from a server. Specifically:\n\n- In urllib3's response.py module, the _update_chunk_length method is trying to parse the length of the next chunk from the response. \n\n- It is calling int(line, 16) to parse the chunk length, which is represented in hex. \n\n- But the line variable is empty (b''), so it cannot be parsed as a hex integer, resulting in a ValueError.\n\nSo most likely the server returned an invalid chunked response, with an empty chunk size rather than a valid hex value. This is causing urllib3 to error while trying to parse the response.\n\nThe root cause could be a server-side issue returning invalid chunked responses, or possibly a network issue resulting in corrupted response data. Some things to try:\n\n- Check the server logs for errors\n- Try reproducing against a different endpoint or server\n- Capture the raw response with a tool like Wireshark to inspect it\n- Check for network issues like packet loss\n- Consider setting retries, backoff, or other retry handling in urllib3\n\nBut the key point is an invalid chunked response caused urllib3 to throw this error while trying to parse it.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\n\nAssistant: ", + "api_key": "", + "additional_args": { + "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: \\\" This traceback indicates an error occurred while urllib3 was trying to parse the chunked response from a server. Specifically:\\n\\n- In urllib3's response.py module, the _update_chunk_length method is trying to parse the length of the next chunk from the response. \\n\\n- It is calling int(line, 16) to parse the chunk length, which is represented in hex. \\n\\n- But the line variable is empty (b''), so it cannot be parsed as a hex integer, resulting in a ValueError.\\n\\nSo most likely the server returned an invalid chunked response, with an empty chunk size rather than a valid hex value. This is causing urllib3 to error while trying to parse the response.\\n\\nThe root cause could be a server-side issue returning invalid chunked responses, or possibly a network issue resulting in corrupted response data. 
Some things to try:\\n\\n- Check the server logs for errors\\n- Try reproducing against a different endpoint or server\\n- Capture the raw response with a tool like Wireshark to inspect it\\n- Check for network issues like packet loss\\n- Consider setting retries, backoff, or other retry handling in urllib3\\n\\nBut the key point is an invalid chunked response caused urllib3 to throw this error while trying to parse it.\\\"\\n\\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" + }, + "log_event_type": "pre_api_call" + } + }, + "20231013211702601907": { + "pre_api_call": { + "model": "anthropic.claude-v2", + "messages": [ + { + "role": "user", + "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" + }, + { + "role": "user", + "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" + }, + { + "role": "user", + "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" + } + ], + "optional_params": { + "max_tokens_to_sample": 1024, + "temperature": 0.5, + "stream": true + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "bedrock", + "api_base": null, + "litellm_call_id": "d9139278-c263-421a-ba92-4d58c6297798", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": "\n\nHuman: \n\nHuman: Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n\nHuman: Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n\nHuman: Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n\nAssistant: ", + "api_key": "", + "additional_args": { + "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: Traceback (most recent call last):\\n File \\\"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\\\", line 761, in _update_chunk_length\\n self.chunk_left = int(line, 16)\\n ^^^^^^^^^^^^^\\nValueError: invalid literal for 
int() with base 16: b''\\n\\n---\\n\\nwhat is happening?\\n\\nHuman: Traceback (most recent call last):\\n File \\\"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\\\", line 761, in _update_chunk_length\\n self.chunk_left = int(line, 16)\\n ^^^^^^^^^^^^^\\nValueError: invalid literal for int() with base 16: b''\\n\\n---\\n\\nwhat is happening?\\n\\nHuman: Traceback (most recent call last):\\n File \\\"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\\\", line 761, in _update_chunk_length\\n self.chunk_left = int(line, 16)\\n ^^^^^^^^^^^^^\\nValueError: invalid literal for int() with base 16: b''\\n\\n---\\n\\nwhat is happening?\\n\\nAssistant: \", \"max_tokens_to_sample\": 1024, \"temperature\": 0.5}" + }, + "log_event_type": "pre_api_call" + }, + "post_api_call": { + "model": "anthropic.claude-v2", + "messages": [ + { + "role": "user", + "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" + }, + { + "role": "user", + "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" + }, + { + "role": "user", + "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" + } + ], + "optional_params": { + "max_tokens_to_sample": 1024, + "temperature": 0.5, + "stream": true + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "bedrock", + "api_base": null, + "litellm_call_id": "d9139278-c263-421a-ba92-4d58c6297798", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": null, + "api_key": null, + "additional_args": {}, + "log_event_type": "post_api_call", + "original_response": "", + "complete_streaming_response": { + "id": "chatcmpl-2e4a928c-08b7-4f99-8d80-69cc9cd5ee70", + "object": "chat.completion.chunk", + "created": 1697257022, + "model": "anthropic.claude-v2", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": " This traceback indicates an error occurred while urllib3 was trying to parse the chunked encoding of an HTTP response. \n\nSpecifically, it failed on this line:\n\n```\nself.chunk_left = int(line, 16)\n```\n\nWhere `line` was an empty byte string `b''`.\n\nThe chunked encoding format specifies each chunk starts with the length in hex, followed by a newline. So it was expecting a hex string like '100\\r\\n' but got an empty string instead.\n\nSo most likely the server returned an invalid chunked response that didn't follow the proper format. 
\n\nThe root cause could be:\n\n- The server has a bug in its chunked encoding implementation\n- There is a network issue corrupting the response\n- The client made a request that triggered an edge case on the server\n\nSo in summary, this error indicates the HTTP response being parsed did not contain valid chunked encoding as expected. The root cause needs further debugging of the server response and/or network conditions." + }, + "finish_reason": "stop_sequence" + } + ] + } + } + }, + "20231013211716068287": { + "pre_api_call": { + "model": "anthropic.claude-v2", + "messages": [ + { + "role": "system", + "content": "\" This traceback indicates an error occurred while urllib3 was trying to parse the chunked encoding of an HTTP response. \n\nSpecifically, it failed on this line:\n\n```\nself.chunk_left = int(line, 16)\n```\n\nWhere `line` was an empty byte string `b''`.\n\nThe chunked encoding format specifies each chunk starts with the length in hex, followed by a newline. So it was expecting a hex string like '100\\r\\n' but got an empty string instead.\n\nSo most likely the server returned an invalid chunked response that didn't follow the proper format. \n\nThe root cause could be:\n\n- The server has a bug in its chunked encoding implementation\n- There is a network issue corrupting the response\n- The client made a request that triggered an edge case on the server\n\nSo in summary, this error indicates the HTTP response being parsed did not contain valid chunked encoding as expected. The root cause needs further debugging of the server response and/or network conditions.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + } + ], + "optional_params": { + "max_tokens_to_sample": 20, + "temperature": 0.5 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "bedrock", + "api_base": null, + "litellm_call_id": "c3aba2cb-431d-4778-bfb2-a8cb981ff1af", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": "\n\nHuman: \n\nHuman: \" This traceback indicates an error occurred while urllib3 was trying to parse the chunked encoding of an HTTP response. \n\nSpecifically, it failed on this line:\n\n```\nself.chunk_left = int(line, 16)\n```\n\nWhere `line` was an empty byte string `b''`.\n\nThe chunked encoding format specifies each chunk starts with the length in hex, followed by a newline. So it was expecting a hex string like '100\\r\\n' but got an empty string instead.\n\nSo most likely the server returned an invalid chunked response that didn't follow the proper format. \n\nThe root cause could be:\n\n- The server has a bug in its chunked encoding implementation\n- There is a network issue corrupting the response\n- The client made a request that triggered an edge case on the server\n\nSo in summary, this error indicates the HTTP response being parsed did not contain valid chunked encoding as expected. The root cause needs further debugging of the server response and/or network conditions.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\n\nAssistant: ", + "api_key": "", + "additional_args": { + "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: \\\" This traceback indicates an error occurred while urllib3 was trying to parse the chunked encoding of an HTTP response. 
\\n\\nSpecifically, it failed on this line:\\n\\n```\\nself.chunk_left = int(line, 16)\\n```\\n\\nWhere `line` was an empty byte string `b''`.\\n\\nThe chunked encoding format specifies each chunk starts with the length in hex, followed by a newline. So it was expecting a hex string like '100\\\\r\\\\n' but got an empty string instead.\\n\\nSo most likely the server returned an invalid chunked response that didn't follow the proper format. \\n\\nThe root cause could be:\\n\\n- The server has a bug in its chunked encoding implementation\\n- There is a network issue corrupting the response\\n- The client made a request that triggered an edge case on the server\\n\\nSo in summary, this error indicates the HTTP response being parsed did not contain valid chunked encoding as expected. The root cause needs further debugging of the server response and/or network conditions.\\\"\\n\\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" + }, + "log_event_type": "pre_api_call" + }, + "post_api_call": { + "model": "anthropic.claude-v2", + "messages": [ + { + "role": "system", + "content": "\" This traceback indicates an error occurred while urllib3 was trying to parse the chunked encoding of an HTTP response. \n\nSpecifically, it failed on this line:\n\n```\nself.chunk_left = int(line, 16)\n```\n\nWhere `line` was an empty byte string `b''`.\n\nThe chunked encoding format specifies each chunk starts with the length in hex, followed by a newline. So it was expecting a hex string like '100\\r\\n' but got an empty string instead.\n\nSo most likely the server returned an invalid chunked response that didn't follow the proper format. \n\nThe root cause could be:\n\n- The server has a bug in its chunked encoding implementation\n- There is a network issue corrupting the response\n- The client made a request that triggered an edge case on the server\n\nSo in summary, this error indicates the HTTP response being parsed did not contain valid chunked encoding as expected. The root cause needs further debugging of the server response and/or network conditions.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + } + ], + "optional_params": { + "max_tokens_to_sample": 20, + "temperature": 0.5 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "bedrock", + "api_base": null, + "litellm_call_id": "c3aba2cb-431d-4778-bfb2-a8cb981ff1af", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": "\n\nHuman: \n\nHuman: \" This traceback indicates an error occurred while urllib3 was trying to parse the chunked encoding of an HTTP response. \n\nSpecifically, it failed on this line:\n\n```\nself.chunk_left = int(line, 16)\n```\n\nWhere `line` was an empty byte string `b''`.\n\nThe chunked encoding format specifies each chunk starts with the length in hex, followed by a newline. So it was expecting a hex string like '100\\r\\n' but got an empty string instead.\n\nSo most likely the server returned an invalid chunked response that didn't follow the proper format. 
\n\nThe root cause could be:\n\n- The server has a bug in its chunked encoding implementation\n- There is a network issue corrupting the response\n- The client made a request that triggered an edge case on the server\n\nSo in summary, this error indicates the HTTP response being parsed did not contain valid chunked encoding as expected. The root cause needs further debugging of the server response and/or network conditions.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\n\nAssistant: ", + "api_key": "", + "additional_args": { + "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: \\\" This traceback indicates an error occurred while urllib3 was trying to parse the chunked encoding of an HTTP response. \\n\\nSpecifically, it failed on this line:\\n\\n```\\nself.chunk_left = int(line, 16)\\n```\\n\\nWhere `line` was an empty byte string `b''`.\\n\\nThe chunked encoding format specifies each chunk starts with the length in hex, followed by a newline. So it was expecting a hex string like '100\\\\r\\\\n' but got an empty string instead.\\n\\nSo most likely the server returned an invalid chunked response that didn't follow the proper format. \\n\\nThe root cause could be:\\n\\n- The server has a bug in its chunked encoding implementation\\n- There is a network issue corrupting the response\\n- The client made a request that triggered an edge case on the server\\n\\nSo in summary, this error indicates the HTTP response being parsed did not contain valid chunked encoding as expected. The root cause needs further debugging of the server response and/or network conditions.\\\"\\n\\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" + }, + "log_event_type": "post_api_call", + "original_response": { + "completion": " Invalid chunked encoding in HTTP response", + "stop_reason": "stop_sequence" + } + } + }, + "20231013211719949668": { + "pre_api_call": { + "model": "anthropic.claude-v2", + "messages": [ + { + "role": "system", + "content": "### Instruction:\nTraceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n### Instruction:\nTraceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n### Instruction:\nTraceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n### Response:\n This traceback indicates an error occurred while urllib3 was trying to parse the chunked encoding of an HTTP response. \n\nSpecifically, it failed on this line:\n\n```\nself.chunk_left = int(line, 16)\n```\n\nWhere `line` was an empty byte string `b''`.\n\nThe chunked encoding format specifies each chunk starts with the length in hex, followed by a newline. 
So it was expecting a hex string like '100\\r\\n' but got an empty string instead.\n\nSo most likely the server returned an invalid chunked response that didn't follow the proper format. \n\nThe root cause could be:\n\n- The server has a bug in its chunked encoding implementation\n- There is a network issue corrupting the response\n- The client made a request that triggered an edge case on the server\n\nSo in summary, this error indicates the HTTP response being parsed did not contain valid chunked encoding as expected. The root cause needs further debugging of the server response and/or network conditions.\n### Response:\n\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: " + } + ], + "optional_params": { + "max_tokens_to_sample": 20, + "temperature": 0.5 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "bedrock", + "api_base": null, + "litellm_call_id": "95abd0ba-d6df-4d5b-a3c7-b432df12b146", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": "\n\nHuman: \n\nHuman: ### Instruction:\nTraceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n### Instruction:\nTraceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n### Instruction:\nTraceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n### Response:\n This traceback indicates an error occurred while urllib3 was trying to parse the chunked encoding of an HTTP response. \n\nSpecifically, it failed on this line:\n\n```\nself.chunk_left = int(line, 16)\n```\n\nWhere `line` was an empty byte string `b''`.\n\nThe chunked encoding format specifies each chunk starts with the length in hex, followed by a newline. So it was expecting a hex string like '100\\r\\n' but got an empty string instead.\n\nSo most likely the server returned an invalid chunked response that didn't follow the proper format. \n\nThe root cause could be:\n\n- The server has a bug in its chunked encoding implementation\n- There is a network issue corrupting the response\n- The client made a request that triggered an edge case on the server\n\nSo in summary, this error indicates the HTTP response being parsed did not contain valid chunked encoding as expected. The root cause needs further debugging of the server response and/or network conditions.\n### Response:\n\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. 
The title is: \n\nAssistant: ", + "api_key": "", + "additional_args": { + "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: ### Instruction:\\nTraceback (most recent call last):\\n File \\\"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\\\", line 761, in _update_chunk_length\\n self.chunk_left = int(line, 16)\\n ^^^^^^^^^^^^^\\nValueError: invalid literal for int() with base 16: b''\\n\\n---\\n\\nwhat is happening?\\n### Instruction:\\nTraceback (most recent call last):\\n File \\\"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\\\", line 761, in _update_chunk_length\\n self.chunk_left = int(line, 16)\\n ^^^^^^^^^^^^^\\nValueError: invalid literal for int() with base 16: b''\\n\\n---\\n\\nwhat is happening?\\n### Instruction:\\nTraceback (most recent call last):\\n File \\\"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\\\", line 761, in _update_chunk_length\\n self.chunk_left = int(line, 16)\\n ^^^^^^^^^^^^^\\nValueError: invalid literal for int() with base 16: b''\\n\\n---\\n\\nwhat is happening?\\n### Response:\\n This traceback indicates an error occurred while urllib3 was trying to parse the chunked encoding of an HTTP response. \\n\\nSpecifically, it failed on this line:\\n\\n```\\nself.chunk_left = int(line, 16)\\n```\\n\\nWhere `line` was an empty byte string `b''`.\\n\\nThe chunked encoding format specifies each chunk starts with the length in hex, followed by a newline. So it was expecting a hex string like '100\\\\r\\\\n' but got an empty string instead.\\n\\nSo most likely the server returned an invalid chunked response that didn't follow the proper format. \\n\\nThe root cause could be:\\n\\n- The server has a bug in its chunked encoding implementation\\n- There is a network issue corrupting the response\\n- The client made a request that triggered an edge case on the server\\n\\nSo in summary, this error indicates the HTTP response being parsed did not contain valid chunked encoding as expected. The root cause needs further debugging of the server response and/or network conditions.\\n### Response:\\n\\n\\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. 
The title is: \\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" + }, + "log_event_type": "pre_api_call" + }, + "post_api_call": { + "model": "anthropic.claude-v2", + "messages": [ + { + "role": "system", + "content": "### Instruction:\nTraceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n### Instruction:\nTraceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n### Instruction:\nTraceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n### Response:\n This traceback indicates an error occurred while urllib3 was trying to parse the chunked encoding of an HTTP response. \n\nSpecifically, it failed on this line:\n\n```\nself.chunk_left = int(line, 16)\n```\n\nWhere `line` was an empty byte string `b''`.\n\nThe chunked encoding format specifies each chunk starts with the length in hex, followed by a newline. So it was expecting a hex string like '100\\r\\n' but got an empty string instead.\n\nSo most likely the server returned an invalid chunked response that didn't follow the proper format. \n\nThe root cause could be:\n\n- The server has a bug in its chunked encoding implementation\n- There is a network issue corrupting the response\n- The client made a request that triggered an edge case on the server\n\nSo in summary, this error indicates the HTTP response being parsed did not contain valid chunked encoding as expected. The root cause needs further debugging of the server response and/or network conditions.\n### Response:\n\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. 
The title is: " + } + ], + "optional_params": { + "max_tokens_to_sample": 20, + "temperature": 0.5 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "bedrock", + "api_base": null, + "litellm_call_id": "95abd0ba-d6df-4d5b-a3c7-b432df12b146", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": "\n\nHuman: \n\nHuman: ### Instruction:\nTraceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n### Instruction:\nTraceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n### Instruction:\nTraceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n### Response:\n This traceback indicates an error occurred while urllib3 was trying to parse the chunked encoding of an HTTP response. \n\nSpecifically, it failed on this line:\n\n```\nself.chunk_left = int(line, 16)\n```\n\nWhere `line` was an empty byte string `b''`.\n\nThe chunked encoding format specifies each chunk starts with the length in hex, followed by a newline. So it was expecting a hex string like '100\\r\\n' but got an empty string instead.\n\nSo most likely the server returned an invalid chunked response that didn't follow the proper format. \n\nThe root cause could be:\n\n- The server has a bug in its chunked encoding implementation\n- There is a network issue corrupting the response\n- The client made a request that triggered an edge case on the server\n\nSo in summary, this error indicates the HTTP response being parsed did not contain valid chunked encoding as expected. The root cause needs further debugging of the server response and/or network conditions.\n### Response:\n\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. 
The title is: \n\nAssistant: ", + "api_key": "", + "additional_args": { + "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: ### Instruction:\\nTraceback (most recent call last):\\n File \\\"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\\\", line 761, in _update_chunk_length\\n self.chunk_left = int(line, 16)\\n ^^^^^^^^^^^^^\\nValueError: invalid literal for int() with base 16: b''\\n\\n---\\n\\nwhat is happening?\\n### Instruction:\\nTraceback (most recent call last):\\n File \\\"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\\\", line 761, in _update_chunk_length\\n self.chunk_left = int(line, 16)\\n ^^^^^^^^^^^^^\\nValueError: invalid literal for int() with base 16: b''\\n\\n---\\n\\nwhat is happening?\\n### Instruction:\\nTraceback (most recent call last):\\n File \\\"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\\\", line 761, in _update_chunk_length\\n self.chunk_left = int(line, 16)\\n ^^^^^^^^^^^^^\\nValueError: invalid literal for int() with base 16: b''\\n\\n---\\n\\nwhat is happening?\\n### Response:\\n This traceback indicates an error occurred while urllib3 was trying to parse the chunked encoding of an HTTP response. \\n\\nSpecifically, it failed on this line:\\n\\n```\\nself.chunk_left = int(line, 16)\\n```\\n\\nWhere `line` was an empty byte string `b''`.\\n\\nThe chunked encoding format specifies each chunk starts with the length in hex, followed by a newline. So it was expecting a hex string like '100\\\\r\\\\n' but got an empty string instead.\\n\\nSo most likely the server returned an invalid chunked response that didn't follow the proper format. \\n\\nThe root cause could be:\\n\\n- The server has a bug in its chunked encoding implementation\\n- There is a network issue corrupting the response\\n- The client made a request that triggered an edge case on the server\\n\\nSo in summary, this error indicates the HTTP response being parsed did not contain valid chunked encoding as expected. The root cause needs further debugging of the server response and/or network conditions.\\n### Response:\\n\\n\\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: \\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" + }, + "log_event_type": "post_api_call", + "original_response": { + "completion": " Invalid chunked encoding in HTTP response", + "stop_reason": "stop_sequence" + } + } + }, + "20231013212440790156": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "user", + "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" + }, + { + "role": "user", + "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" 
+        },
+        [... the same traceback message repeated twice more; elided as duplicates ...]
+      ],
+      "optional_params": {
+        "temperature": 0.5,
+        "stream": true,
+        "max_tokens": 1024
+      },
+      "litellm_params": {
+        "return_async": false,
+        "api_key": null,
+        "force_timeout": 600,
+        "logger_fn": null,
+        "verbose": false,
+        "custom_llm_provider": "custom_openai",
+        "api_base": "https://api.perplexity.ai",
+        "litellm_call_id": "1f157151-90b2-4aac-a949-9bd9db21b87c",
+        "model_alias_map": {},
+        "completion_call_id": null,
+        "metadata": null,
+        "stream_response": {}
+      },
+      "input": [... verbatim copy of the four user messages above; elided as a duplicate ...],
+      "api_key": "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",
+      "additional_args": {
+        "headers": null,
+        "api_base": "https://api.perplexity.ai"
+      },
+      "log_event_type": "pre_api_call"
+    }
+  },
+  "20231013212442740993": {
+    "pre_api_call": {
+      "model": "mistral-7b-instruct",
+      "messages": [
+        {
+          "role": "system",
+          "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:"
+        }
+      ],
+      "optional_params": {
+        "temperature": 0.5,
+        "max_tokens": 20
+      },
+      "litellm_params": {
+        "return_async": false,
+        "api_key": null,
+        "force_timeout": 600,
+        "logger_fn": null,
+        "verbose": false,
+        "custom_llm_provider": "custom_openai",
+        "api_base": "https://api.perplexity.ai",
+        "litellm_call_id": "73f74f44-1005-4cf8-9d4e-b610e45faee9",
+        "model_alias_map": {},
+        "completion_call_id": null,
+        "metadata": null,
+        "stream_response": {}
+      },
+      "input": [... the same single system message; elided as a duplicate ...],
+      "api_key": "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",
+      "additional_args": {
+        "headers": null,
+        "api_base": "https://api.perplexity.ai"
+      },
+      "log_event_type": "pre_api_call"
+    },
+    "post_api_call": {
+      "model": "mistral-7b-instruct",
+      "messages": [... identical to the pre_api_call above ...],
+      "optional_params": {
+        "temperature": 0.5,
+        "max_tokens": 20
+      },
+      "litellm_params": {... identical to the pre_api_call above ...},
+      "input": [... identical to the pre_api_call above ...],
+      "api_key": "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",
+      "additional_args": {
+        "headers": null
+      },
+      "log_event_type": "post_api_call",
+      "original_response": null
+    }
+  },
[... the remaining records of this hunk (timestamps 20231013212634080367 through 20231013213244723525) are elided as near-verbatim duplicates of the two records above: each is the same streamed "mistral-7b-instruct" pre_api_call against https://api.perplexity.ai, with the identical urllib3 traceback question (later simply "hey") accumulating one more copy per retry, followed by the same 20-token title-summarization call; the streamed calls log pre_api_call only, while the title calls record original_response values of null or stray completions such as "Hello! How can I help you today?", "I'm sorry, but I'm not sure what you're asking. Can you please", and "What is the next term in -973, -1945, -29"; every record duplicates its messages verbatim under "input" and embeds the same plaintext pplx- API key shown above ...]

+  "20231013213245454481": {
+    "pre_api_call": {
+      "model": "mistral-7b-instruct",
+      "messages": [
+        {
+          "role": "system",
+          "content": "\"\"\n\nPlease write a short title summarizing the message quoted above.
Use no more than 10 words:" + } + ], + "optional_params": { + "temperature": 0.5, + "max_tokens": 20 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "8d5ca9a4-479c-4839-978e-fe71d3087b84", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + } + ], + "api_key": "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", + "additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + }, + "post_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + }, + { + "role": "user", + "content": "" + } + ], + "optional_params": { + "temperature": 0.5, + "max_tokens": 20 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "8d5ca9a4-479c-4839-978e-fe71d3087b84", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + }, + { + "role": "user", + "content": "" + } + ], + "api_key": "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", + "additional_args": { + "headers": null + }, + "log_event_type": "post_api_call", + "original_response": { + "id": "1300a2b8-ee16-4863-82ad-011a6186e379", + "object": "chat.completion", + "choices": [ + { + "finish_reason": "stop", + "index": 0, + "message": { + "content": "Hello! 
How can I assist you today?", + "role": "assistant", + "logprobs": null + } + } + ], + "created": 1697257965, + "model": "mistral-7b-instruct", + "usage": { + "prompt_tokens": 9, + "completion_tokens": 10, + "total_tokens": 19 + } + } + } + }, + "20231013213341878538": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + } + ], + "optional_params": { + "temperature": 0.5, + "stream": true, + "max_tokens": 1024 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "616a6dab-577f-4fa6-bcdc-bd1c64c561c9", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + } + ], + "api_key": "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", + "additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + } + }, + "20231013213342683953": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + } + ], + "optional_params": { + "temperature": 0.5, + "max_tokens": 20 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "370a2c6a-f3d5-43c4-ab01-ad67466cbc7b", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + } + ], + "api_key": "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", + "additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + }, + "post_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + }, + { + "role": "user", + "content": "" + } + ], + "optional_params": { + "temperature": 0.5, + "max_tokens": 20 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "370a2c6a-f3d5-43c4-ab01-ad67466cbc7b", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" + }, + { + "role": "user", + "content": "" + } + ], + "api_key": "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", + "additional_args": { + "headers": null + }, + "log_event_type": "post_api_call", + "original_response": { + "id": "9dc52b98-b413-472e-a12a-a7649052fb42", + "object": "chat.completion", + "choices": [ + { + "finish_reason": "stop", + "index": 0, + "message": { + "content": "How can I help you today?", + "role": "assistant", + "logprobs": null + } + } + ], + "created": 1697258022, + "model": "mistral-7b-instruct", + "usage": { + "prompt_tokens": 9, + "completion_tokens": 8, + "total_tokens": 17 + } + } + } + }, + "20231013213448796618": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + } + ], + "optional_params": { + "temperature": 0.5, + "stream": true, + "max_tokens": 1024 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "1aed5ecd-db9f-4e14-85c0-76ab2c4267b1", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + } + ], + "api_key": "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", + "additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + } + }, + "20231013213449357105": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + } + ], + "optional_params": { + "temperature": 0.5, + "max_tokens": 20 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "0b701d17-7db0-46e2-86d2-4d1f7ca0650c", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + } + ], + "api_key": "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", + "additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + }, + "post_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" + }, + { + "role": "user", + "content": "" + } + ], + "optional_params": { + "temperature": 0.5, + "max_tokens": 20 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "0b701d17-7db0-46e2-86d2-4d1f7ca0650c", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + }, + { + "role": "user", + "content": "" + } + ], + "api_key": "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", + "additional_args": { + "headers": null + }, + "log_event_type": "post_api_call", + "original_response": { + "id": "1b887188-9148-445e-b711-3d3d0cf6b185", + "object": "chat.completion", + "choices": [ + { + "finish_reason": "stop", + "index": 0, + "message": { + "content": "Hello! How can I help you today?", + "role": "assistant", + "logprobs": null + } + } + ], + "created": 1697258089, + "model": "mistral-7b-instruct", + "usage": { + "prompt_tokens": 9, + "completion_tokens": 10, + "total_tokens": 19 + } + } + } + }, + "20231013213517902785": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + } + ], + "optional_params": { + "temperature": 0.5, + "stream": true, + "max_tokens": 1024 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "e4a66a54-aced-448b-8fc7-af323fc3e06d", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + } + ], + "api_key": "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", + "additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + } + }, + "20231013213518678678": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + } + ], + "optional_params": { + "temperature": 0.5, + "max_tokens": 20 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "08642a2a-51fc-4b38-9e01-b7595bf9a104", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" + } + ], + "api_key": "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", + "additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + }, + "post_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + }, + { + "role": "user", + "content": "" + } + ], + "optional_params": { + "temperature": 0.5, + "max_tokens": 20 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "08642a2a-51fc-4b38-9e01-b7595bf9a104", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + }, + { + "role": "user", + "content": "" + } + ], + "api_key": "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", + "additional_args": { + "headers": null + }, + "log_event_type": "post_api_call", + "original_response": { + "id": "52d91416-c647-44cb-a4ee-1dc70d2072db", + "object": "chat.completion", + "choices": [ + { + "finish_reason": "stop", + "index": 0, + "message": { + "content": "Hello! How can I help you today?", + "role": "assistant", + "logprobs": null + } + } + ], + "created": 1697258118, + "model": "mistral-7b-instruct", + "usage": { + "prompt_tokens": 9, + "completion_tokens": 10, + "total_tokens": 19 + } + } + } + }, + "20231013213539152325": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + } + ], + "optional_params": { + "temperature": 0.5, + "stream": true, + "max_tokens": 1024 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "8de200e3-2125-4f50-82fc-c9b6d12799a4", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + } + ], + "api_key": "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", + "additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + } + }, + "20231013213539846020": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" + } + ], + "optional_params": { + "temperature": 0.5, + "max_tokens": 20 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "8ca07d1a-0a55-46d6-8b21-1cf098197b05", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + } + ], + "api_key": "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", + "additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + }, + "post_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + }, + { + "role": "user", + "content": "" + } + ], + "optional_params": { + "temperature": 0.5, + "max_tokens": 20 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "8ca07d1a-0a55-46d6-8b21-1cf098197b05", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + }, + { + "role": "user", + "content": "" + } + ], + "api_key": "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", + "additional_args": { + "headers": null + }, + "log_event_type": "post_api_call", + "original_response": { + "id": "51410c8c-c7d8-4ba2-9399-02a8acd3e180", + "object": "chat.completion", + "choices": [ + { + "finish_reason": "stop", + "index": 0, + "message": { + "content": "Hello! 
How can I help you today?", + "role": "assistant", + "logprobs": null + } + } + ], + "created": 1697258139, + "model": "mistral-7b-instruct", + "usage": { + "prompt_tokens": 9, + "completion_tokens": 10, + "total_tokens": 19 + } + } + } + }, + "20231013213744078191": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + } + ], + "optional_params": { + "temperature": 0.5, + "stream": true, + "max_tokens": 1024 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "aa5f4e9c-bc99-4f22-9a98-24fe2dbe7249", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + } + ], + "api_key": "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", + "additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + } + }, + "20231013213835712925": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + } + ], + "optional_params": { + "temperature": 0.5, + "stream": true, + "max_tokens": 1024 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "19fe0854-3616-4882-980f-87127e97263a", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + } + ], + "api_key": "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", + "additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + } + }, + "20231013213914253529": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "user", + "content": "hey" + }, + { + "role": 
"user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + } + ], + "optional_params": { + "temperature": 0.5, + "stream": true, + "max_tokens": 1024 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "63e851e8-719b-4810-8f90-8af2d436bc54", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + } + ], + "api_key": "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", + "additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + } + }, + "20231013213915110326": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + } + ], + "optional_params": { + "temperature": 0.5, + "max_tokens": 20 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "d00f6bd9-5e93-4433-be35-9de27d7bac63", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + } + ], + "api_key": "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", + "additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + }, + "post_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + }, + { + "role": "user", + "content": "" + } + ], + "optional_params": { + "temperature": 0.5, + "max_tokens": 20 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "d00f6bd9-5e93-4433-be35-9de27d7bac63", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" + }, + { + "role": "user", + "content": "" + } + ], + "api_key": "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", + "additional_args": { + "headers": null + }, + "log_event_type": "post_api_call", + "original_response": { + "id": "f8eb8660-0c25-4a6c-a933-5490c2393ba1", + "object": "chat.completion", + "choices": [ + { + "finish_reason": "stop", + "index": 0, + "message": { + "content": "Hello! How can I help you today?", + "role": "assistant", + "logprobs": null + } + } + ], + "created": 1697258355, + "model": "mistral-7b-instruct", + "usage": { + "prompt_tokens": 9, + "completion_tokens": 10, + "total_tokens": 19 + } + } + } + }, + "20231013214136176621": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + } + ], + "optional_params": { + "temperature": 0.5, + "stream": true, + "max_tokens": 1024 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "448a5afb-c4a3-4426-89ee-3d9a0bdf0703", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + } + ], + "api_key": "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", + "additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + } + }, + "20231013214136742309": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + } + ], + "optional_params": { + "temperature": 0.5, + "max_tokens": 20 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "1f019c82-11f7-4772-aea7-866e49149605", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" + } + ], + "api_key": "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", + "additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + }, + "post_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + }, + { + "role": "user", + "content": "" + } + ], + "optional_params": { + "temperature": 0.5, + "max_tokens": 20 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "1f019c82-11f7-4772-aea7-866e49149605", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + }, + { + "role": "user", + "content": "" + } + ], + "api_key": "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", + "additional_args": { + "headers": null + }, + "log_event_type": "post_api_call", + "original_response": { + "id": "4dd3e710-cff3-4cc9-970d-1ddc5c733c79", + "object": "chat.completion", + "choices": [ + { + "finish_reason": "length", + "index": 0, + "message": { + "content": "To calculate the remainder when 1584 is divided by 15, we can use", + "role": "assistant", + "logprobs": null + } + } + ], + "created": 1697258496, + "model": "mistral-7b-instruct", + "usage": { + "prompt_tokens": 9, + "completion_tokens": 20, + "total_tokens": 29 + } + } + } + }, + "20231013214238159337": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + } + ], + "optional_params": { + "temperature": 0.5, + "stream": true, + "max_tokens": 1024 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "590120e8-4b90-47c1-bb2a-4e267d6162d9", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + } + ], + "api_key": "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", + 
"additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + } + }, + "20231013214238721319": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + } + ], + "optional_params": { + "temperature": 0.5, + "max_tokens": 20 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "e6783349-3427-46d0-ad71-749f7f088097", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + } + ], + "api_key": "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", + "additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + }, + "post_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + }, + { + "role": "user", + "content": "" + } + ], + "optional_params": { + "temperature": 0.5, + "max_tokens": 20 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "e6783349-3427-46d0-ad71-749f7f088097", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + }, + { + "role": "user", + "content": "" + } + ], + "api_key": "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", + "additional_args": { + "headers": null + }, + "log_event_type": "post_api_call", + "original_response": { + "id": "24f98227-c801-4d6e-82a0-0ae0a00d5274", + "object": "chat.completion", + "choices": [ + { + "finish_reason": "stop", + "index": 0, + "message": { + "content": "Hello! 
How can I assist you today?", + "role": "assistant", + "logprobs": null + } + } + ], + "created": 1697258558, + "model": "mistral-7b-instruct", + "usage": { + "prompt_tokens": 9, + "completion_tokens": 10, + "total_tokens": 19 + } + } + } + }, + "20231013214340528772": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + } + ], + "optional_params": { + "temperature": 0.5, + "stream": true, + "max_tokens": 1024 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "0f538792-68de-4310-9dcd-5555fcebfb0e", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + } + ], + "api_key": "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", + "additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + } + }, + "20231013214341284390": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + } + ], + "optional_params": { + "temperature": 0.5, + "max_tokens": 20 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "f24d3c00-4895-497e-b638-67b4338909e9", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + } + ], + "api_key": "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", + "additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + }, + "post_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" + }, + { + "role": "user", + "content": "" + } + ], + "optional_params": { + "temperature": 0.5, + "max_tokens": 20 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "f24d3c00-4895-497e-b638-67b4338909e9", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "system", + "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + }, + { + "role": "user", + "content": "" + } + ], + "api_key": "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", + "additional_args": { + "headers": null + }, + "log_event_type": "post_api_call", + "original_response": { + "id": "4e28c3e7-7acf-4e1c-9ef9-3eb6944d1f74", + "object": "chat.completion", + "choices": [ + { + "finish_reason": "stop", + "index": 0, + "message": { + "content": "Hello! How can I assist you today?", + "role": "assistant", + "logprobs": null + } + } + ], + "created": 1697258621, + "model": "mistral-7b-instruct", + "usage": { + "prompt_tokens": 9, + "completion_tokens": 10, + "total_tokens": 19 + } + } + } + }, + "20231013214414518891": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "user", + "content": "hey" + } + ], + "optional_params": { + "temperature": 0.5, + "stream": true, + "max_tokens": 1024 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "6caf4d75-a5c2-488c-a0f2-981966670de4", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "user", + "content": "hey" + } + ], + "api_key": "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", + "additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + }, + "post_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "user", + "content": "hey" + } + ], + "optional_params": { + "temperature": 0.5, + "stream": true, + "max_tokens": 1024 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "6caf4d75-a5c2-488c-a0f2-981966670de4", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": null, + "api_key": null, + "additional_args": {}, + "log_event_type": "post_api_call", + "original_response": "", + "complete_streaming_response": { + "id": "chatcmpl-f65e349b-51c8-428a-aaae-052cdda20e33", + "object": "chat.completion.chunk", + "created": 1697258654, + "model": "mistral-7b-instruct", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Hello! How can I assist you today?" + }, + "finish_reason": "stop" + } + ] + } + } + }, + "20231013214414838837": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "system", + "content": "\"Hello! How can I assist you today?\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" + } + ], + "optional_params": { + "temperature": 0.5, + "max_tokens": 20 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "03309753-b23d-40c7-afe3-a2e2a24b0f93", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "system", + "content": "\"Hello! How can I assist you today?\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + } + ], + "api_key": "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", + "additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + }, + "post_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "system", + "content": "\"Hello! How can I assist you today?\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + }, + { + "role": "user", + "content": "" + } + ], + "optional_params": { + "temperature": 0.5, + "max_tokens": 20 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "03309753-b23d-40c7-afe3-a2e2a24b0f93", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "system", + "content": "\"Hello! How can I assist you today?\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + }, + { + "role": "user", + "content": "" + } + ], + "api_key": "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", + "additional_args": { + "headers": null + }, + "log_event_type": "post_api_call", + "original_response": { + "id": "675d7305-e5d2-4eae-88d1-1b5c362f636c", + "object": "chat.completion", + "choices": [ + { + "finish_reason": "length", + "index": 0, + "message": { + "content": "Welcome to my language model! I'm here to help you with any language-related task you", + "role": "assistant", + "logprobs": null + } + } + ], + "created": 1697258654, + "model": "mistral-7b-instruct", + "usage": { + "prompt_tokens": 9, + "completion_tokens": 20, + "total_tokens": 29 + } + } + } + }, + "20231013214415390002": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "system", + "content": "### Instruction:\nhey\n### Response:\nHello! How can I assist you today?\n### Response:\n\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: " + } + ], + "optional_params": { + "temperature": 0.5, + "max_tokens": 20 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "509dc457-8553-4902-8c44-432c53acd3fa", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "system", + "content": "### Instruction:\nhey\n### Response:\nHello! How can I assist you today?\n### Response:\n\n\nGive a short title to describe the above chat session. 
Do not put quotes around the title. Do not use more than 6 words. The title is: " + } + ], + "api_key": "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", + "additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + }, + "post_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "system", + "content": "### Instruction:\nhey\n### Response:\nHello! How can I assist you today?\n### Response:\n\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: " + }, + { + "role": "user", + "content": "" + } + ], + "optional_params": { + "temperature": 0.5, + "max_tokens": 20 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "509dc457-8553-4902-8c44-432c53acd3fa", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "system", + "content": "### Instruction:\nhey\n### Response:\nHello! How can I assist you today?\n### Response:\n\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: " + }, + { + "role": "user", + "content": "" + } + ], + "api_key": "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", + "additional_args": { + "headers": null + }, + "log_event_type": "post_api_call", + "original_response": { + "id": "a250438f-1fd0-4acd-a65a-921bb102c956", + "object": "chat.completion", + "choices": [ + { + "finish_reason": "stop", + "index": 0, + "message": { + "content": "Hello! 
How can I help you today?", + "role": "assistant", + "logprobs": null + } + } + ], + "created": 1697258655, + "model": "mistral-7b-instruct", + "usage": { + "prompt_tokens": 9, + "completion_tokens": 10, + "total_tokens": 19 + } + } + } + }, + "20231013214424018232": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + } + ], + "optional_params": { + "temperature": 0.5, + "stream": true, + "max_tokens": 1024 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "dbdb2650-bf03-40bc-a722-49329319b5e0", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + } + ], + "api_key": "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", + "additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + }, + "post_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "user", + "content": "hey" + }, + { + "role": "user", + "content": "hey" + } + ], + "optional_params": { + "temperature": 0.5, + "stream": true, + "max_tokens": 1024 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "dbdb2650-bf03-40bc-a722-49329319b5e0", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": null, + "api_key": null, + "additional_args": {}, + "log_event_type": "post_api_call", + "original_response": "", + "complete_streaming_response": { + "id": "chatcmpl-cf00dc6d-3cd1-4a22-aaf9-0e40125e7d59", + "object": "chat.completion.chunk", + "created": 1697258664, + "model": "mistral-7b-instruct", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "How can I assist you today?" + }, + "finish_reason": "stop" + } + ] + } + } + }, + "20231013214424390214": { + "pre_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "system", + "content": "\"How can I assist you today?\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + } + ], + "optional_params": { + "temperature": 0.5, + "max_tokens": 20 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "509ae926-b48a-4fb4-8d59-8ab93ddf2455", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "system", + "content": "\"How can I assist you today?\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" + } + ], + "api_key": "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", + "additional_args": { + "headers": null, + "api_base": "https://api.perplexity.ai" + }, + "log_event_type": "pre_api_call" + }, + "post_api_call": { + "model": "mistral-7b-instruct", + "messages": [ + { + "role": "system", + "content": "\"How can I assist you today?\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + }, + { + "role": "user", + "content": "" + } + ], + "optional_params": { + "temperature": 0.5, + "max_tokens": 20 + }, + "litellm_params": { + "return_async": false, + "api_key": null, + "force_timeout": 600, + "logger_fn": null, + "verbose": false, + "custom_llm_provider": "custom_openai", + "api_base": "https://api.perplexity.ai", + "litellm_call_id": "509ae926-b48a-4fb4-8d59-8ab93ddf2455", + "model_alias_map": {}, + "completion_call_id": null, + "metadata": null, + "stream_response": {} + }, + "input": [ + { + "role": "system", + "content": "\"How can I assist you today?\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" + }, + { + "role": "user", + "content": "" + } + ], + "api_key": "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", + "additional_args": { + "headers": null + }, + "log_event_type": "post_api_call", + "original_response": { + "id": "1e9e3514-25d7-4884-af52-a136cc0d0bf4", + "object": "chat.completion", + "choices": [ + { + "finish_reason": "stop", + "index": 0, + "message": { + "content": "Hello! How can I assist you today?", + "role": "assistant", + "logprobs": null + } + } + ], + "created": 1697258664, + "model": "mistral-7b-instruct", + "usage": { + "prompt_tokens": 9, + "completion_tokens": 10, + "total_tokens": 19 + } + } + } } } \ No newline at end of file diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index bd2abc6b2f..5c14d00577 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -332,7 +332,6 @@ def logger( end_time=None # start/end time ): log_event_type = kwargs['log_event_type'] - print(f"REACHES LOGGER: {log_event_type}") try: if log_event_type == 'pre_api_call': inference_params = copy.deepcopy(kwargs) @@ -355,7 +354,6 @@ def logger( with open(log_file, 'w') as f: json.dump(existing_data, f, indent=2) elif log_event_type == 'post_api_call': - print(f"post api call kwargs: {kwargs}") if "stream" not in kwargs["optional_params"] or kwargs["optional_params"]["stream"] is False or kwargs.get("complete_streaming_response", False): inference_params = copy.deepcopy(kwargs) timestamp = inference_params.pop('start_time') @@ -438,7 +436,6 @@ async def completion(request: Request): @router.post("/chat/completions") async def chat_completion(request: Request): data = await request.json() - print(f"data passed in: {data}") response = litellm_completion(data, type="chat_completion") return response diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py index 7037503838..adb6a4be05 100644 --- a/litellm/tests/test_completion.py +++ b/litellm/tests/test_completion.py @@ -108,6 +108,28 @@ def test_completion_with_litellm_call_id(): except Exception as e: pytest.fail(f"Error occurred: {e}") +def test_completion_perplexity_api(): + try: + litellm.set_verbose=True + messages=[{ + "role": "system", + "content": "You're a good bot" + },{ + "role": "user", + "content": "Hey", + },{ + "role": "user", + "content": "Hey", + }] + response = completion( + model="mistral-7b-instruct", + 
messages=messages,
+            api_base="https://api.perplexity.ai")
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
+test_completion_perplexity_api()
 # commenting out as this is a flaky test on circle ci
 # def test_completion_nlp_cloud():
 #     try:
diff --git a/litellm/utils.py b/litellm/utils.py
index b618272ac2..74e81fa8fd 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -1419,7 +1419,9 @@ def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None, api_
     if api_base:
         for endpoint in litellm.openai_compatible_endpoints:
             if endpoint in api_base:
-                custom_llm_provider = "openai"
+                custom_llm_provider = "custom_openai"
+                if endpoint == "api.perplexity.ai":
+                    litellm.api_key = os.getenv("PERPLEXITYAI_API_KEY")
                 return model, custom_llm_provider

     # check if model in known model provider list -> for huggingface models, raise exception as they don't have a fixed provider (can be togetherai, anyscale, baseten, runpod, etc.)
@@ -2936,6 +2938,44 @@ def exception_type(
         elif custom_llm_provider == "ollama":
             if "no attribute 'async_get_ollama_response_stream" in error_str:
                 raise ImportError("Import error - trying to use async for ollama. import async_generator failed. Try 'pip install async_generator'")
+        elif custom_llm_provider == "custom_openai":
+            if hasattr(original_exception, "status_code"):
+                if original_exception.status_code == 401:
+                    exception_mapping_worked = True
+                    raise AuthenticationError(
+                        message=f"CustomOpenAIException - {original_exception.message}",
+                        llm_provider="custom_openai",
+                        model=model
+                    )
+                elif original_exception.status_code == 408:
+                    exception_mapping_worked = True
+                    raise Timeout(
+                        message=f"CustomOpenAIException - {original_exception.message}",
+                        model=model,
+                        llm_provider="custom_openai"
+                    )
+                elif original_exception.status_code == 422:
+                    exception_mapping_worked = True
+                    raise InvalidRequestError(
+                        message=f"CustomOpenAIException - {original_exception.message}",
+                        model=model,
+                        llm_provider="custom_openai",
+                    )
+                elif original_exception.status_code == 429:
+                    exception_mapping_worked = True
+                    raise RateLimitError(
+                        message=f"CustomOpenAIException - {original_exception.message}",
+                        model=model,
+                        llm_provider="custom_openai",
+                    )
+                else:
+                    exception_mapping_worked = True
+                    raise APIError(
+                        status_code=original_exception.status_code,
+                        message=f"CustomOpenAIException - {original_exception.message}",
+                        llm_provider="custom_openai",
+                        model=model
+                    )
         exception_mapping_worked = True
         raise APIError(status_code=500, message=str(original_exception), llm_provider=custom_llm_provider, model=model)
     except Exception as e:
@@ -3205,6 +3245,29 @@ class CustomStreamWrapper:
         except:
             raise ValueError(f"Unable to parse response. Original response: {chunk}")

+    def handle_custom_openai_chat_completion_chunk(self, chunk):
+        try:
+            str_line = chunk.decode("utf-8")  # Convert bytes to string
+            text = ""
+            is_finished = False
+            finish_reason = None
+            if str_line.startswith("data:"):
+                data_json = json.loads(str_line[5:])
+                text = data_json["choices"][0]["delta"].get("content", "")
+                if data_json["choices"][0].get("finish_reason", None):
+                    is_finished = True
+                    finish_reason = data_json["choices"][0]["finish_reason"]
+                return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason}
+            elif "error" in str_line:
+                raise ValueError(f"Unable to parse response. Original response: {str_line}")
Original response: {str_line}") + else: + return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason} + + except: + traceback.print_exc() + pass + + def handle_openai_text_completion_chunk(self, chunk): try: return chunk["choices"][0]["text"] @@ -3401,6 +3466,13 @@ class CustomStreamWrapper: if "error" in chunk: exception_type(model=self.model, custom_llm_provider=self.custom_llm_provider, original_exception=chunk["error"]) completion_obj = chunk + elif self.custom_llm_provider == "custom_openai": + chunk = next(self.completion_stream) + response_obj = self.handle_custom_openai_chat_completion_chunk(chunk) + completion_obj["content"] = response_obj["text"] + print(f"completion obj content: {completion_obj['content']}") + if response_obj["is_finished"]: + model_response.choices[0].finish_reason = response_obj["finish_reason"] else: # openai chat/azure models chunk = next(self.completion_stream) model_response = chunk