From 7358d2e4ea3ff40d17b0f51ae5da005e11a4b356 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Sat, 14 Oct 2023 16:43:06 -0700
Subject: [PATCH] =?UTF-8?q?bump:=20version=200.8.4=20=E2=86=92=200.8.5?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 litellm/__pycache__/main.cpython-311.pyc  |  Bin 52588 -> 52823 bytes
 litellm/__pycache__/utils.cpython-311.pyc |  Bin 163534 -> 164128 bytes
 litellm/llms/huggingface_restapi.py       |   70 +-
 litellm/llms/prompt_templates/factory.py  |   22 +-
 litellm/main.py                           |    9 +-
 litellm/proxy/api_log.json                | 7278 +--------------------
 litellm/proxy/proxy_cli.py                |   31 +-
 litellm/proxy/proxy_server.py             |  111 +-
 litellm/utils.py                          |   44 +-
 poetry.lock                               |    2 +-
 pyproject.toml                            |    4 +-
 11 files changed, 228 insertions(+), 7343 deletions(-)

diff --git a/litellm/__pycache__/main.cpython-311.pyc b/litellm/__pycache__/main.cpython-311.pyc
index 521efeb3222d76679c268be4eacdba713b229a42..af1dae3a78812a9f8decff19156009e8a6769d6d 100644
Binary files a/litellm/__pycache__/main.cpython-311.pyc and b/litellm/__pycache__/main.cpython-311.pyc differ
diff --git a/litellm/__pycache__/utils.cpython-311.pyc b/litellm/__pycache__/utils.cpython-311.pyc
index af8acfef82538ce0d755c75f370bde75097aba97..53af3d17fe536cfc3e4c17b646378163372728e1 100644
Binary files a/litellm/__pycache__/utils.cpython-311.pyc and b/litellm/__pycache__/utils.cpython-311.pyc differ
zXvccSV7dJYnuby;Aeg`(Y|Ng=Fn|s81OO+~c&DU?;;{*mK0^Dy0I<9IBg(xswd3jR zL{}7UvVQ~^vj9Bl3&CUYEOEPQImyxEP_+(%0{Iv3tbF!EI-{4Mb~a!%;3$CIobywm zDn1n{(Vv0JtLzh$n$>!uY9^s}eJX}ieE95BvB8$!1-<1E_+z1GfoEm4vqf3bm3R6< zl;FO7Rg_pY>f4?mqZY;2{}sdg4DdOC%^0v7OlBB_EyW{d)AHEZa2>(I_GMG#;;}b? z*Be5!k(GS_@qiSBH@uR)o&HlVd?S{|CRlzWuA16mA2sG%(OJ#-mbStVROEN!#4w!B zC-*PkEo)17F?bE&-s^?cOpxXtyp?yzX`GIzUW)=6N=xP~4 z0*~w(@uLu8uy5o)#52J$xD%-~g;4WZkkMdLONWTU%>Q)0rfyc*)qYJ&*gRK@6JM)) zu8Bcnp*nJnURoOG^Z%FjVVk|$!C5b0@SGSzNj(LC0qOiEi$eyxJzk0%SF87E2z6O#!$rNy(X>8Y_!Ku5^l<>ZhH8I& z;EU%XwOZ3AS*8)g8Q)1w``DV^cG9HO(?fdstu43pn=zOj>SUU?H|0gxZ~pt@;e8if%b`Hx8jQL7PRunHr86YI&HJW|>W-p1w!Iv_gy3 zXs14j(ze*2!$_^3l^(CVN{815t^_IYb4Jdu4T9B^(OT9}h%o}!>ei|8TlpB^X%`Q8 z7vv#)1+Ifo@wmDct;O58?-q&aF+{+Hm_CQKRwHAy1r{fj$NBceXzQ)XT**ol6ZLXW z@a_OCRizy@#}t+v3h{_%#=gRS1n;4DvvfM6Qx3Zg?|n_gq=(sYZLNDV43O}irg zTU|@ja^P+0+DOX^s(8`2I9-bsu?1*{TT3!`HOe(=V}_P!TaGdZ)_S!+Ln{qi1_I!a z3d_{4adiT(?sr5L@Dge&;j%8^t z3L6s^5QiS3?&_@F8&(5xfD69lE?SB;nfv8Ecmmki5aUr=rAm8fMfJ64(54&>^LlOR zT^mmj-+OUKGcloeKmshN!&+<_x|`*u>=5fgy%zw{{ti`+6)o@bD(2G*0A7bWx=v*i z`6Hcmx=uF~tuOqRqUmy#m#g)#A%<`u`R_>If?UmEuGg>S@ zCrz4cx3xR@a*X0(@J&-MsAQ?DrI!xIJyWa^7b@nhtfW6&oS+UjkR6lK@NW&WfyYNT3-18muRS&-Q@Yc|mQ2Y+K!Dk0H(VRplXvP@ zF`K(l`wW0BRSO$8!N$S#0;ifjInkd~9}m|$*7G=cY^)6jk-gN;A4d6qMRm<`2VHp~ z@8WMx{80`vbhV3SwgLy>B|2M)4Fps9EiU)Gx&^IA@8_Vv^MHMT{Q#E5djJ-^S}{T! zC~AEBMreiR@Tp`2J(6@SahesSFdbCfXzi=gDWK5$Zx!(r8 z=%tj7L6#Hc#@8$dRB0i7^teaW6lw?R;du0=PCiE156~a*G&JP}vJ)lVAn{?E&Y>8V z@&`MRNLAMOEmP{;^lFADpZC<)(c}Q&2LEUP4d9|%KF`M_<#yFDM*Ev+@NFEceJjN6 z>UTxj04s~yrw$cqQ>=&4=7@?f)`nT%MERJSS*-P;i^_Y7wJLGN_hYg4MzDBYys;O%7Q#HoiRw33RaWaKP=5^Y7JyY}EndST z+sPOj9$7PXS+*TJOE*-!3wTA@%C)2@-b8|1R)zkU$}iWtTi*u553*-^qNr83m1~*e zuy0$rmTocL{C=XEW@-JBd2l09NIhayxPu#&SPqssYmB&8Dgt?eV&$8&qL}04xai8D6z4& zp~Oq)B1(bjfo&kr3qiV|oJNXFFitR7dwr6opl ze#OD&F&OXz)y<{dM*m1;hfC`!I{V&nY4=#HsHatpe_Z9OTBN-$>Up2~7>cZgwSzG~ z)ZVz-x*P;c1aUUFj(8|=hj=R&^A(am93Nx4tDQA2EOHL+haiH}f4)?*8Kz*Rc%~<# zo_plZ=9)~oCY^7Zcvp8q@eS0a$3}o$uH|Q(Q7)+FCE5US&S$IADnu;i^}4P6Q?G>dg3{fP+mKwBTUFuJS3EZ-^mB zK?A(tbJkGguSeOgmFW4@2W|LR*d--Y;1dR$6JU=TSE4T=mT@KH=vTr&osWAKZqp9h z8tHbl)Y?^*fe=)p|A9IkS7LukA5~A>^m4w2KJ8d$1n8s0V;pQpP@S=H^Cq;Oy7z}x z(mB~%Q$rWfoQd*1IXlo5wBso9BT1+DT3NHu>#a5HhQ;L%bHxf@V#4%mEBHsm@+)6x}A!LpMNQ zz+eDpVE8cA_{SP6P{OBY`X#_m06vDXgG4|sJ4hNz_^89Uw$DImDQLfHMv8!$hUebk(*Fz@Mj2b(688u_+IL5tR$}S1)*|Rb`kUi7O#i-p j?HHf9P5Z{2cdJ!&xoWyOe#7+d+t;oQ_hr{>zli?@vtSBN diff --git a/litellm/llms/huggingface_restapi.py b/litellm/llms/huggingface_restapi.py index 93c6a9a089..2b33f303b9 100644 --- a/litellm/llms/huggingface_restapi.py +++ b/litellm/llms/huggingface_restapi.py @@ -6,7 +6,7 @@ import requests import time import litellm from typing import Callable -from litellm.utils import ModelResponse, Choices, Message +from litellm.utils import ModelResponse, Choices, Message, CustomStreamWrapper from typing import Optional from .prompt_templates.factory import prompt_factory, custom_prompt @@ -65,12 +65,17 @@ class HuggingfaceConfig(): and not isinstance(v, (types.FunctionType, types.BuiltinFunctionType, classmethod, staticmethod)) and v is not None} -def validate_environment(api_key): - headers = { +def validate_environment(api_key, headers): + default_headers = { "content-type": "application/json", } - if api_key: - headers["Authorization"] = f"Bearer {api_key}" + if api_key and headers is None: + default_headers["Authorization"] = f"Bearer {api_key}" # Huggingface Inference Endpoint default is to accept bearer tokens + headers = default_headers + elif headers: + headers=headers + else: + headers = 
     return headers
 
 tgi_models_cache = None
@@ -125,6 +130,7 @@ def completion(
     model: str,
     messages: list,
     api_base: Optional[str],
+    headers: Optional[dict],
     model_response: ModelResponse,
     print_verbose: Callable,
     encoding,
@@ -135,7 +141,8 @@ def completion(
     litellm_params=None,
     logger_fn=None,
 ):
-    headers = validate_environment(api_key)
+    print(f'headers inside hf rest api: {headers}')
+    headers = validate_environment(api_key, headers)
     task = get_hf_task_for_model(model)
     print_verbose(f"{model}, {task}")
     completion_url = ""
@@ -227,7 +234,7 @@ def completion(
         logging_obj.pre_call(
             input=input_text,
             api_key=api_key,
-            additional_args={"complete_input_dict": data, "task": task},
+            additional_args={"complete_input_dict": data, "task": task, "headers": headers},
         )
     ## COMPLETION CALL
     if "stream" in optional_params and optional_params["stream"] == True:
@@ -244,20 +251,43 @@ def completion(
             headers=headers,
             data=json.dumps(data)
         )
-        ## LOGGING
-        logging_obj.post_call(
-            input=input_text,
-            api_key=api_key,
-            original_response=response.text,
-            additional_args={"complete_input_dict": data, "task": task},
-        )
-        ## RESPONSE OBJECT
-        try:
-            completion_response = response.json()
-        except:
-            raise HuggingfaceError(
-                message=response.text, status_code=response.status_code
+
+        ## Some servers might return streaming responses even though stream was not set to true. (e.g. Baseten)
+        is_streamed = False
+        print(f"response keys: {response.__dict__.keys()}")
+        print(f"response keys: {response.__dict__['headers']}")
+        if response.__dict__['headers']["Content-Type"] == "text/event-stream":
+            is_streamed = True
+
+        # iterate over the complete streamed response, and return the final answer
+        if is_streamed:
+            streamed_response = CustomStreamWrapper(completion_stream=response.iter_lines(), model=model, custom_llm_provider="huggingface", logging_obj=logging_obj)
+            content = ""
+            for chunk in streamed_response:
+                content += chunk["choices"][0]["delta"]["content"]
+            completion_response = [{"generated_text": content}]
+            ## LOGGING
+            logging_obj.post_call(
+                input=input_text,
+                api_key=api_key,
+                original_response=completion_response,
+                additional_args={"complete_input_dict": data, "task": task},
             )
+        else:
+            ## LOGGING
+            logging_obj.post_call(
+                input=input_text,
+                api_key=api_key,
+                original_response=response.text,
+                additional_args={"complete_input_dict": data, "task": task},
+            )
+            ## RESPONSE OBJECT
+            try:
+                completion_response = response.json()
+            except:
+                raise HuggingfaceError(
+                    message=response.text, status_code=response.status_code
+                )
     print_verbose(f"response: {completion_response}")
     if isinstance(completion_response, dict) and "error" in completion_response:
         print_verbose(f"completion error: {completion_response['error']}")
diff --git a/litellm/llms/prompt_templates/factory.py b/litellm/llms/prompt_templates/factory.py
index ab7a3560ca..319e7047f2 100644
--- a/litellm/llms/prompt_templates/factory.py
+++ b/litellm/llms/prompt_templates/factory.py
@@ -22,7 +22,9 @@ def llama_2_chat_pt(messages):
             "post_message": "\n" # follows this - https://replicate.com/blog/how-to-prompt-llama
         }
     },
-        messages=messages
+        messages=messages,
+        bos_token="<s>",
+        eos_token="</s>"
     )
     return prompt
 
@@ -218,14 +220,26 @@ def function_call_prompt(messages: list, functions: list):
 
 
 # Custom prompt template
-def custom_prompt(role_dict: dict, messages: list, initial_prompt_value: str="", final_prompt_value: str=""):
-    prompt = initial_prompt_value
+def custom_prompt(role_dict: dict, messages: list, initial_prompt_value: str="", final_prompt_value: str="", bos_token: str="", eos_token: str=""):
+    prompt = bos_token + initial_prompt_value
+    bos_open = True
+    ## a bos token is at the start of a system / human message
+    ## an eos token is at the end of the assistant response to the message
     for message in messages:
         role = message["role"]
+
+        if role in ["system", "human"] and not bos_open:
+            prompt += bos_token
+            bos_open = True
+
         pre_message_str = role_dict[role]["pre_message"] if role in role_dict and "pre_message" in role_dict[role] else ""
         post_message_str = role_dict[role]["post_message"] if role in role_dict and "post_message" in role_dict[role] else ""
         prompt += pre_message_str + message["content"] + post_message_str
-
+
+        if role == "assistant":
+            prompt += eos_token
+            bos_open = False
+
     prompt += final_prompt_value
     return prompt
diff --git a/litellm/main.py b/litellm/main.py
index 445e6a5fb2..70ed8da182 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -230,9 +230,10 @@ def completion(
     id = kwargs.get('id', None)
     metadata = kwargs.get('metadata', None)
     fallbacks = kwargs.get('fallbacks', None)
+    headers = kwargs.get("headers", None)
     ######## end of unpacking kwargs ###########
     openai_params = ["functions", "function_call", "temperature", "temperature", "top_p", "n", "stream", "stop", "max_tokens", "presence_penalty", "frequency_penalty", "logit_bias", "user", "request_timeout", "api_base", "api_version", "api_key"]
-    litellm_params = ["metadata", "acompletion", "caching", "return_async", "mock_response", "api_key", "api_version", "api_base", "force_timeout", "logger_fn", "verbose", "custom_llm_provider", "litellm_logging_obj", "litellm_call_id", "use_client", "id", "metadata", "fallbacks", "azure"]
+    litellm_params = ["metadata", "acompletion", "caching", "return_async", "mock_response", "api_key", "api_version", "api_base", "force_timeout", "logger_fn", "verbose", "custom_llm_provider", "litellm_logging_obj", "litellm_call_id", "use_client", "id", "fallbacks", "azure", "headers"]
     default_params = openai_params + litellm_params
     non_default_params = {k: v for k,v in kwargs.items() if k not in default_params} # model-specific params - pass them straight to the model/provider
     if mock_response:
@@ -775,10 +776,16 @@ def completion(
             or os.environ.get("HUGGINGFACE_API_KEY")
             or litellm.api_key
         )
+        hf_headers = (
+            headers
+            or litellm.headers
+        )
+        print(f'headers before hf rest api: {hf_headers}')
         model_response = huggingface_restapi.completion(
             model=model,
             messages=messages,
             api_base=api_base, # type: ignore
+            headers=hf_headers,
             model_response=model_response,
             print_verbose=print_verbose,
             optional_params=optional_params,
diff --git a/litellm/proxy/api_log.json b/litellm/proxy/api_log.json
index 6ce5fb2af6..62020b0250 100644
--- a/litellm/proxy/api_log.json
+++ b/litellm/proxy/api_log.json
@@ -1,954 +1,17 @@
 {
-    "20231012182157625128": {
+    "20231014160921359878": {
         "pre_api_call": {
-            "model": "anthropic.claude-v2",
+            "model": "codellama/CodeLlama-7b-Instruct-hf",
             "messages": [
                 {
                     "role": "user",
-                    "content": "what do you know?"
+ "content": "hey" } ], "optional_params": { - "temperature": 0.1, - "stream": true - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "902640b5-4a26-4629-932d-35d6cf4e1635", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: what do you know?\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: what do you know?\\n\\nAssistant: \", \"temperature\": 0.1, \"max_tokens_to_sample\": 256}" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "user", - "content": "what do you know?" - } - ], - "optional_params": { - "temperature": 0.1, - "stream": true - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "902640b5-4a26-4629-932d-35d6cf4e1635", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": null, - "api_key": null, - "additional_args": {}, - "log_event_type": "post_api_call", - "original_response": "", - "complete_streaming_response": { - "id": "chatcmpl-1757e5ea-71f2-44a2-9d8d-1ba8238a7c99", - "object": "chat.completion.chunk", - "created": 1697160117, - "model": "anthropic.claude-v2", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": " I'm Claude, an AI assistant created by Anthropic. I don't actually have general knowledge about the world. I'm an AI conversational model trained by Anthropic to be helpful, harmless, and honest." 
- }, - "finish_reason": "stop_sequence" - } - ] - } - } - }, - "20231013154412334882": { - "pre_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "user", - "content": "app = FastAPI()\n\nmake my fastapi cors compatible" - }, - { - "role": "user", - "content": "app = FastAPI()\n\nmake my fastapi cors compatible" - }, - { - "role": "user", - "content": "app = FastAPI()\n\nmake my fastapi cors compatible" - } - ], - "optional_params": { - "max_tokens_to_sample": 1024, "temperature": 0.5, - "stream": true - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "44d967c2-456e-4ffa-b7c4-4551e178b0d7", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: app = FastAPI()\n\nmake my fastapi cors compatible\n\nHuman: app = FastAPI()\n\nmake my fastapi cors compatible\n\nHuman: app = FastAPI()\n\nmake my fastapi cors compatible\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: app = FastAPI()\\n\\nmake my fastapi cors compatible\\n\\nHuman: app = FastAPI()\\n\\nmake my fastapi cors compatible\\n\\nHuman: app = FastAPI()\\n\\nmake my fastapi cors compatible\\n\\nAssistant: \", \"max_tokens_to_sample\": 1024, \"temperature\": 0.5}" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "user", - "content": "app = FastAPI()\n\nmake my fastapi cors compatible" - }, - { - "role": "user", - "content": "app = FastAPI()\n\nmake my fastapi cors compatible" - }, - { - "role": "user", - "content": "app = FastAPI()\n\nmake my fastapi cors compatible" - } - ], - "optional_params": { - "max_tokens_to_sample": 1024, - "temperature": 0.5, - "stream": true - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "44d967c2-456e-4ffa-b7c4-4551e178b0d7", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": null, - "api_key": null, - "additional_args": {}, - "log_event_type": "post_api_call", - "original_response": "", - "complete_streaming_response": { - "id": "chatcmpl-a569121f-86e3-432a-9e7f-189207791ad2", - "object": "chat.completion.chunk", - "created": 1697237052, - "model": "anthropic.claude-v2", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": " Here is how you can make a FastAPI app CORS compatible:\n\n```python\nfrom fastapi import FastAPI, CORSMiddleware\n\napp = FastAPI()\n\norigins = [\n \"http://localhost.tiangolo.com\",\n \"https://localhost.tiangolo.com\",\n \"http://localhost\",\n \"http://localhost:8080\",\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n```\n\nThis enables CORS for the specified origins. The allow_credentials=True allows sending cookies from the frontend. 
The allow_methods and allow_headers set to \"*\" allows all methods and headers.\n\nYou can also set more restrictive CORS settings by specifying only the required origins, methods, headers etc.\n\nSome key points:\n\n- The CORSMiddleware should be added before any routers.\n- The allowed origins can be restricted to your frontend URLs only.\n- Allow credentials only if needed.\n- Restrict methods and headers to only required ones if possible.\n\nSo this sets up a basic permissive CORS config for a FastAPI app." - }, - "finish_reason": "stop_sequence" - } - ] - } - } - }, - "20231013154448360412": { - "pre_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "system", - "content": "\" Here is how you can make a FastAPI app CORS compatible:\n\n```python\nfrom fastapi import FastAPI, CORSMiddleware\n\napp = FastAPI()\n\norigins = [\n \"http://localhost.tiangolo.com\",\n \"https://localhost.tiangolo.com\",\n \"http://localhost\",\n \"http://localhost:8080\",\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n```\n\nThis enables CORS for the specified origins. The allow_credentials=True allows sending cookies from the frontend. The allow_methods and allow_headers set to \"*\" allows all methods and headers.\n\nYou can also set more restrictive CORS settings by specifying only the required origins, methods, headers etc.\n\nSome key points:\n\n- The CORSMiddleware should be added before any routers.\n- The allowed origins can be restricted to your frontend URLs only.\n- Allow credentials only if needed.\n- Restrict methods and headers to only required ones if possible.\n\nSo this sets up a basic permissive CORS config for a FastAPI app.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "max_tokens_to_sample": 20, - "temperature": 0.5 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "b70e6392-90dc-40b0-87bf-9b48c843c847", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: \" Here is how you can make a FastAPI app CORS compatible:\n\n```python\nfrom fastapi import FastAPI, CORSMiddleware\n\napp = FastAPI()\n\norigins = [\n \"http://localhost.tiangolo.com\",\n \"https://localhost.tiangolo.com\",\n \"http://localhost\",\n \"http://localhost:8080\",\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n```\n\nThis enables CORS for the specified origins. The allow_credentials=True allows sending cookies from the frontend. The allow_methods and allow_headers set to \"*\" allows all methods and headers.\n\nYou can also set more restrictive CORS settings by specifying only the required origins, methods, headers etc.\n\nSome key points:\n\n- The CORSMiddleware should be added before any routers.\n- The allowed origins can be restricted to your frontend URLs only.\n- Allow credentials only if needed.\n- Restrict methods and headers to only required ones if possible.\n\nSo this sets up a basic permissive CORS config for a FastAPI app.\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: \\\" Here is how you can make a FastAPI app CORS compatible:\\n\\n```python\\nfrom fastapi import FastAPI, CORSMiddleware\\n\\napp = FastAPI()\\n\\norigins = [\\n \\\"http://localhost.tiangolo.com\\\",\\n \\\"https://localhost.tiangolo.com\\\",\\n \\\"http://localhost\\\",\\n \\\"http://localhost:8080\\\",\\n]\\n\\napp.add_middleware(\\n CORSMiddleware,\\n allow_origins=origins,\\n allow_credentials=True,\\n allow_methods=[\\\"*\\\"],\\n allow_headers=[\\\"*\\\"],\\n)\\n```\\n\\nThis enables CORS for the specified origins. The allow_credentials=True allows sending cookies from the frontend. The allow_methods and allow_headers set to \\\"*\\\" allows all methods and headers.\\n\\nYou can also set more restrictive CORS settings by specifying only the required origins, methods, headers etc.\\n\\nSome key points:\\n\\n- The CORSMiddleware should be added before any routers.\\n- The allowed origins can be restricted to your frontend URLs only.\\n- Allow credentials only if needed.\\n- Restrict methods and headers to only required ones if possible.\\n\\nSo this sets up a basic permissive CORS config for a FastAPI app.\\\"\\n\\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "system", - "content": "\" Here is how you can make a FastAPI app CORS compatible:\n\n```python\nfrom fastapi import FastAPI, CORSMiddleware\n\napp = FastAPI()\n\norigins = [\n \"http://localhost.tiangolo.com\",\n \"https://localhost.tiangolo.com\",\n \"http://localhost\",\n \"http://localhost:8080\",\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n```\n\nThis enables CORS for the specified origins. The allow_credentials=True allows sending cookies from the frontend. The allow_methods and allow_headers set to \"*\" allows all methods and headers.\n\nYou can also set more restrictive CORS settings by specifying only the required origins, methods, headers etc.\n\nSome key points:\n\n- The CORSMiddleware should be added before any routers.\n- The allowed origins can be restricted to your frontend URLs only.\n- Allow credentials only if needed.\n- Restrict methods and headers to only required ones if possible.\n\nSo this sets up a basic permissive CORS config for a FastAPI app.\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" - } - ], - "optional_params": { - "max_tokens_to_sample": 20, - "temperature": 0.5 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "b70e6392-90dc-40b0-87bf-9b48c843c847", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: \" Here is how you can make a FastAPI app CORS compatible:\n\n```python\nfrom fastapi import FastAPI, CORSMiddleware\n\napp = FastAPI()\n\norigins = [\n \"http://localhost.tiangolo.com\",\n \"https://localhost.tiangolo.com\",\n \"http://localhost\",\n \"http://localhost:8080\",\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n```\n\nThis enables CORS for the specified origins. The allow_credentials=True allows sending cookies from the frontend. The allow_methods and allow_headers set to \"*\" allows all methods and headers.\n\nYou can also set more restrictive CORS settings by specifying only the required origins, methods, headers etc.\n\nSome key points:\n\n- The CORSMiddleware should be added before any routers.\n- The allowed origins can be restricted to your frontend URLs only.\n- Allow credentials only if needed.\n- Restrict methods and headers to only required ones if possible.\n\nSo this sets up a basic permissive CORS config for a FastAPI app.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: \\\" Here is how you can make a FastAPI app CORS compatible:\\n\\n```python\\nfrom fastapi import FastAPI, CORSMiddleware\\n\\napp = FastAPI()\\n\\norigins = [\\n \\\"http://localhost.tiangolo.com\\\",\\n \\\"https://localhost.tiangolo.com\\\",\\n \\\"http://localhost\\\",\\n \\\"http://localhost:8080\\\",\\n]\\n\\napp.add_middleware(\\n CORSMiddleware,\\n allow_origins=origins,\\n allow_credentials=True,\\n allow_methods=[\\\"*\\\"],\\n allow_headers=[\\\"*\\\"],\\n)\\n```\\n\\nThis enables CORS for the specified origins. The allow_credentials=True allows sending cookies from the frontend. The allow_methods and allow_headers set to \\\"*\\\" allows all methods and headers.\\n\\nYou can also set more restrictive CORS settings by specifying only the required origins, methods, headers etc.\\n\\nSome key points:\\n\\n- The CORSMiddleware should be added before any routers.\\n- The allowed origins can be restricted to your frontend URLs only.\\n- Allow credentials only if needed.\\n- Restrict methods and headers to only required ones if possible.\\n\\nSo this sets up a basic permissive CORS config for a FastAPI app.\\\"\\n\\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:\\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" - }, - "log_event_type": "post_api_call", - "original_response": { - "completion": " Enable CORS in FastAPI", - "stop_reason": "stop_sequence" - } - } - }, - "20231013154450195512": { - "pre_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "system", - "content": "### Instruction:\napp = FastAPI()\n\nmake my fastapi cors compatible\n### Instruction:\napp = FastAPI()\n\nmake my fastapi cors compatible\n### Instruction:\napp = FastAPI()\n\nmake my fastapi cors compatible\n### Response:\n Here is how you can make a FastAPI app CORS compatible:\n\n```python\nfrom fastapi import FastAPI, CORSMiddleware\n\napp = FastAPI()\n\norigins = [\n \"http://localhost.tiangolo.com\",\n \"https://localhost.tiangolo.com\",\n \"http://localhost\",\n \"http://localhost:8080\",\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n```\n\nThis enables CORS for the specified origins. The allow_credentials=True allows sending cookies from the frontend. The allow_methods and allow_headers set to \"*\" allows all methods and headers.\n\nYou can also set more restrictive CORS settings by specifying only the required origins, methods, headers etc.\n\nSome key points:\n\n- The CORSMiddleware should be added before any routers.\n- The allowed origins can be restricted to your frontend URLs only.\n- Allow credentials only if needed.\n- Restrict methods and headers to only required ones if possible.\n\nSo this sets up a basic permissive CORS config for a FastAPI app.\n### Response:\n\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: " - } - ], - "optional_params": { - "max_tokens_to_sample": 20, - "temperature": 0.5 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "fd366c97-9868-4c16-995c-a65ef0fd844f", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: ### Instruction:\napp = FastAPI()\n\nmake my fastapi cors compatible\n### Instruction:\napp = FastAPI()\n\nmake my fastapi cors compatible\n### Instruction:\napp = FastAPI()\n\nmake my fastapi cors compatible\n### Response:\n Here is how you can make a FastAPI app CORS compatible:\n\n```python\nfrom fastapi import FastAPI, CORSMiddleware\n\napp = FastAPI()\n\norigins = [\n \"http://localhost.tiangolo.com\",\n \"https://localhost.tiangolo.com\",\n \"http://localhost\",\n \"http://localhost:8080\",\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n```\n\nThis enables CORS for the specified origins. The allow_credentials=True allows sending cookies from the frontend. 
The allow_methods and allow_headers set to \"*\" allows all methods and headers.\n\nYou can also set more restrictive CORS settings by specifying only the required origins, methods, headers etc.\n\nSome key points:\n\n- The CORSMiddleware should be added before any routers.\n- The allowed origins can be restricted to your frontend URLs only.\n- Allow credentials only if needed.\n- Restrict methods and headers to only required ones if possible.\n\nSo this sets up a basic permissive CORS config for a FastAPI app.\n### Response:\n\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: \n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: ### Instruction:\\napp = FastAPI()\\n\\nmake my fastapi cors compatible\\n### Instruction:\\napp = FastAPI()\\n\\nmake my fastapi cors compatible\\n### Instruction:\\napp = FastAPI()\\n\\nmake my fastapi cors compatible\\n### Response:\\n Here is how you can make a FastAPI app CORS compatible:\\n\\n```python\\nfrom fastapi import FastAPI, CORSMiddleware\\n\\napp = FastAPI()\\n\\norigins = [\\n \\\"http://localhost.tiangolo.com\\\",\\n \\\"https://localhost.tiangolo.com\\\",\\n \\\"http://localhost\\\",\\n \\\"http://localhost:8080\\\",\\n]\\n\\napp.add_middleware(\\n CORSMiddleware,\\n allow_origins=origins,\\n allow_credentials=True,\\n allow_methods=[\\\"*\\\"],\\n allow_headers=[\\\"*\\\"],\\n)\\n```\\n\\nThis enables CORS for the specified origins. The allow_credentials=True allows sending cookies from the frontend. The allow_methods and allow_headers set to \\\"*\\\" allows all methods and headers.\\n\\nYou can also set more restrictive CORS settings by specifying only the required origins, methods, headers etc.\\n\\nSome key points:\\n\\n- The CORSMiddleware should be added before any routers.\\n- The allowed origins can be restricted to your frontend URLs only.\\n- Allow credentials only if needed.\\n- Restrict methods and headers to only required ones if possible.\\n\\nSo this sets up a basic permissive CORS config for a FastAPI app.\\n### Response:\\n\\n\\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: \\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "system", - "content": "### Instruction:\napp = FastAPI()\n\nmake my fastapi cors compatible\n### Instruction:\napp = FastAPI()\n\nmake my fastapi cors compatible\n### Instruction:\napp = FastAPI()\n\nmake my fastapi cors compatible\n### Response:\n Here is how you can make a FastAPI app CORS compatible:\n\n```python\nfrom fastapi import FastAPI, CORSMiddleware\n\napp = FastAPI()\n\norigins = [\n \"http://localhost.tiangolo.com\",\n \"https://localhost.tiangolo.com\",\n \"http://localhost\",\n \"http://localhost:8080\",\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n```\n\nThis enables CORS for the specified origins. The allow_credentials=True allows sending cookies from the frontend. 
The allow_methods and allow_headers set to \"*\" allows all methods and headers.\n\nYou can also set more restrictive CORS settings by specifying only the required origins, methods, headers etc.\n\nSome key points:\n\n- The CORSMiddleware should be added before any routers.\n- The allowed origins can be restricted to your frontend URLs only.\n- Allow credentials only if needed.\n- Restrict methods and headers to only required ones if possible.\n\nSo this sets up a basic permissive CORS config for a FastAPI app.\n### Response:\n\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: " - } - ], - "optional_params": { - "max_tokens_to_sample": 20, - "temperature": 0.5 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "fd366c97-9868-4c16-995c-a65ef0fd844f", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: ### Instruction:\napp = FastAPI()\n\nmake my fastapi cors compatible\n### Instruction:\napp = FastAPI()\n\nmake my fastapi cors compatible\n### Instruction:\napp = FastAPI()\n\nmake my fastapi cors compatible\n### Response:\n Here is how you can make a FastAPI app CORS compatible:\n\n```python\nfrom fastapi import FastAPI, CORSMiddleware\n\napp = FastAPI()\n\norigins = [\n \"http://localhost.tiangolo.com\",\n \"https://localhost.tiangolo.com\",\n \"http://localhost\",\n \"http://localhost:8080\",\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n```\n\nThis enables CORS for the specified origins. The allow_credentials=True allows sending cookies from the frontend. The allow_methods and allow_headers set to \"*\" allows all methods and headers.\n\nYou can also set more restrictive CORS settings by specifying only the required origins, methods, headers etc.\n\nSome key points:\n\n- The CORSMiddleware should be added before any routers.\n- The allowed origins can be restricted to your frontend URLs only.\n- Allow credentials only if needed.\n- Restrict methods and headers to only required ones if possible.\n\nSo this sets up a basic permissive CORS config for a FastAPI app.\n### Response:\n\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: \n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: ### Instruction:\\napp = FastAPI()\\n\\nmake my fastapi cors compatible\\n### Instruction:\\napp = FastAPI()\\n\\nmake my fastapi cors compatible\\n### Instruction:\\napp = FastAPI()\\n\\nmake my fastapi cors compatible\\n### Response:\\n Here is how you can make a FastAPI app CORS compatible:\\n\\n```python\\nfrom fastapi import FastAPI, CORSMiddleware\\n\\napp = FastAPI()\\n\\norigins = [\\n \\\"http://localhost.tiangolo.com\\\",\\n \\\"https://localhost.tiangolo.com\\\",\\n \\\"http://localhost\\\",\\n \\\"http://localhost:8080\\\",\\n]\\n\\napp.add_middleware(\\n CORSMiddleware,\\n allow_origins=origins,\\n allow_credentials=True,\\n allow_methods=[\\\"*\\\"],\\n allow_headers=[\\\"*\\\"],\\n)\\n```\\n\\nThis enables CORS for the specified origins. The allow_credentials=True allows sending cookies from the frontend. 
The allow_methods and allow_headers set to \\\"*\\\" allows all methods and headers.\\n\\nYou can also set more restrictive CORS settings by specifying only the required origins, methods, headers etc.\\n\\nSome key points:\\n\\n- The CORSMiddleware should be added before any routers.\\n- The allowed origins can be restricted to your frontend URLs only.\\n- Allow credentials only if needed.\\n- Restrict methods and headers to only required ones if possible.\\n\\nSo this sets up a basic permissive CORS config for a FastAPI app.\\n### Response:\\n\\n\\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: \\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" - }, - "log_event_type": "post_api_call", - "original_response": { - "completion": " Making FastAPI CORS Compatible", - "stop_reason": "stop_sequence" - } - } - }, - "20231013154547274601": { - "pre_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "user", - "content": "app = FastAPI()\n\nmake my fastapi cors compatible" - }, - { - "role": "user", - "content": "app = FastAPI()\n\nmake my fastapi cors compatible" - }, - { - "role": "user", - "content": "app = FastAPI()\n\nmake my fastapi cors compatible" - }, - { - "role": "assistant", - "content": " Here is how you can make a FastAPI app CORS compatible:\n\n```python\nfrom fastapi import FastAPI, CORSMiddleware\n\napp = FastAPI()\n\norigins = [\n \"http://localhost.tiangolo.com\",\n \"https://localhost.tiangolo.com\",\n \"http://localhost\",\n \"http://localhost:8080\",\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n```\n\nThis enables CORS for the specified origins. The allow_credentials=True allows sending cookies from the frontend. The allow_methods and allow_headers set to \"*\" allows all methods and headers.\n\nYou can also set more restrictive CORS settings by specifying only the required origins, methods, headers etc.\n\nSome key points:\n\n- The CORSMiddleware should be added before any routers.\n- The allowed origins can be restricted to your frontend URLs only.\n- Allow credentials only if needed.\n- Restrict methods and headers to only required ones if possible.\n\nSo this sets up a basic permissive CORS config for a FastAPI app." 
- }, - { - "role": "user", - "content": "just do something like this: \nfrom flask import Flask\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\nbut for flask" - } - ], - "optional_params": { - "max_tokens_to_sample": 1024, - "temperature": 0.5, - "stream": true - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "4f2c1100-7441-45a9-affa-9687de013bc9", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: app = FastAPI()\n\nmake my fastapi cors compatible\n\nHuman: app = FastAPI()\n\nmake my fastapi cors compatible\n\nHuman: app = FastAPI()\n\nmake my fastapi cors compatible\n\nAssistant: Here is how you can make a FastAPI app CORS compatible:\n\n```python\nfrom fastapi import FastAPI, CORSMiddleware\n\napp = FastAPI()\n\norigins = [\n \"http://localhost.tiangolo.com\",\n \"https://localhost.tiangolo.com\",\n \"http://localhost\",\n \"http://localhost:8080\",\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n```\n\nThis enables CORS for the specified origins. The allow_credentials=True allows sending cookies from the frontend. The allow_methods and allow_headers set to \"*\" allows all methods and headers.\n\nYou can also set more restrictive CORS settings by specifying only the required origins, methods, headers etc.\n\nSome key points:\n\n- The CORSMiddleware should be added before any routers.\n- The allowed origins can be restricted to your frontend URLs only.\n- Allow credentials only if needed.\n- Restrict methods and headers to only required ones if possible.\n\nSo this sets up a basic permissive CORS config for a FastAPI app.\n\nHuman: just do something like this: \nfrom flask import Flask\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\nbut for flask\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: app = FastAPI()\\n\\nmake my fastapi cors compatible\\n\\nHuman: app = FastAPI()\\n\\nmake my fastapi cors compatible\\n\\nHuman: app = FastAPI()\\n\\nmake my fastapi cors compatible\\n\\nAssistant: Here is how you can make a FastAPI app CORS compatible:\\n\\n```python\\nfrom fastapi import FastAPI, CORSMiddleware\\n\\napp = FastAPI()\\n\\norigins = [\\n \\\"http://localhost.tiangolo.com\\\",\\n \\\"https://localhost.tiangolo.com\\\",\\n \\\"http://localhost\\\",\\n \\\"http://localhost:8080\\\",\\n]\\n\\napp.add_middleware(\\n CORSMiddleware,\\n allow_origins=origins,\\n allow_credentials=True,\\n allow_methods=[\\\"*\\\"],\\n allow_headers=[\\\"*\\\"],\\n)\\n```\\n\\nThis enables CORS for the specified origins. The allow_credentials=True allows sending cookies from the frontend. 
The allow_methods and allow_headers set to \\\"*\\\" allows all methods and headers.\\n\\nYou can also set more restrictive CORS settings by specifying only the required origins, methods, headers etc.\\n\\nSome key points:\\n\\n- The CORSMiddleware should be added before any routers.\\n- The allowed origins can be restricted to your frontend URLs only.\\n- Allow credentials only if needed.\\n- Restrict methods and headers to only required ones if possible.\\n\\nSo this sets up a basic permissive CORS config for a FastAPI app.\\n\\nHuman: just do something like this: \\nfrom flask import Flask\\nfrom flask_cors import CORS\\n\\napp = Flask(__name__)\\nCORS(app)\\n\\nbut for flask\\n\\nAssistant: \", \"max_tokens_to_sample\": 1024, \"temperature\": 0.5}" - }, - "log_event_type": "pre_api_call" - } - }, - "20231013154549261702": { - "pre_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "system", - "content": "\" To enable CORS in Flask, you can use the Flask-CORS extension.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "max_tokens_to_sample": 20, - "temperature": 0.5 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "3987d37c-194b-462c-9803-53f2001a6712", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: \" To enable CORS in Flask, you can use the Flask-CORS extension.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: \\\" To enable CORS in Flask, you can use the Flask-CORS extension.\\\"\\n\\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "system", - "content": "\" To enable CORS in Flask, you can use the Flask-CORS extension.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "max_tokens_to_sample": 20, - "temperature": 0.5 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "3987d37c-194b-462c-9803-53f2001a6712", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: \" To enable CORS in Flask, you can use the Flask-CORS extension.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: \\\" To enable CORS in Flask, you can use the Flask-CORS extension.\\\"\\n\\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:\\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" - }, - "log_event_type": "post_api_call", - "original_response": { - "completion": " Enable CORS in Flask with Flask-CORS", - "stop_reason": "stop_sequence" - } - } - }, - "20231013154552224346": { - "pre_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "user", - "content": "app = FastAPI()\n\nmake my fastapi cors compatible" - }, - { - "role": "user", - "content": "app = FastAPI()\n\nmake my fastapi cors compatible" - }, - { - "role": "user", - "content": "app = FastAPI()\n\nmake my fastapi cors compatible" - }, - { - "role": "assistant", - "content": " Here is how you can make a FastAPI app CORS compatible:\n\n```python\nfrom fastapi import FastAPI, CORSMiddleware\n\napp = FastAPI()\n\norigins = [\n \"http://localhost.tiangolo.com\",\n \"https://localhost.tiangolo.com\",\n \"http://localhost\",\n \"http://localhost:8080\",\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n```\n\nThis enables CORS for the specified origins. The allow_credentials=True allows sending cookies from the frontend. The allow_methods and allow_headers set to \"*\" allows all methods and headers.\n\nYou can also set more restrictive CORS settings by specifying only the required origins, methods, headers etc.\n\nSome key points:\n\n- The CORSMiddleware should be added before any routers.\n- The allowed origins can be restricted to your frontend URLs only.\n- Allow credentials only if needed.\n- Restrict methods and headers to only required ones if possible.\n\nSo this sets up a basic permissive CORS config for a FastAPI app." - }, - { - "role": "user", - "content": "just do something like this: \nfrom flask import Flask\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\nbut for flask" - }, - { - "role": "user", - "content": "just do something like this: \nfrom flask import Flask\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\nbut for fastapi" - } - ], - "optional_params": { - "max_tokens_to_sample": 1024, - "temperature": 0.5, - "stream": true - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "14690436-4133-4ccf-a70f-aa289a967f8a", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: app = FastAPI()\n\nmake my fastapi cors compatible\n\nHuman: app = FastAPI()\n\nmake my fastapi cors compatible\n\nHuman: app = FastAPI()\n\nmake my fastapi cors compatible\n\nAssistant: Here is how you can make a FastAPI app CORS compatible:\n\n```python\nfrom fastapi import FastAPI, CORSMiddleware\n\napp = FastAPI()\n\norigins = [\n \"http://localhost.tiangolo.com\",\n \"https://localhost.tiangolo.com\",\n \"http://localhost\",\n \"http://localhost:8080\",\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n```\n\nThis enables CORS for the specified origins. The allow_credentials=True allows sending cookies from the frontend. 
The allow_methods and allow_headers set to \"*\" allows all methods and headers.\n\nYou can also set more restrictive CORS settings by specifying only the required origins, methods, headers etc.\n\nSome key points:\n\n- The CORSMiddleware should be added before any routers.\n- The allowed origins can be restricted to your frontend URLs only.\n- Allow credentials only if needed.\n- Restrict methods and headers to only required ones if possible.\n\nSo this sets up a basic permissive CORS config for a FastAPI app.\n\nHuman: just do something like this: \nfrom flask import Flask\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\nbut for flask\n\nHuman: just do something like this: \nfrom flask import Flask\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\nbut for fastapi\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: app = FastAPI()\\n\\nmake my fastapi cors compatible\\n\\nHuman: app = FastAPI()\\n\\nmake my fastapi cors compatible\\n\\nHuman: app = FastAPI()\\n\\nmake my fastapi cors compatible\\n\\nAssistant: Here is how you can make a FastAPI app CORS compatible:\\n\\n```python\\nfrom fastapi import FastAPI, CORSMiddleware\\n\\napp = FastAPI()\\n\\norigins = [\\n \\\"http://localhost.tiangolo.com\\\",\\n \\\"https://localhost.tiangolo.com\\\",\\n \\\"http://localhost\\\",\\n \\\"http://localhost:8080\\\",\\n]\\n\\napp.add_middleware(\\n CORSMiddleware,\\n allow_origins=origins,\\n allow_credentials=True,\\n allow_methods=[\\\"*\\\"],\\n allow_headers=[\\\"*\\\"],\\n)\\n```\\n\\nThis enables CORS for the specified origins. The allow_credentials=True allows sending cookies from the frontend. The allow_methods and allow_headers set to \\\"*\\\" allows all methods and headers.\\n\\nYou can also set more restrictive CORS settings by specifying only the required origins, methods, headers etc.\\n\\nSome key points:\\n\\n- The CORSMiddleware should be added before any routers.\\n- The allowed origins can be restricted to your frontend URLs only.\\n- Allow credentials only if needed.\\n- Restrict methods and headers to only required ones if possible.\\n\\nSo this sets up a basic permissive CORS config for a FastAPI app.\\n\\nHuman: just do something like this: \\nfrom flask import Flask\\nfrom flask_cors import CORS\\n\\napp = Flask(__name__)\\nCORS(app)\\n\\nbut for flask\\n\\nHuman: just do something like this: \\nfrom flask import Flask\\nfrom flask_cors import CORS\\n\\napp = Flask(__name__)\\nCORS(app)\\n\\nbut for fastapi\\n\\nAssistant: \", \"max_tokens_to_sample\": 1024, \"temperature\": 0.5}" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "user", - "content": "app = FastAPI()\n\nmake my fastapi cors compatible" - }, - { - "role": "user", - "content": "app = FastAPI()\n\nmake my fastapi cors compatible" - }, - { - "role": "user", - "content": "app = FastAPI()\n\nmake my fastapi cors compatible" - }, - { - "role": "assistant", - "content": " Here is how you can make a FastAPI app CORS compatible:\n\n```python\nfrom fastapi import FastAPI, CORSMiddleware\n\napp = FastAPI()\n\norigins = [\n \"http://localhost.tiangolo.com\",\n \"https://localhost.tiangolo.com\",\n \"http://localhost\",\n \"http://localhost:8080\",\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n```\n\nThis enables CORS for the specified 
origins. The allow_credentials=True allows sending cookies from the frontend. The allow_methods and allow_headers set to \"*\" allows all methods and headers.\n\nYou can also set more restrictive CORS settings by specifying only the required origins, methods, headers etc.\n\nSome key points:\n\n- The CORSMiddleware should be added before any routers.\n- The allowed origins can be restricted to your frontend URLs only.\n- Allow credentials only if needed.\n- Restrict methods and headers to only required ones if possible.\n\nSo this sets up a basic permissive CORS config for a FastAPI app." - }, - { - "role": "user", - "content": "just do something like this: \nfrom flask import Flask\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\nbut for flask" - }, - { - "role": "user", - "content": "just do something like this: \nfrom flask import Flask\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\nbut for fastapi" - } - ], - "optional_params": { - "max_tokens_to_sample": 1024, - "temperature": 0.5, - "stream": true - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "14690436-4133-4ccf-a70f-aa289a967f8a", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": null, - "api_key": null, - "additional_args": {}, - "log_event_type": "post_api_call", - "original_response": "", - "complete_streaming_response": { - "id": "chatcmpl-32a68738-46c4-45ff-94c2-25d8fc0ea3f0", - "object": "chat.completion.chunk", - "created": 1697237152, - "model": "anthropic.claude-v2", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": " To enable CORS in FastAPI, you can use the CORSMiddleware similar to how it is done in Flask:\n\n```python\nfrom fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\n\napp = FastAPI()\n\norigins = [\n \"http://localhost.tiangolo.com\", \n \"https://localhost.tiangolo.com\",\n \"http://localhost\",\n \"http://localhost:8080\",\n]\n\napp.add_middleware(\n CORSMiddleware, \n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n```\n\nThe main differences compared to Flask:\n\n- Import CORSMiddleware from fastapi.middleware.cors\n- Use app.add_middleware() instead of CORS()\n- Specify the middleware before routes just like in Flask\n\nOtherwise the usage is similar - define allowed origins, set allow_credentials, allow_methods, allow_headers etc.\n\nSo this will enable CORS in your FastAPI app for the specified origins and allowed options." 
- }, - "finish_reason": "stop_sequence" - } - ] - } - } - }, - "20231013154602476284": { - "pre_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "system", - "content": "\" To enable CORS in FastAPI, you can use the CORSMiddleware similar to how it is done in Flask:\n\n```python\nfrom fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\n\napp = FastAPI()\n\norigins = [\n \"http://localhost.tiangolo.com\", \n \"https://localhost.tiangolo.com\",\n \"http://localhost\",\n \"http://localhost:8080\",\n]\n\napp.add_middleware(\n CORSMiddleware, \n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n```\n\nThe main differences compared to Flask:\n\n- Import CORSMiddleware from fastapi.middleware.cors\n- Use app.add_middleware() instead of CORS()\n- Specify the middleware before routes just like in Flask\n\nOtherwise the usage is similar - define allowed origins, set allow_credentials, allow_methods, allow_headers etc.\n\nSo this will enable CORS in your FastAPI app for the specified origins and allowed options.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "max_tokens_to_sample": 20, - "temperature": 0.5 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "6b158a57-2c7f-4eb7-87a4-a6fe4fc546ae", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: \" To enable CORS in FastAPI, you can use the CORSMiddleware similar to how it is done in Flask:\n\n```python\nfrom fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\n\napp = FastAPI()\n\norigins = [\n \"http://localhost.tiangolo.com\", \n \"https://localhost.tiangolo.com\",\n \"http://localhost\",\n \"http://localhost:8080\",\n]\n\napp.add_middleware(\n CORSMiddleware, \n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n```\n\nThe main differences compared to Flask:\n\n- Import CORSMiddleware from fastapi.middleware.cors\n- Use app.add_middleware() instead of CORS()\n- Specify the middleware before routes just like in Flask\n\nOtherwise the usage is similar - define allowed origins, set allow_credentials, allow_methods, allow_headers etc.\n\nSo this will enable CORS in your FastAPI app for the specified origins and allowed options.\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: \\\" To enable CORS in FastAPI, you can use the CORSMiddleware similar to how it is done in Flask:\\n\\n```python\\nfrom fastapi import FastAPI\\nfrom fastapi.middleware.cors import CORSMiddleware\\n\\napp = FastAPI()\\n\\norigins = [\\n \\\"http://localhost.tiangolo.com\\\", \\n \\\"https://localhost.tiangolo.com\\\",\\n \\\"http://localhost\\\",\\n \\\"http://localhost:8080\\\",\\n]\\n\\napp.add_middleware(\\n CORSMiddleware, \\n allow_origins=origins,\\n allow_credentials=True,\\n allow_methods=[\\\"*\\\"],\\n allow_headers=[\\\"*\\\"],\\n)\\n```\\n\\nThe main differences compared to Flask:\\n\\n- Import CORSMiddleware from fastapi.middleware.cors\\n- Use app.add_middleware() instead of CORS()\\n- Specify the middleware before routes just like in Flask\\n\\nOtherwise the usage is similar - define allowed origins, set allow_credentials, allow_methods, allow_headers etc.\\n\\nSo this will enable CORS in your FastAPI app for the specified origins and allowed options.\\\"\\n\\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "system", - "content": "\" To enable CORS in FastAPI, you can use the CORSMiddleware similar to how it is done in Flask:\n\n```python\nfrom fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\n\napp = FastAPI()\n\norigins = [\n \"http://localhost.tiangolo.com\", \n \"https://localhost.tiangolo.com\",\n \"http://localhost\",\n \"http://localhost:8080\",\n]\n\napp.add_middleware(\n CORSMiddleware, \n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n```\n\nThe main differences compared to Flask:\n\n- Import CORSMiddleware from fastapi.middleware.cors\n- Use app.add_middleware() instead of CORS()\n- Specify the middleware before routes just like in Flask\n\nOtherwise the usage is similar - define allowed origins, set allow_credentials, allow_methods, allow_headers etc.\n\nSo this will enable CORS in your FastAPI app for the specified origins and allowed options.\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" - } - ], - "optional_params": { - "max_tokens_to_sample": 20, - "temperature": 0.5 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "6b158a57-2c7f-4eb7-87a4-a6fe4fc546ae", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: \" To enable CORS in FastAPI, you can use the CORSMiddleware similar to how it is done in Flask:\n\n```python\nfrom fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\n\napp = FastAPI()\n\norigins = [\n \"http://localhost.tiangolo.com\", \n \"https://localhost.tiangolo.com\",\n \"http://localhost\",\n \"http://localhost:8080\",\n]\n\napp.add_middleware(\n CORSMiddleware, \n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n```\n\nThe main differences compared to Flask:\n\n- Import CORSMiddleware from fastapi.middleware.cors\n- Use app.add_middleware() instead of CORS()\n- Specify the middleware before routes just like in Flask\n\nOtherwise the usage is similar - define allowed origins, set allow_credentials, allow_methods, allow_headers etc.\n\nSo this will enable CORS in your FastAPI app for the specified origins and allowed options.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: \\\" To enable CORS in FastAPI, you can use the CORSMiddleware similar to how it is done in Flask:\\n\\n```python\\nfrom fastapi import FastAPI\\nfrom fastapi.middleware.cors import CORSMiddleware\\n\\napp = FastAPI()\\n\\norigins = [\\n \\\"http://localhost.tiangolo.com\\\", \\n \\\"https://localhost.tiangolo.com\\\",\\n \\\"http://localhost\\\",\\n \\\"http://localhost:8080\\\",\\n]\\n\\napp.add_middleware(\\n CORSMiddleware, \\n allow_origins=origins,\\n allow_credentials=True,\\n allow_methods=[\\\"*\\\"],\\n allow_headers=[\\\"*\\\"],\\n)\\n```\\n\\nThe main differences compared to Flask:\\n\\n- Import CORSMiddleware from fastapi.middleware.cors\\n- Use app.add_middleware() instead of CORS()\\n- Specify the middleware before routes just like in Flask\\n\\nOtherwise the usage is similar - define allowed origins, set allow_credentials, allow_methods, allow_headers etc.\\n\\nSo this will enable CORS in your FastAPI app for the specified origins and allowed options.\\\"\\n\\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:\\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" - }, - "log_event_type": "post_api_call", - "original_response": { - "completion": " Enabling CORS in FastAPI", - "stop_reason": "stop_sequence" - } - } - }, - "20231013154610735962": { - "pre_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "user", - "content": "app = FastAPI()\n\nmake my fastapi cors compatible" - }, - { - "role": "user", - "content": "app = FastAPI()\n\nmake my fastapi cors compatible" - }, - { - "role": "user", - "content": "app = FastAPI()\n\nmake my fastapi cors compatible" - }, - { - "role": "assistant", - "content": " Here is how you can make a FastAPI app CORS compatible:\n\n```python\nfrom fastapi import FastAPI, CORSMiddleware\n\napp = FastAPI()\n\norigins = [\n \"http://localhost.tiangolo.com\",\n \"https://localhost.tiangolo.com\",\n \"http://localhost\",\n \"http://localhost:8080\",\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n```\n\nThis enables CORS for the specified origins. The allow_credentials=True allows sending cookies from the frontend. The allow_methods and allow_headers set to \"*\" allows all methods and headers.\n\nYou can also set more restrictive CORS settings by specifying only the required origins, methods, headers etc.\n\nSome key points:\n\n- The CORSMiddleware should be added before any routers.\n- The allowed origins can be restricted to your frontend URLs only.\n- Allow credentials only if needed.\n- Restrict methods and headers to only required ones if possible.\n\nSo this sets up a basic permissive CORS config for a FastAPI app." - }, - { - "role": "user", - "content": "just do something like this: \nfrom flask import Flask\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\nbut for flask" - }, - { - "role": "user", - "content": "just do something like this: \nfrom flask import Flask\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\nbut for fastapi" - }, - { - "role": "user", - "content": "just allow all origins " - } - ], - "optional_params": { - "max_tokens_to_sample": 1024, - "temperature": 0.5, - "stream": true - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "1cc72768-9cfc-424d-98be-062a2871b6f8", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: app = FastAPI()\n\nmake my fastapi cors compatible\n\nHuman: app = FastAPI()\n\nmake my fastapi cors compatible\n\nHuman: app = FastAPI()\n\nmake my fastapi cors compatible\n\nAssistant: Here is how you can make a FastAPI app CORS compatible:\n\n```python\nfrom fastapi import FastAPI, CORSMiddleware\n\napp = FastAPI()\n\norigins = [\n \"http://localhost.tiangolo.com\",\n \"https://localhost.tiangolo.com\",\n \"http://localhost\",\n \"http://localhost:8080\",\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n```\n\nThis enables CORS for the specified origins. The allow_credentials=True allows sending cookies from the frontend. 
The allow_methods and allow_headers set to \"*\" allows all methods and headers.\n\nYou can also set more restrictive CORS settings by specifying only the required origins, methods, headers etc.\n\nSome key points:\n\n- The CORSMiddleware should be added before any routers.\n- The allowed origins can be restricted to your frontend URLs only.\n- Allow credentials only if needed.\n- Restrict methods and headers to only required ones if possible.\n\nSo this sets up a basic permissive CORS config for a FastAPI app.\n\nHuman: just do something like this: \nfrom flask import Flask\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\nbut for flask\n\nHuman: just do something like this: \nfrom flask import Flask\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\nbut for fastapi\n\nHuman: just allow all origins \n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: app = FastAPI()\\n\\nmake my fastapi cors compatible\\n\\nHuman: app = FastAPI()\\n\\nmake my fastapi cors compatible\\n\\nHuman: app = FastAPI()\\n\\nmake my fastapi cors compatible\\n\\nAssistant: Here is how you can make a FastAPI app CORS compatible:\\n\\n```python\\nfrom fastapi import FastAPI, CORSMiddleware\\n\\napp = FastAPI()\\n\\norigins = [\\n \\\"http://localhost.tiangolo.com\\\",\\n \\\"https://localhost.tiangolo.com\\\",\\n \\\"http://localhost\\\",\\n \\\"http://localhost:8080\\\",\\n]\\n\\napp.add_middleware(\\n CORSMiddleware,\\n allow_origins=origins,\\n allow_credentials=True,\\n allow_methods=[\\\"*\\\"],\\n allow_headers=[\\\"*\\\"],\\n)\\n```\\n\\nThis enables CORS for the specified origins. The allow_credentials=True allows sending cookies from the frontend. The allow_methods and allow_headers set to \\\"*\\\" allows all methods and headers.\\n\\nYou can also set more restrictive CORS settings by specifying only the required origins, methods, headers etc.\\n\\nSome key points:\\n\\n- The CORSMiddleware should be added before any routers.\\n- The allowed origins can be restricted to your frontend URLs only.\\n- Allow credentials only if needed.\\n- Restrict methods and headers to only required ones if possible.\\n\\nSo this sets up a basic permissive CORS config for a FastAPI app.\\n\\nHuman: just do something like this: \\nfrom flask import Flask\\nfrom flask_cors import CORS\\n\\napp = Flask(__name__)\\nCORS(app)\\n\\nbut for flask\\n\\nHuman: just do something like this: \\nfrom flask import Flask\\nfrom flask_cors import CORS\\n\\napp = Flask(__name__)\\nCORS(app)\\n\\nbut for fastapi\\n\\nHuman: just allow all origins \\n\\nAssistant: \", \"max_tokens_to_sample\": 1024, \"temperature\": 0.5}" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "user", - "content": "app = FastAPI()\n\nmake my fastapi cors compatible" - }, - { - "role": "user", - "content": "app = FastAPI()\n\nmake my fastapi cors compatible" - }, - { - "role": "user", - "content": "app = FastAPI()\n\nmake my fastapi cors compatible" - }, - { - "role": "assistant", - "content": " Here is how you can make a FastAPI app CORS compatible:\n\n```python\nfrom fastapi import FastAPI, CORSMiddleware\n\napp = FastAPI()\n\norigins = [\n \"http://localhost.tiangolo.com\",\n \"https://localhost.tiangolo.com\",\n \"http://localhost\",\n \"http://localhost:8080\",\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n 
allow_headers=[\"*\"],\n)\n```\n\nThis enables CORS for the specified origins. The allow_credentials=True allows sending cookies from the frontend. The allow_methods and allow_headers set to \"*\" allows all methods and headers.\n\nYou can also set more restrictive CORS settings by specifying only the required origins, methods, headers etc.\n\nSome key points:\n\n- The CORSMiddleware should be added before any routers.\n- The allowed origins can be restricted to your frontend URLs only.\n- Allow credentials only if needed.\n- Restrict methods and headers to only required ones if possible.\n\nSo this sets up a basic permissive CORS config for a FastAPI app." - }, - { - "role": "user", - "content": "just do something like this: \nfrom flask import Flask\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\nbut for flask" - }, - { - "role": "user", - "content": "just do something like this: \nfrom flask import Flask\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\nbut for fastapi" - }, - { - "role": "user", - "content": "just allow all origins " - } - ], - "optional_params": { - "max_tokens_to_sample": 1024, - "temperature": 0.5, - "stream": true - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "1cc72768-9cfc-424d-98be-062a2871b6f8", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": null, - "api_key": null, - "additional_args": {}, - "log_event_type": "post_api_call", - "original_response": "", - "complete_streaming_response": { - "id": "chatcmpl-e074b9c9-b64d-458c-a20c-5db299ebd44c", - "object": "chat.completion.chunk", - "created": 1697237170, - "model": "anthropic.claude-v2", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": " Here is how to enable CORS for all origins in FastAPI:\n\n```python\nfrom fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\n\napp = FastAPI()\n\norigins = [\"*\"]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n```\n\nThe key points:\n\n- Set origins to [\"*\"] to allow all origins\n- Enable allow_credentials if you want to allow sending cookies\n- Allow all methods and headers with [\"*\"]\n\nThis enables CORS access from any origin to the FastAPI app.\n\nDo note that allowing all origins is not suitable for production applications. It's better to restrict origins to your specific frontend URLs for security.\n\nBut for development, allowing all origins is a quick way to enable CORS in FastAPI." 
- }, - "finish_reason": "stop_sequence" - } - ] - } - } - }, - "20231013154630846250": { - "pre_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "system", - "content": "\" Here is how to enable CORS for all origins in FastAPI:\n\n```python\nfrom fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\n\napp = FastAPI()\n\norigins = [\"*\"]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n```\n\nThe key points:\n\n- Set origins to [\"*\"] to allow all origins\n- Enable allow_credentials if you want to allow sending cookies\n- Allow all methods and headers with [\"*\"]\n\nThis enables CORS access from any origin to the FastAPI app.\n\nDo note that allowing all origins is not suitable for production applications. It's better to restrict origins to your specific frontend URLs for security.\n\nBut for development, allowing all origins is a quick way to enable CORS in FastAPI.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "max_tokens_to_sample": 20, - "temperature": 0.5 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "658cf642-541f-4461-be7b-e3e48d591bdf", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: \" Here is how to enable CORS for all origins in FastAPI:\n\n```python\nfrom fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\n\napp = FastAPI()\n\norigins = [\"*\"]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n```\n\nThe key points:\n\n- Set origins to [\"*\"] to allow all origins\n- Enable allow_credentials if you want to allow sending cookies\n- Allow all methods and headers with [\"*\"]\n\nThis enables CORS access from any origin to the FastAPI app.\n\nDo note that allowing all origins is not suitable for production applications. It's better to restrict origins to your specific frontend URLs for security.\n\nBut for development, allowing all origins is a quick way to enable CORS in FastAPI.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: \\\" Here is how to enable CORS for all origins in FastAPI:\\n\\n```python\\nfrom fastapi import FastAPI\\nfrom fastapi.middleware.cors import CORSMiddleware\\n\\napp = FastAPI()\\n\\norigins = [\\\"*\\\"]\\n\\napp.add_middleware(\\n CORSMiddleware,\\n allow_origins=origins,\\n allow_credentials=True,\\n allow_methods=[\\\"*\\\"],\\n allow_headers=[\\\"*\\\"],\\n)\\n```\\n\\nThe key points:\\n\\n- Set origins to [\\\"*\\\"] to allow all origins\\n- Enable allow_credentials if you want to allow sending cookies\\n- Allow all methods and headers with [\\\"*\\\"]\\n\\nThis enables CORS access from any origin to the FastAPI app.\\n\\nDo note that allowing all origins is not suitable for production applications. 
It's better to restrict origins to your specific frontend URLs for security.\\n\\nBut for development, allowing all origins is a quick way to enable CORS in FastAPI.\\\"\\n\\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "system", - "content": "\" Here is how to enable CORS for all origins in FastAPI:\n\n```python\nfrom fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\n\napp = FastAPI()\n\norigins = [\"*\"]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n```\n\nThe key points:\n\n- Set origins to [\"*\"] to allow all origins\n- Enable allow_credentials if you want to allow sending cookies\n- Allow all methods and headers with [\"*\"]\n\nThis enables CORS access from any origin to the FastAPI app.\n\nDo note that allowing all origins is not suitable for production applications. It's better to restrict origins to your specific frontend URLs for security.\n\nBut for development, allowing all origins is a quick way to enable CORS in FastAPI.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "max_tokens_to_sample": 20, - "temperature": 0.5 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "658cf642-541f-4461-be7b-e3e48d591bdf", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: \" Here is how to enable CORS for all origins in FastAPI:\n\n```python\nfrom fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\n\napp = FastAPI()\n\norigins = [\"*\"]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n```\n\nThe key points:\n\n- Set origins to [\"*\"] to allow all origins\n- Enable allow_credentials if you want to allow sending cookies\n- Allow all methods and headers with [\"*\"]\n\nThis enables CORS access from any origin to the FastAPI app.\n\nDo note that allowing all origins is not suitable for production applications. It's better to restrict origins to your specific frontend URLs for security.\n\nBut for development, allowing all origins is a quick way to enable CORS in FastAPI.\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: \\\" Here is how to enable CORS for all origins in FastAPI:\\n\\n```python\\nfrom fastapi import FastAPI\\nfrom fastapi.middleware.cors import CORSMiddleware\\n\\napp = FastAPI()\\n\\norigins = [\\\"*\\\"]\\n\\napp.add_middleware(\\n CORSMiddleware,\\n allow_origins=origins,\\n allow_credentials=True,\\n allow_methods=[\\\"*\\\"],\\n allow_headers=[\\\"*\\\"],\\n)\\n```\\n\\nThe key points:\\n\\n- Set origins to [\\\"*\\\"] to allow all origins\\n- Enable allow_credentials if you want to allow sending cookies\\n- Allow all methods and headers with [\\\"*\\\"]\\n\\nThis enables CORS access from any origin to the FastAPI app.\\n\\nDo note that allowing all origins is not suitable for production applications. It's better to restrict origins to your specific frontend URLs for security.\\n\\nBut for development, allowing all origins is a quick way to enable CORS in FastAPI.\\\"\\n\\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" - }, - "log_event_type": "post_api_call", - "original_response": { - "completion": " Enable CORS in FastAPI for all origins", - "stop_reason": "stop_sequence" - } - } - }, - "20231013200437287062": { - "pre_api_call": { - "model": "codellama/CodeLlama-34b-Instruct-hf", - "messages": [ - { - "role": "user", - "content": "this is a test request, acknowledge that you got it" - } - ], - "optional_params": { - "details": true, - "return_full_text": false - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "huggingface", - "api_base": "https://ji16r2iys9a8rjk2.us-east-1.aws.endpoints.huggingface.cloud", - "litellm_call_id": "b40db754-5536-4ecd-837f-cda44f83424c", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "[INST] this is a test request, acknowledge that you got it [/INST]\n", - "api_key": "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", - "additional_args": { - "complete_input_dict": { - "inputs": "[INST] this is a test request, acknowledge that you got it [/INST]\n", - "parameters": { - "details": true, - "return_full_text": false - }, - "stream": false - }, - "task": "text-generation-inference" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "codellama/CodeLlama-34b-Instruct-hf", - "messages": [ - { - "role": "user", - "content": "this is a test request, acknowledge that you got it" - } - ], - "optional_params": { - "details": true, - "return_full_text": false - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "huggingface", - "api_base": "https://ji16r2iys9a8rjk2.us-east-1.aws.endpoints.huggingface.cloud", - "litellm_call_id": "b40db754-5536-4ecd-837f-cda44f83424c", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "[INST] this is a test request, acknowledge that you got it [/INST]\n", - "api_key": "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", - "additional_args": { - "complete_input_dict": { - "inputs": "[INST] this is a test request, acknowledge that you got it [/INST]\n", - "parameters": { - "details": true, - "return_full_text": false - }, 
- "stream": false - }, - "task": "text-generation-inference" - }, - "log_event_type": "post_api_call", - "original_response": "[{\"generated_text\":\"\\n[INST]Please provide me with your name and age[/INST]\\n\\n[INST\",\"details\":{\"finish_reason\":\"length\",\"generated_tokens\":20,\"seed\":null,\"prefill\":[],\"tokens\":[{\"id\":13,\"text\":\"\\n\",\"logprob\":-1.9580078,\"special\":false},{\"id\":29961,\"text\":\"[\",\"logprob\":-1.8720703,\"special\":false},{\"id\":25580,\"text\":\"INST\",\"logprob\":-2.0449219,\"special\":false},{\"id\":29962,\"text\":\"]\",\"logprob\":-0.037017822,\"special\":false},{\"id\":12148,\"text\":\"Please\",\"logprob\":-3.15625,\"special\":false},{\"id\":3867,\"text\":\" provide\",\"logprob\":-1.7675781,\"special\":false},{\"id\":592,\"text\":\" me\",\"logprob\":-0.9614258,\"special\":false},{\"id\":411,\"text\":\" with\",\"logprob\":-0.42285156,\"special\":false},{\"id\":596,\"text\":\" your\",\"logprob\":-0.86328125,\"special\":false},{\"id\":1024,\"text\":\" name\",\"logprob\":-1.0976562,\"special\":false},{\"id\":322,\"text\":\" and\",\"logprob\":-0.77246094,\"special\":false},{\"id\":5046,\"text\":\" age\",\"logprob\":-1.1455078,\"special\":false},{\"id\":29961,\"text\":\"[\",\"logprob\":-0.59033203,\"special\":false},{\"id\":29914,\"text\":\"/\",\"logprob\":-0.0069770813,\"special\":false},{\"id\":25580,\"text\":\"INST\",\"logprob\":-0.00042438507,\"special\":false},{\"id\":29962,\"text\":\"]\",\"logprob\":-0.03704834,\"special\":false},{\"id\":13,\"text\":\"\\n\",\"logprob\":-0.34960938,\"special\":false},{\"id\":13,\"text\":\"\\n\",\"logprob\":-0.49316406,\"special\":false},{\"id\":29961,\"text\":\"[\",\"logprob\":-0.5678711,\"special\":false},{\"id\":25580,\"text\":\"INST\",\"logprob\":-1.5263672,\"special\":false}]}}]" - } - }, - "20231013200440365413": { - "pre_api_call": { - "model": "codellama/CodeLlama-34b-Instruct-hf", - "messages": [ - { - "role": "user", - "content": "this is a test request, acknowledge that you got it" - } - ], - "optional_params": { "stream": true, + "max_new_tokens": 1024, "details": true, "return_full_text": false }, @@ -959,39 +22,46 @@ "logger_fn": null, "verbose": false, "custom_llm_provider": "huggingface", - "api_base": "https://ji16r2iys9a8rjk2.us-east-1.aws.endpoints.huggingface.cloud", - "litellm_call_id": "24732ff3-8936-4ad8-97c5-013aa89e7aba", + "api_base": "https://app.baseten.co/models/pP8JeaB/predict", + "litellm_call_id": "d75891a0-d567-470a-a6cd-137e698da092", "model_alias_map": {}, "completion_call_id": null, "metadata": null, "stream_response": {} }, - "input": "[INST] this is a test request, acknowledge that you got it [/INST]\n", + "input": "[INST] hey [/INST]\n", "api_key": "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", "additional_args": { "complete_input_dict": { - "inputs": "[INST] this is a test request, acknowledge that you got it [/INST]\n", + "inputs": "[INST] hey [/INST]\n", "parameters": { + "temperature": 0.5, "stream": true, + "max_new_tokens": 1024, "details": true, "return_full_text": false }, "stream": true }, - "task": "text-generation-inference" + "task": "text-generation-inference", + "headers": { + "Authorization": "Api-Key SQqH1uZg.SSN79Bq997k4TRdzW9HBCghx9KyL0EJA" + } }, "log_event_type": "pre_api_call" }, "post_api_call": { - "model": "codellama/CodeLlama-34b-Instruct-hf", + "model": "codellama/CodeLlama-7b-Instruct-hf", "messages": [ { "role": "user", - "content": "this is a test request, acknowledge that you got it" + "content": "hey" } ], "optional_params": { + "temperature": 0.5, 
"stream": true, + "max_new_tokens": 1024, "details": true, "return_full_text": false }, @@ -1002,8 +72,8 @@ "logger_fn": null, "verbose": false, "custom_llm_provider": "huggingface", - "api_base": "https://ji16r2iys9a8rjk2.us-east-1.aws.endpoints.huggingface.cloud", - "litellm_call_id": "24732ff3-8936-4ad8-97c5-013aa89e7aba", + "api_base": "https://app.baseten.co/models/pP8JeaB/predict", + "litellm_call_id": "d75891a0-d567-470a-a6cd-137e698da092", "model_alias_map": {}, "completion_call_id": null, "metadata": null, @@ -1014,6310 +84,4 @@ "additional_args": {}, "log_event_type": "post_api_call", "original_response": "", - "complete_streaming_response": { - "id": "chatcmpl-531336f4-ac0a-4a8c-b9c1-dc4cc57974e2", - "object": "chat.completion.chunk", - "created": 1697252680, - "model": "codellama/CodeLlama-34b-Instruct-hf", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "\n[INST]Please provide me with your name and age[/INST]\n\n[INST" - }, - "finish_reason": "length" - } - ] - } - } - }, - "20231013201452138120": { - "pre_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "can you print 'Done successfully' in green in python " - } - ], - "optional_params": { - "max_tokens_to_sample": 1024, - "temperature": 0.5, - "stream": true - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "1c490fe8-4a85-4944-8ee7-d640ac8bc0fd", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: hey\n\nHuman: can you print 'Done successfully' in green in python \n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: hey\\n\\nHuman: can you print 'Done successfully' in green in python \\n\\nAssistant: \", \"max_tokens_to_sample\": 1024, \"temperature\": 0.5}" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "can you print 'Done successfully' in green in python " - } - ], - "optional_params": { - "max_tokens_to_sample": 1024, - "temperature": 0.5, - "stream": true - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "1c490fe8-4a85-4944-8ee7-d640ac8bc0fd", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": null, - "api_key": null, - "additional_args": {}, - "log_event_type": "post_api_call", - "original_response": "", - "complete_streaming_response": { - "id": "chatcmpl-b6e23895-bac8-4203-b992-7324b9aae855", - "object": "chat.completion.chunk", - "created": 1697253292, - "model": "anthropic.claude-v2", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": " Here is how you can print 'Done successfully' in green text in Python:\n\n```python\nfrom termcolor import colored\n\nprint(colored('Done successfully', 'green'))\n```\n\nTo print text in different colors in Python, we need to import the termcolor module. 
Then we can pass the text and the color name to the colored() function.\n\nThis will print the text 'Done successfully' in green color.\n\nThe available color names that can be passed are:\n\n- 'grey'\n- 'red' \n- 'green'\n- 'yellow' \n- 'blue'\n- 'magenta' \n- 'cyan' \n- 'white'\n\nSo this provides a simple way to add some color formatting to text printed in Python." - }, - "finish_reason": "stop_sequence" - } - ] - } - } - }, - "20231013201500572768": { - "pre_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "system", - "content": "\" Here is how you can print 'Done successfully' in green text in Python:\n\n```python\nfrom termcolor import colored\n\nprint(colored('Done successfully', 'green'))\n```\n\nTo print text in different colors in Python, we need to import the termcolor module. Then we can pass the text and the color name to the colored() function.\n\nThis will print the text 'Done successfully' in green color.\n\nThe available color names that can be passed are:\n\n- 'grey'\n- 'red' \n- 'green'\n- 'yellow' \n- 'blue'\n- 'magenta' \n- 'cyan' \n- 'white'\n\nSo this provides a simple way to add some color formatting to text printed in Python.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "max_tokens_to_sample": 20, - "temperature": 0.5 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "a5660679-aa71-4fc8-94b5-da60be471d59", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: \" Here is how you can print 'Done successfully' in green text in Python:\n\n```python\nfrom termcolor import colored\n\nprint(colored('Done successfully', 'green'))\n```\n\nTo print text in different colors in Python, we need to import the termcolor module. Then we can pass the text and the color name to the colored() function.\n\nThis will print the text 'Done successfully' in green color.\n\nThe available color names that can be passed are:\n\n- 'grey'\n- 'red' \n- 'green'\n- 'yellow' \n- 'blue'\n- 'magenta' \n- 'cyan' \n- 'white'\n\nSo this provides a simple way to add some color formatting to text printed in Python.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: \\\" Here is how you can print 'Done successfully' in green text in Python:\\n\\n```python\\nfrom termcolor import colored\\n\\nprint(colored('Done successfully', 'green'))\\n```\\n\\nTo print text in different colors in Python, we need to import the termcolor module. Then we can pass the text and the color name to the colored() function.\\n\\nThis will print the text 'Done successfully' in green color.\\n\\nThe available color names that can be passed are:\\n\\n- 'grey'\\n- 'red' \\n- 'green'\\n- 'yellow' \\n- 'blue'\\n- 'magenta' \\n- 'cyan' \\n- 'white'\\n\\nSo this provides a simple way to add some color formatting to text printed in Python.\\\"\\n\\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:\\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "system", - "content": "\" Here is how you can print 'Done successfully' in green text in Python:\n\n```python\nfrom termcolor import colored\n\nprint(colored('Done successfully', 'green'))\n```\n\nTo print text in different colors in Python, we need to import the termcolor module. Then we can pass the text and the color name to the colored() function.\n\nThis will print the text 'Done successfully' in green color.\n\nThe available color names that can be passed are:\n\n- 'grey'\n- 'red' \n- 'green'\n- 'yellow' \n- 'blue'\n- 'magenta' \n- 'cyan' \n- 'white'\n\nSo this provides a simple way to add some color formatting to text printed in Python.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "max_tokens_to_sample": 20, - "temperature": 0.5 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "a5660679-aa71-4fc8-94b5-da60be471d59", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: \" Here is how you can print 'Done successfully' in green text in Python:\n\n```python\nfrom termcolor import colored\n\nprint(colored('Done successfully', 'green'))\n```\n\nTo print text in different colors in Python, we need to import the termcolor module. Then we can pass the text and the color name to the colored() function.\n\nThis will print the text 'Done successfully' in green color.\n\nThe available color names that can be passed are:\n\n- 'grey'\n- 'red' \n- 'green'\n- 'yellow' \n- 'blue'\n- 'magenta' \n- 'cyan' \n- 'white'\n\nSo this provides a simple way to add some color formatting to text printed in Python.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: \\\" Here is how you can print 'Done successfully' in green text in Python:\\n\\n```python\\nfrom termcolor import colored\\n\\nprint(colored('Done successfully', 'green'))\\n```\\n\\nTo print text in different colors in Python, we need to import the termcolor module. Then we can pass the text and the color name to the colored() function.\\n\\nThis will print the text 'Done successfully' in green color.\\n\\nThe available color names that can be passed are:\\n\\n- 'grey'\\n- 'red' \\n- 'green'\\n- 'yellow' \\n- 'blue'\\n- 'magenta' \\n- 'cyan' \\n- 'white'\\n\\nSo this provides a simple way to add some color formatting to text printed in Python.\\\"\\n\\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:\\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" - }, - "log_event_type": "post_api_call", - "original_response": { - "completion": " Print colored text in Python using termcolor module", - "stop_reason": "stop_sequence" - } - } - }, - "20231013201503544188": { - "pre_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "system", - "content": "### Instruction:\nhey\n### Instruction:\ncan you print 'Done successfully' in green in python \n### Response:\n Here is how you can print 'Done successfully' in green text in Python:\n\n```python\nfrom termcolor import colored\n\nprint(colored('Done successfully', 'green'))\n```\n\nTo print text in different colors in Python, we need to import the termcolor module. Then we can pass the text and the color name to the colored() function.\n\nThis will print the text 'Done successfully' in green color.\n\nThe available color names that can be passed are:\n\n- 'grey'\n- 'red' \n- 'green'\n- 'yellow' \n- 'blue'\n- 'magenta' \n- 'cyan' \n- 'white'\n\nSo this provides a simple way to add some color formatting to text printed in Python.\n### Response:\n\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: " - } - ], - "optional_params": { - "max_tokens_to_sample": 20, - "temperature": 0.5 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "4ffe32f9-397a-4740-bc08-0c85db94a5bc", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: ### Instruction:\nhey\n### Instruction:\ncan you print 'Done successfully' in green in python \n### Response:\n Here is how you can print 'Done successfully' in green text in Python:\n\n```python\nfrom termcolor import colored\n\nprint(colored('Done successfully', 'green'))\n```\n\nTo print text in different colors in Python, we need to import the termcolor module. Then we can pass the text and the color name to the colored() function.\n\nThis will print the text 'Done successfully' in green color.\n\nThe available color names that can be passed are:\n\n- 'grey'\n- 'red' \n- 'green'\n- 'yellow' \n- 'blue'\n- 'magenta' \n- 'cyan' \n- 'white'\n\nSo this provides a simple way to add some color formatting to text printed in Python.\n### Response:\n\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: \n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: ### Instruction:\\nhey\\n### Instruction:\\ncan you print 'Done successfully' in green in python \\n### Response:\\n Here is how you can print 'Done successfully' in green text in Python:\\n\\n```python\\nfrom termcolor import colored\\n\\nprint(colored('Done successfully', 'green'))\\n```\\n\\nTo print text in different colors in Python, we need to import the termcolor module. 
Then we can pass the text and the color name to the colored() function.\\n\\nThis will print the text 'Done successfully' in green color.\\n\\nThe available color names that can be passed are:\\n\\n- 'grey'\\n- 'red' \\n- 'green'\\n- 'yellow' \\n- 'blue'\\n- 'magenta' \\n- 'cyan' \\n- 'white'\\n\\nSo this provides a simple way to add some color formatting to text printed in Python.\\n### Response:\\n\\n\\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: \\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "system", - "content": "### Instruction:\nhey\n### Instruction:\ncan you print 'Done successfully' in green in python \n### Response:\n Here is how you can print 'Done successfully' in green text in Python:\n\n```python\nfrom termcolor import colored\n\nprint(colored('Done successfully', 'green'))\n```\n\nTo print text in different colors in Python, we need to import the termcolor module. Then we can pass the text and the color name to the colored() function.\n\nThis will print the text 'Done successfully' in green color.\n\nThe available color names that can be passed are:\n\n- 'grey'\n- 'red' \n- 'green'\n- 'yellow' \n- 'blue'\n- 'magenta' \n- 'cyan' \n- 'white'\n\nSo this provides a simple way to add some color formatting to text printed in Python.\n### Response:\n\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: " - } - ], - "optional_params": { - "max_tokens_to_sample": 20, - "temperature": 0.5 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "4ffe32f9-397a-4740-bc08-0c85db94a5bc", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: ### Instruction:\nhey\n### Instruction:\ncan you print 'Done successfully' in green in python \n### Response:\n Here is how you can print 'Done successfully' in green text in Python:\n\n```python\nfrom termcolor import colored\n\nprint(colored('Done successfully', 'green'))\n```\n\nTo print text in different colors in Python, we need to import the termcolor module. Then we can pass the text and the color name to the colored() function.\n\nThis will print the text 'Done successfully' in green color.\n\nThe available color names that can be passed are:\n\n- 'grey'\n- 'red' \n- 'green'\n- 'yellow' \n- 'blue'\n- 'magenta' \n- 'cyan' \n- 'white'\n\nSo this provides a simple way to add some color formatting to text printed in Python.\n### Response:\n\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: \n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: ### Instruction:\\nhey\\n### Instruction:\\ncan you print 'Done successfully' in green in python \\n### Response:\\n Here is how you can print 'Done successfully' in green text in Python:\\n\\n```python\\nfrom termcolor import colored\\n\\nprint(colored('Done successfully', 'green'))\\n```\\n\\nTo print text in different colors in Python, we need to import the termcolor module. 
Then we can pass the text and the color name to the colored() function.\\n\\nThis will print the text 'Done successfully' in green color.\\n\\nThe available color names that can be passed are:\\n\\n- 'grey'\\n- 'red' \\n- 'green'\\n- 'yellow' \\n- 'blue'\\n- 'magenta' \\n- 'cyan' \\n- 'white'\\n\\nSo this provides a simple way to add some color formatting to text printed in Python.\\n### Response:\\n\\n\\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: \\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" - }, - "log_event_type": "post_api_call", - "original_response": { - "completion": " Printing text in green in Python", - "stop_reason": "stop_sequence" - } - } - }, - "20231013201524163532": { - "pre_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "can you print 'Done successfully' in green in python " - }, - { - "role": "assistant", - "content": " Here is how you can print 'Done successfully' in green text in Python:\n\n```python\nfrom termcolor import colored\n\nprint(colored('Done successfully', 'green'))\n```\n\nTo print text in different colors in Python, we need to import the termcolor module. Then we can pass the text and the color name to the colored() function.\n\nThis will print the text 'Done successfully' in green color.\n\nThe available color names that can be passed are:\n\n- 'grey'\n- 'red' \n- 'green'\n- 'yellow' \n- 'blue'\n- 'magenta' \n- 'cyan' \n- 'white'\n\nSo this provides a simple way to add some color formatting to text printed in Python." - }, - { - "role": "user", - "content": "no do it like this: \\033[1;37m" - } - ], - "optional_params": { - "max_tokens_to_sample": 1024, - "temperature": 0.5, - "stream": true - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "4c3ef670-bf78-4824-990d-4e69dde89bc1", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: hey\n\nHuman: can you print 'Done successfully' in green in python \n\nAssistant: Here is how you can print 'Done successfully' in green text in Python:\n\n```python\nfrom termcolor import colored\n\nprint(colored('Done successfully', 'green'))\n```\n\nTo print text in different colors in Python, we need to import the termcolor module. Then we can pass the text and the color name to the colored() function.\n\nThis will print the text 'Done successfully' in green color.\n\nThe available color names that can be passed are:\n\n- 'grey'\n- 'red' \n- 'green'\n- 'yellow' \n- 'blue'\n- 'magenta' \n- 'cyan' \n- 'white'\n\nSo this provides a simple way to add some color formatting to text printed in Python.\n\nHuman: no do it like this: \\033[1;37m\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: hey\\n\\nHuman: can you print 'Done successfully' in green in python \\n\\nAssistant: Here is how you can print 'Done successfully' in green text in Python:\\n\\n```python\\nfrom termcolor import colored\\n\\nprint(colored('Done successfully', 'green'))\\n```\\n\\nTo print text in different colors in Python, we need to import the termcolor module. 
Then we can pass the text and the color name to the colored() function.\\n\\nThis will print the text 'Done successfully' in green color.\\n\\nThe available color names that can be passed are:\\n\\n- 'grey'\\n- 'red' \\n- 'green'\\n- 'yellow' \\n- 'blue'\\n- 'magenta' \\n- 'cyan' \\n- 'white'\\n\\nSo this provides a simple way to add some color formatting to text printed in Python.\\n\\nHuman: no do it like this: \\\\033[1;37m\\n\\nAssistant: \", \"max_tokens_to_sample\": 1024, \"temperature\": 0.5}" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "can you print 'Done successfully' in green in python " - }, - { - "role": "assistant", - "content": " Here is how you can print 'Done successfully' in green text in Python:\n\n```python\nfrom termcolor import colored\n\nprint(colored('Done successfully', 'green'))\n```\n\nTo print text in different colors in Python, we need to import the termcolor module. Then we can pass the text and the color name to the colored() function.\n\nThis will print the text 'Done successfully' in green color.\n\nThe available color names that can be passed are:\n\n- 'grey'\n- 'red' \n- 'green'\n- 'yellow' \n- 'blue'\n- 'magenta' \n- 'cyan' \n- 'white'\n\nSo this provides a simple way to add some color formatting to text printed in Python." - }, - { - "role": "user", - "content": "no do it like this: \\033[1;37m" - } - ], - "optional_params": { - "max_tokens_to_sample": 1024, - "temperature": 0.5, - "stream": true - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "4c3ef670-bf78-4824-990d-4e69dde89bc1", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": null, - "api_key": null, - "additional_args": {}, - "log_event_type": "post_api_call", - "original_response": "", - "complete_streaming_response": { - "id": "chatcmpl-1f9a6a04-9b9c-4a87-ae6e-2dedcc7adfeb", - "object": "chat.completion.chunk", - "created": 1697253324, - "model": "anthropic.claude-v2", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": " Here is how you can print 'Done successfully' in green color using ANSI escape codes in Python:\n\n```python\nprint(\"\\033[1;32mDone successfully\\033[0m\")\n```\n\nThe ANSI escape code `\\033[1;32m` sets the text color to green. \n\n`\\033[0m` resets the text formatting back to default.\n\nSo the full code is:\n\n`\\033[1;32m` - Set color to green \n\n`Done successfully` - Print text\n\n`\\033[0m` - Reset color back to default\n\nThis will print 'Done successfully' in green color without needing to import any module.\n\nThe ANSI escape codes allow changing text color, style (bold, underline etc) in the terminal. We just need to encapsulate the text we want to format between the ANSI codes." - }, - "finish_reason": "stop" - } - ] - } - } - }, - "20231013201538894266": { - "pre_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "system", - "content": "\" Here is how you can print 'Done successfully' in green color using ANSI escape codes in Python:\n\n```python\nprint(\"\\033[1;32mDone successfully\\033[0m\")\n```\n\nThe ANSI escape code `\\033[1;32m` sets the text color to green. 
\n\n`\\033[0m` resets the text formatting back to default.\n\nSo the full code is:\n\n`\\033[1;32m` - Set color to green \n\n`Done successfully` - Print text\n\n`\\033[0m` - Reset color back to default\n\nThis will print 'Done successfully' in green color without needing to import any module.\n\nThe ANSI escape codes allow changing text color, style (bold, underline etc) in the terminal. We just need to encapsulate the text we want to format between the ANSI codes.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "max_tokens_to_sample": 20, - "temperature": 0.5 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "3529c05a-c770-4d2e-b19c-5bada33b983f", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: \" Here is how you can print 'Done successfully' in green color using ANSI escape codes in Python:\n\n```python\nprint(\"\\033[1;32mDone successfully\\033[0m\")\n```\n\nThe ANSI escape code `\\033[1;32m` sets the text color to green. \n\n`\\033[0m` resets the text formatting back to default.\n\nSo the full code is:\n\n`\\033[1;32m` - Set color to green \n\n`Done successfully` - Print text\n\n`\\033[0m` - Reset color back to default\n\nThis will print 'Done successfully' in green color without needing to import any module.\n\nThe ANSI escape codes allow changing text color, style (bold, underline etc) in the terminal. We just need to encapsulate the text we want to format between the ANSI codes.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: \\\" Here is how you can print 'Done successfully' in green color using ANSI escape codes in Python:\\n\\n```python\\nprint(\\\"\\\\033[1;32mDone successfully\\\\033[0m\\\")\\n```\\n\\nThe ANSI escape code `\\\\033[1;32m` sets the text color to green. \\n\\n`\\\\033[0m` resets the text formatting back to default.\\n\\nSo the full code is:\\n\\n`\\\\033[1;32m` - Set color to green \\n\\n`Done successfully` - Print text\\n\\n`\\\\033[0m` - Reset color back to default\\n\\nThis will print 'Done successfully' in green color without needing to import any module.\\n\\nThe ANSI escape codes allow changing text color, style (bold, underline etc) in the terminal. We just need to encapsulate the text we want to format between the ANSI codes.\\\"\\n\\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "system", - "content": "\" Here is how you can print 'Done successfully' in green color using ANSI escape codes in Python:\n\n```python\nprint(\"\\033[1;32mDone successfully\\033[0m\")\n```\n\nThe ANSI escape code `\\033[1;32m` sets the text color to green. 
\n\n`\\033[0m` resets the text formatting back to default.\n\nSo the full code is:\n\n`\\033[1;32m` - Set color to green \n\n`Done successfully` - Print text\n\n`\\033[0m` - Reset color back to default\n\nThis will print 'Done successfully' in green color without needing to import any module.\n\nThe ANSI escape codes allow changing text color, style (bold, underline etc) in the terminal. We just need to encapsulate the text we want to format between the ANSI codes.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "max_tokens_to_sample": 20, - "temperature": 0.5 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "3529c05a-c770-4d2e-b19c-5bada33b983f", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: \" Here is how you can print 'Done successfully' in green color using ANSI escape codes in Python:\n\n```python\nprint(\"\\033[1;32mDone successfully\\033[0m\")\n```\n\nThe ANSI escape code `\\033[1;32m` sets the text color to green. \n\n`\\033[0m` resets the text formatting back to default.\n\nSo the full code is:\n\n`\\033[1;32m` - Set color to green \n\n`Done successfully` - Print text\n\n`\\033[0m` - Reset color back to default\n\nThis will print 'Done successfully' in green color without needing to import any module.\n\nThe ANSI escape codes allow changing text color, style (bold, underline etc) in the terminal. We just need to encapsulate the text we want to format between the ANSI codes.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: \\\" Here is how you can print 'Done successfully' in green color using ANSI escape codes in Python:\\n\\n```python\\nprint(\\\"\\\\033[1;32mDone successfully\\\\033[0m\\\")\\n```\\n\\nThe ANSI escape code `\\\\033[1;32m` sets the text color to green. \\n\\n`\\\\033[0m` resets the text formatting back to default.\\n\\nSo the full code is:\\n\\n`\\\\033[1;32m` - Set color to green \\n\\n`Done successfully` - Print text\\n\\n`\\\\033[0m` - Reset color back to default\\n\\nThis will print 'Done successfully' in green color without needing to import any module.\\n\\nThe ANSI escape codes allow changing text color, style (bold, underline etc) in the terminal. We just need to encapsulate the text we want to format between the ANSI codes.\\\"\\n\\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:\\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" - }, - "log_event_type": "post_api_call", - "original_response": { - "completion": " Green text in Python with ANSI escape codes", - "stop_reason": "stop_sequence" - } - } - }, - "20231013211247627627": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "user", - "content": "this is a test request, acknowledge that you got it" - } - ], - "optional_params": {}, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "6e64f6cb-70b5-4e05-a192-6d4c80ff7614", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "user", - "content": "this is a test request, acknowledge that you got it" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "user", - "content": "this is a test request, acknowledge that you got it" - } - ], - "optional_params": {}, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "6e64f6cb-70b5-4e05-a192-6d4c80ff7614", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "user", - "content": "this is a test request, acknowledge that you got it" - } - ], - "api_key": "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", - "additional_args": { - "headers": null - }, - "log_event_type": "post_api_call", - "original_response": { - "id": "1644e83e-38ac-4f2f-88eb-bd7fbebf6be1", - "object": "chat.completion", - "choices": [ - { - "finish_reason": "stop", - "index": 0, - "message": { - "content": "Acknowledged! 
I have received the test request.", - "role": "assistant", - "logprobs": null - } - } - ], - "created": 1697256767, - "model": "mistral-7b-instruct", - "usage": { - "prompt_tokens": 19, - "completion_tokens": 13, - "total_tokens": 32 - } - } - } - }, - "20231013211248238166": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "user", - "content": "this is a test request, acknowledge that you got it" - } - ], - "optional_params": { - "stream": true - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "91531fff-101f-4b61-9c50-f61b36e68455", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "user", - "content": "this is a test request, acknowledge that you got it" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - } - }, - "20231013211426403031": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - } - ], - "optional_params": { - "temperature": 0.5, - "stream": true, - "max_tokens": 1024 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "735637ff-cbe4-4a14-81c1-738d35370072", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - } - }, - "20231013211444781151": { - "pre_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" 
- } - ], - "optional_params": { - "max_tokens_to_sample": 1024, - "temperature": 0.5, - "stream": true - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "8b2c9e5e-535c-4d37-b556-98c71d83f86d", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n\nHuman: Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: Traceback (most recent call last):\\n File \\\"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\\\", line 761, in _update_chunk_length\\n self.chunk_left = int(line, 16)\\n ^^^^^^^^^^^^^\\nValueError: invalid literal for int() with base 16: b''\\n\\n---\\n\\nwhat is happening?\\n\\nHuman: Traceback (most recent call last):\\n File \\\"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\\\", line 761, in _update_chunk_length\\n self.chunk_left = int(line, 16)\\n ^^^^^^^^^^^^^\\nValueError: invalid literal for int() with base 16: b''\\n\\n---\\n\\nwhat is happening?\\n\\nAssistant: \", \"max_tokens_to_sample\": 1024, \"temperature\": 0.5}" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" 
- } - ], - "optional_params": { - "max_tokens_to_sample": 1024, - "temperature": 0.5, - "stream": true - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "8b2c9e5e-535c-4d37-b556-98c71d83f86d", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": null, - "api_key": null, - "additional_args": {}, - "log_event_type": "post_api_call", - "original_response": "", - "complete_streaming_response": { - "id": "chatcmpl-e4c0a2d9-ed36-4f97-9a0a-f4380785d2d1", - "object": "chat.completion.chunk", - "created": 1697256885, - "model": "anthropic.claude-v2", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": " This traceback indicates an error occurred while urllib3 was trying to parse the chunked response from a server. Specifically:\n\n- In urllib3's response.py module, the _update_chunk_length method is trying to parse the length of the next chunk from the response. \n\n- It is calling int(line, 16) to parse the chunk length, which is represented in hex. \n\n- But the line variable is empty (b''), so it cannot be parsed as a hex integer, resulting in a ValueError.\n\nSo most likely the server returned an invalid chunked response, with an empty chunk size rather than a valid hex value. This is causing urllib3 to error while trying to parse the response.\n\nThe root cause could be a server-side issue returning invalid chunked responses, or possibly a network issue resulting in corrupted response data. Some things to try:\n\n- Check the server logs for errors\n- Try reproducing against a different endpoint or server\n- Capture the raw response with a tool like Wireshark to inspect it\n- Check for network issues like packet loss\n- Consider setting retries, backoff, or other retry handling in urllib3\n\nBut the key point is an invalid chunked response caused urllib3 to throw this error while trying to parse it." - }, - "finish_reason": "stop_sequence" - } - ] - } - } - }, - "20231013211456208210": { - "pre_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "system", - "content": "\" This traceback indicates an error occurred while urllib3 was trying to parse the chunked response from a server. Specifically:\n\n- In urllib3's response.py module, the _update_chunk_length method is trying to parse the length of the next chunk from the response. \n\n- It is calling int(line, 16) to parse the chunk length, which is represented in hex. \n\n- But the line variable is empty (b''), so it cannot be parsed as a hex integer, resulting in a ValueError.\n\nSo most likely the server returned an invalid chunked response, with an empty chunk size rather than a valid hex value. This is causing urllib3 to error while trying to parse the response.\n\nThe root cause could be a server-side issue returning invalid chunked responses, or possibly a network issue resulting in corrupted response data. 
Some things to try:\n\n- Check the server logs for errors\n- Try reproducing against a different endpoint or server\n- Capture the raw response with a tool like Wireshark to inspect it\n- Check for network issues like packet loss\n- Consider setting retries, backoff, or other retry handling in urllib3\n\nBut the key point is an invalid chunked response caused urllib3 to throw this error while trying to parse it.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "max_tokens_to_sample": 20, - "temperature": 0.5 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "7a948a2c-0846-4476-b5f3-2ce0ff52b5a3", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: \" This traceback indicates an error occurred while urllib3 was trying to parse the chunked response from a server. Specifically:\n\n- In urllib3's response.py module, the _update_chunk_length method is trying to parse the length of the next chunk from the response. \n\n- It is calling int(line, 16) to parse the chunk length, which is represented in hex. \n\n- But the line variable is empty (b''), so it cannot be parsed as a hex integer, resulting in a ValueError.\n\nSo most likely the server returned an invalid chunked response, with an empty chunk size rather than a valid hex value. This is causing urllib3 to error while trying to parse the response.\n\nThe root cause could be a server-side issue returning invalid chunked responses, or possibly a network issue resulting in corrupted response data. Some things to try:\n\n- Check the server logs for errors\n- Try reproducing against a different endpoint or server\n- Capture the raw response with a tool like Wireshark to inspect it\n- Check for network issues like packet loss\n- Consider setting retries, backoff, or other retry handling in urllib3\n\nBut the key point is an invalid chunked response caused urllib3 to throw this error while trying to parse it.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: \\\" This traceback indicates an error occurred while urllib3 was trying to parse the chunked response from a server. Specifically:\\n\\n- In urllib3's response.py module, the _update_chunk_length method is trying to parse the length of the next chunk from the response. \\n\\n- It is calling int(line, 16) to parse the chunk length, which is represented in hex. \\n\\n- But the line variable is empty (b''), so it cannot be parsed as a hex integer, resulting in a ValueError.\\n\\nSo most likely the server returned an invalid chunked response, with an empty chunk size rather than a valid hex value. This is causing urllib3 to error while trying to parse the response.\\n\\nThe root cause could be a server-side issue returning invalid chunked responses, or possibly a network issue resulting in corrupted response data. 
Some things to try:\\n\\n- Check the server logs for errors\\n- Try reproducing against a different endpoint or server\\n- Capture the raw response with a tool like Wireshark to inspect it\\n- Check for network issues like packet loss\\n- Consider setting retries, backoff, or other retry handling in urllib3\\n\\nBut the key point is an invalid chunked response caused urllib3 to throw this error while trying to parse it.\\\"\\n\\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" - }, - "log_event_type": "pre_api_call" - } - }, - "20231013211702601907": { - "pre_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - } - ], - "optional_params": { - "max_tokens_to_sample": 1024, - "temperature": 0.5, - "stream": true - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "d9139278-c263-421a-ba92-4d58c6297798", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n\nHuman: Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n\nHuman: Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: Traceback (most recent call last):\\n File \\\"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\\\", line 761, in _update_chunk_length\\n self.chunk_left = int(line, 16)\\n ^^^^^^^^^^^^^\\nValueError: invalid literal for 
int() with base 16: b''\\n\\n---\\n\\nwhat is happening?\\n\\nHuman: Traceback (most recent call last):\\n File \\\"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\\\", line 761, in _update_chunk_length\\n self.chunk_left = int(line, 16)\\n ^^^^^^^^^^^^^\\nValueError: invalid literal for int() with base 16: b''\\n\\n---\\n\\nwhat is happening?\\n\\nHuman: Traceback (most recent call last):\\n File \\\"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\\\", line 761, in _update_chunk_length\\n self.chunk_left = int(line, 16)\\n ^^^^^^^^^^^^^\\nValueError: invalid literal for int() with base 16: b''\\n\\n---\\n\\nwhat is happening?\\n\\nAssistant: \", \"max_tokens_to_sample\": 1024, \"temperature\": 0.5}" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - } - ], - "optional_params": { - "max_tokens_to_sample": 1024, - "temperature": 0.5, - "stream": true - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "d9139278-c263-421a-ba92-4d58c6297798", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": null, - "api_key": null, - "additional_args": {}, - "log_event_type": "post_api_call", - "original_response": "", - "complete_streaming_response": { - "id": "chatcmpl-2e4a928c-08b7-4f99-8d80-69cc9cd5ee70", - "object": "chat.completion.chunk", - "created": 1697257022, - "model": "anthropic.claude-v2", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": " This traceback indicates an error occurred while urllib3 was trying to parse the chunked encoding of an HTTP response. \n\nSpecifically, it failed on this line:\n\n```\nself.chunk_left = int(line, 16)\n```\n\nWhere `line` was an empty byte string `b''`.\n\nThe chunked encoding format specifies each chunk starts with the length in hex, followed by a newline. So it was expecting a hex string like '100\\r\\n' but got an empty string instead.\n\nSo most likely the server returned an invalid chunked response that didn't follow the proper format. 
\n\nThe root cause could be:\n\n- The server has a bug in its chunked encoding implementation\n- There is a network issue corrupting the response\n- The client made a request that triggered an edge case on the server\n\nSo in summary, this error indicates the HTTP response being parsed did not contain valid chunked encoding as expected. The root cause needs further debugging of the server response and/or network conditions." - }, - "finish_reason": "stop_sequence" - } - ] - } - } - }, - "20231013211716068287": { - "pre_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "system", - "content": "\" This traceback indicates an error occurred while urllib3 was trying to parse the chunked encoding of an HTTP response. \n\nSpecifically, it failed on this line:\n\n```\nself.chunk_left = int(line, 16)\n```\n\nWhere `line` was an empty byte string `b''`.\n\nThe chunked encoding format specifies each chunk starts with the length in hex, followed by a newline. So it was expecting a hex string like '100\\r\\n' but got an empty string instead.\n\nSo most likely the server returned an invalid chunked response that didn't follow the proper format. \n\nThe root cause could be:\n\n- The server has a bug in its chunked encoding implementation\n- There is a network issue corrupting the response\n- The client made a request that triggered an edge case on the server\n\nSo in summary, this error indicates the HTTP response being parsed did not contain valid chunked encoding as expected. The root cause needs further debugging of the server response and/or network conditions.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "max_tokens_to_sample": 20, - "temperature": 0.5 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "c3aba2cb-431d-4778-bfb2-a8cb981ff1af", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: \" This traceback indicates an error occurred while urllib3 was trying to parse the chunked encoding of an HTTP response. \n\nSpecifically, it failed on this line:\n\n```\nself.chunk_left = int(line, 16)\n```\n\nWhere `line` was an empty byte string `b''`.\n\nThe chunked encoding format specifies each chunk starts with the length in hex, followed by a newline. So it was expecting a hex string like '100\\r\\n' but got an empty string instead.\n\nSo most likely the server returned an invalid chunked response that didn't follow the proper format. \n\nThe root cause could be:\n\n- The server has a bug in its chunked encoding implementation\n- There is a network issue corrupting the response\n- The client made a request that triggered an edge case on the server\n\nSo in summary, this error indicates the HTTP response being parsed did not contain valid chunked encoding as expected. The root cause needs further debugging of the server response and/or network conditions.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: \\\" This traceback indicates an error occurred while urllib3 was trying to parse the chunked encoding of an HTTP response. 
\\n\\nSpecifically, it failed on this line:\\n\\n```\\nself.chunk_left = int(line, 16)\\n```\\n\\nWhere `line` was an empty byte string `b''`.\\n\\nThe chunked encoding format specifies each chunk starts with the length in hex, followed by a newline. So it was expecting a hex string like '100\\\\r\\\\n' but got an empty string instead.\\n\\nSo most likely the server returned an invalid chunked response that didn't follow the proper format. \\n\\nThe root cause could be:\\n\\n- The server has a bug in its chunked encoding implementation\\n- There is a network issue corrupting the response\\n- The client made a request that triggered an edge case on the server\\n\\nSo in summary, this error indicates the HTTP response being parsed did not contain valid chunked encoding as expected. The root cause needs further debugging of the server response and/or network conditions.\\\"\\n\\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "system", - "content": "\" This traceback indicates an error occurred while urllib3 was trying to parse the chunked encoding of an HTTP response. \n\nSpecifically, it failed on this line:\n\n```\nself.chunk_left = int(line, 16)\n```\n\nWhere `line` was an empty byte string `b''`.\n\nThe chunked encoding format specifies each chunk starts with the length in hex, followed by a newline. So it was expecting a hex string like '100\\r\\n' but got an empty string instead.\n\nSo most likely the server returned an invalid chunked response that didn't follow the proper format. \n\nThe root cause could be:\n\n- The server has a bug in its chunked encoding implementation\n- There is a network issue corrupting the response\n- The client made a request that triggered an edge case on the server\n\nSo in summary, this error indicates the HTTP response being parsed did not contain valid chunked encoding as expected. The root cause needs further debugging of the server response and/or network conditions.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "max_tokens_to_sample": 20, - "temperature": 0.5 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "c3aba2cb-431d-4778-bfb2-a8cb981ff1af", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: \" This traceback indicates an error occurred while urllib3 was trying to parse the chunked encoding of an HTTP response. \n\nSpecifically, it failed on this line:\n\n```\nself.chunk_left = int(line, 16)\n```\n\nWhere `line` was an empty byte string `b''`.\n\nThe chunked encoding format specifies each chunk starts with the length in hex, followed by a newline. So it was expecting a hex string like '100\\r\\n' but got an empty string instead.\n\nSo most likely the server returned an invalid chunked response that didn't follow the proper format. 
\n\nThe root cause could be:\n\n- The server has a bug in its chunked encoding implementation\n- There is a network issue corrupting the response\n- The client made a request that triggered an edge case on the server\n\nSo in summary, this error indicates the HTTP response being parsed did not contain valid chunked encoding as expected. The root cause needs further debugging of the server response and/or network conditions.\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: \\\" This traceback indicates an error occurred while urllib3 was trying to parse the chunked encoding of an HTTP response. \\n\\nSpecifically, it failed on this line:\\n\\n```\\nself.chunk_left = int(line, 16)\\n```\\n\\nWhere `line` was an empty byte string `b''`.\\n\\nThe chunked encoding format specifies each chunk starts with the length in hex, followed by a newline. So it was expecting a hex string like '100\\\\r\\\\n' but got an empty string instead.\\n\\nSo most likely the server returned an invalid chunked response that didn't follow the proper format. \\n\\nThe root cause could be:\\n\\n- The server has a bug in its chunked encoding implementation\\n- There is a network issue corrupting the response\\n- The client made a request that triggered an edge case on the server\\n\\nSo in summary, this error indicates the HTTP response being parsed did not contain valid chunked encoding as expected. The root cause needs further debugging of the server response and/or network conditions.\\\"\\n\\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" - }, - "log_event_type": "post_api_call", - "original_response": { - "completion": " Invalid chunked encoding in HTTP response", - "stop_reason": "stop_sequence" - } - } - }, - "20231013211719949668": { - "pre_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "system", - "content": "### Instruction:\nTraceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n### Instruction:\nTraceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n### Instruction:\nTraceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n### Response:\n This traceback indicates an error occurred while urllib3 was trying to parse the chunked encoding of an HTTP response. \n\nSpecifically, it failed on this line:\n\n```\nself.chunk_left = int(line, 16)\n```\n\nWhere `line` was an empty byte string `b''`.\n\nThe chunked encoding format specifies each chunk starts with the length in hex, followed by a newline. 
So it was expecting a hex string like '100\\r\\n' but got an empty string instead.\n\nSo most likely the server returned an invalid chunked response that didn't follow the proper format. \n\nThe root cause could be:\n\n- The server has a bug in its chunked encoding implementation\n- There is a network issue corrupting the response\n- The client made a request that triggered an edge case on the server\n\nSo in summary, this error indicates the HTTP response being parsed did not contain valid chunked encoding as expected. The root cause needs further debugging of the server response and/or network conditions.\n### Response:\n\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: " - } - ], - "optional_params": { - "max_tokens_to_sample": 20, - "temperature": 0.5 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "95abd0ba-d6df-4d5b-a3c7-b432df12b146", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: ### Instruction:\nTraceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n### Instruction:\nTraceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n### Instruction:\nTraceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n### Response:\n This traceback indicates an error occurred while urllib3 was trying to parse the chunked encoding of an HTTP response. \n\nSpecifically, it failed on this line:\n\n```\nself.chunk_left = int(line, 16)\n```\n\nWhere `line` was an empty byte string `b''`.\n\nThe chunked encoding format specifies each chunk starts with the length in hex, followed by a newline. So it was expecting a hex string like '100\\r\\n' but got an empty string instead.\n\nSo most likely the server returned an invalid chunked response that didn't follow the proper format. \n\nThe root cause could be:\n\n- The server has a bug in its chunked encoding implementation\n- There is a network issue corrupting the response\n- The client made a request that triggered an edge case on the server\n\nSo in summary, this error indicates the HTTP response being parsed did not contain valid chunked encoding as expected. The root cause needs further debugging of the server response and/or network conditions.\n### Response:\n\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. 
The title is: \n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: ### Instruction:\\nTraceback (most recent call last):\\n File \\\"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\\\", line 761, in _update_chunk_length\\n self.chunk_left = int(line, 16)\\n ^^^^^^^^^^^^^\\nValueError: invalid literal for int() with base 16: b''\\n\\n---\\n\\nwhat is happening?\\n### Instruction:\\nTraceback (most recent call last):\\n File \\\"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\\\", line 761, in _update_chunk_length\\n self.chunk_left = int(line, 16)\\n ^^^^^^^^^^^^^\\nValueError: invalid literal for int() with base 16: b''\\n\\n---\\n\\nwhat is happening?\\n### Instruction:\\nTraceback (most recent call last):\\n File \\\"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\\\", line 761, in _update_chunk_length\\n self.chunk_left = int(line, 16)\\n ^^^^^^^^^^^^^\\nValueError: invalid literal for int() with base 16: b''\\n\\n---\\n\\nwhat is happening?\\n### Response:\\n This traceback indicates an error occurred while urllib3 was trying to parse the chunked encoding of an HTTP response. \\n\\nSpecifically, it failed on this line:\\n\\n```\\nself.chunk_left = int(line, 16)\\n```\\n\\nWhere `line` was an empty byte string `b''`.\\n\\nThe chunked encoding format specifies each chunk starts with the length in hex, followed by a newline. So it was expecting a hex string like '100\\\\r\\\\n' but got an empty string instead.\\n\\nSo most likely the server returned an invalid chunked response that didn't follow the proper format. \\n\\nThe root cause could be:\\n\\n- The server has a bug in its chunked encoding implementation\\n- There is a network issue corrupting the response\\n- The client made a request that triggered an edge case on the server\\n\\nSo in summary, this error indicates the HTTP response being parsed did not contain valid chunked encoding as expected. The root cause needs further debugging of the server response and/or network conditions.\\n### Response:\\n\\n\\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. 
The title is: \\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "system", - "content": "### Instruction:\nTraceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n### Instruction:\nTraceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n### Instruction:\nTraceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n### Response:\n This traceback indicates an error occurred while urllib3 was trying to parse the chunked encoding of an HTTP response. \n\nSpecifically, it failed on this line:\n\n```\nself.chunk_left = int(line, 16)\n```\n\nWhere `line` was an empty byte string `b''`.\n\nThe chunked encoding format specifies each chunk starts with the length in hex, followed by a newline. So it was expecting a hex string like '100\\r\\n' but got an empty string instead.\n\nSo most likely the server returned an invalid chunked response that didn't follow the proper format. \n\nThe root cause could be:\n\n- The server has a bug in its chunked encoding implementation\n- There is a network issue corrupting the response\n- The client made a request that triggered an edge case on the server\n\nSo in summary, this error indicates the HTTP response being parsed did not contain valid chunked encoding as expected. The root cause needs further debugging of the server response and/or network conditions.\n### Response:\n\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. 
The title is: " - } - ], - "optional_params": { - "max_tokens_to_sample": 20, - "temperature": 0.5 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "95abd0ba-d6df-4d5b-a3c7-b432df12b146", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: ### Instruction:\nTraceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n### Instruction:\nTraceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n### Instruction:\nTraceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?\n### Response:\n This traceback indicates an error occurred while urllib3 was trying to parse the chunked encoding of an HTTP response. \n\nSpecifically, it failed on this line:\n\n```\nself.chunk_left = int(line, 16)\n```\n\nWhere `line` was an empty byte string `b''`.\n\nThe chunked encoding format specifies each chunk starts with the length in hex, followed by a newline. So it was expecting a hex string like '100\\r\\n' but got an empty string instead.\n\nSo most likely the server returned an invalid chunked response that didn't follow the proper format. \n\nThe root cause could be:\n\n- The server has a bug in its chunked encoding implementation\n- There is a network issue corrupting the response\n- The client made a request that triggered an edge case on the server\n\nSo in summary, this error indicates the HTTP response being parsed did not contain valid chunked encoding as expected. The root cause needs further debugging of the server response and/or network conditions.\n### Response:\n\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. 
The title is: \n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: ### Instruction:\\nTraceback (most recent call last):\\n File \\\"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\\\", line 761, in _update_chunk_length\\n self.chunk_left = int(line, 16)\\n ^^^^^^^^^^^^^\\nValueError: invalid literal for int() with base 16: b''\\n\\n---\\n\\nwhat is happening?\\n### Instruction:\\nTraceback (most recent call last):\\n File \\\"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\\\", line 761, in _update_chunk_length\\n self.chunk_left = int(line, 16)\\n ^^^^^^^^^^^^^\\nValueError: invalid literal for int() with base 16: b''\\n\\n---\\n\\nwhat is happening?\\n### Instruction:\\nTraceback (most recent call last):\\n File \\\"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\\\", line 761, in _update_chunk_length\\n self.chunk_left = int(line, 16)\\n ^^^^^^^^^^^^^\\nValueError: invalid literal for int() with base 16: b''\\n\\n---\\n\\nwhat is happening?\\n### Response:\\n This traceback indicates an error occurred while urllib3 was trying to parse the chunked encoding of an HTTP response. \\n\\nSpecifically, it failed on this line:\\n\\n```\\nself.chunk_left = int(line, 16)\\n```\\n\\nWhere `line` was an empty byte string `b''`.\\n\\nThe chunked encoding format specifies each chunk starts with the length in hex, followed by a newline. So it was expecting a hex string like '100\\\\r\\\\n' but got an empty string instead.\\n\\nSo most likely the server returned an invalid chunked response that didn't follow the proper format. \\n\\nThe root cause could be:\\n\\n- The server has a bug in its chunked encoding implementation\\n- There is a network issue corrupting the response\\n- The client made a request that triggered an edge case on the server\\n\\nSo in summary, this error indicates the HTTP response being parsed did not contain valid chunked encoding as expected. The root cause needs further debugging of the server response and/or network conditions.\\n### Response:\\n\\n\\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: \\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" - }, - "log_event_type": "post_api_call", - "original_response": { - "completion": " Invalid chunked encoding in HTTP response", - "stop_reason": "stop_sequence" - } - } - }, - "20231013212440790156": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" 
- }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - } - ], - "optional_params": { - "temperature": 0.5, - "stream": true, - "max_tokens": 1024 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "1f157151-90b2-4aac-a949-9bd9db21b87c", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - } - }, - "20231013212442740993": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "73f74f44-1005-4cf8-9d4e-b610e45faee9", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "73f74f44-1005-4cf8-9d4e-b610e45faee9", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "api_key": "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", - "additional_args": { - "headers": null - }, - "log_event_type": "post_api_call", - "original_response": null - } - }, - "20231013212634080367": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" 
- }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - } - ], - "optional_params": { - "temperature": 0.5, - "stream": true, - "max_tokens": 1024 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "1f3e2c49-d256-476c-b9e3-ded72b726770", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - } - }, - "20231013212636338333": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "21eb950d-6eba-44c0-bdc6-40c35b96f9c0", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "21eb950d-6eba-44c0-bdc6-40c35b96f9c0", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "api_key": "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", - "additional_args": { - "headers": null - }, - "log_event_type": "post_api_call", - "original_response": null - } - }, - "20231013212713833601": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" 
- }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - } - ], - "optional_params": { - "temperature": 0.5, - "stream": true, - "max_tokens": 1024 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "9eb5637d-ba26-423d-b18e-6675a8589b5d", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" 
- } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - } - }, - "20231013212716209992": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "fd522c4d-fa23-45da-b951-100279fd0832", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "fd522c4d-fa23-45da-b951-100279fd0832", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "api_key": "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", - "additional_args": { - "headers": null - }, - "log_event_type": "post_api_call", - "original_response": null - } - }, - "20231013212727242470": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" 
- }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - } - ], - "optional_params": { - "temperature": 0.5, - "stream": true, - "max_tokens": 1024 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "4996a108-c2a5-4fb7-9247-13dc24858203", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" 
- }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - } - }, - "20231013212728665109": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "10bd51f7-f48f-44f1-beb0-429692ee50e4", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "10bd51f7-f48f-44f1-beb0-429692ee50e4", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" - } - ], - "api_key": "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", - "additional_args": { - "headers": null - }, - "log_event_type": "post_api_call", - "original_response": null - } - }, - "20231013212822162015": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" 
- } - ], - "optional_params": { - "temperature": 0.5, - "stream": true, - "max_tokens": 1024 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "f3edcbf8-5902-4fe0-81b5-108ab68c0b8a", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" 
- } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - } - }, - "20231013212824301116": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "ee74e5c9-e0c9-49ae-8434-3d640a56a4fb", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "ee74e5c9-e0c9-49ae-8434-3d640a56a4fb", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "api_key": "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", - "additional_args": { - "headers": null - }, - "log_event_type": "post_api_call", - "original_response": null - } - }, - "20231013213040630177": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" 
- }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - } - ], - "optional_params": { - "temperature": 0.5, - "stream": true, - "max_tokens": 1024 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "27ac6fa8-750b-43d3-a9d4-e0cd29cd6b6a", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" 
- }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - } - }, - "20231013213042848247": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "5f49fd9c-1ba0-4d9b-8b4d-59a47a04967b", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "5f49fd9c-1ba0-4d9b-8b4d-59a47a04967b", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "api_key": "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", - "additional_args": { - "headers": null - }, - "log_event_type": "post_api_call", - "original_response": { - "id": "0766a9b2-81e5-4977-aabd-397b2d8a446a", - "object": "chat.completion", - "choices": [ - { - "finish_reason": "stop", - "index": 0, - "message": { - "content": "Hello! How can I help you today?", - "role": "assistant", - "logprobs": null - } - } - ], - "created": 1697257842, - "model": "mistral-7b-instruct", - "usage": { - "prompt_tokens": 9, - "completion_tokens": 10, - "total_tokens": 19 - } - } - } - }, - "20231013213104790104": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" 
- }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - } - ], - "optional_params": { - "temperature": 0.5, - "stream": true, - "max_tokens": 1024 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "0956ede5-da79-4070-8f70-d4e16fe371f2", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" 
- }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - }, - { - "role": "user", - "content": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/miniconda3/lib/python3.11/site-packages/urllib3/response.py\", line 761, in _update_chunk_length\n self.chunk_left = int(line, 16)\n ^^^^^^^^^^^^^\nValueError: invalid literal for int() with base 16: b''\n\n---\n\nwhat is happening?" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - } - }, - "20231013213106766925": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "caf48b76-5699-46ba-a825-56ca906552d1", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "caf48b76-5699-46ba-a825-56ca906552d1", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "api_key": "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", - "additional_args": { - "headers": null - }, - "log_event_type": "post_api_call", - "original_response": { - "id": "ca279a10-04f2-42ef-957b-f6914c522208", - "object": "chat.completion", - "choices": [ - { - "finish_reason": "length", - "index": 0, - "message": { - "content": "I'm sorry, but I'm not sure what you're asking. Can you please", - "role": "assistant", - "logprobs": null - } - } - ], - "created": 1697257866, - "model": "mistral-7b-instruct", - "usage": { - "prompt_tokens": 9, - "completion_tokens": 20, - "total_tokens": 29 - } - } - } - }, - "20231013213116202196": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "user", - "content": "hey" - } - ], - "optional_params": { - "temperature": 0.5, - "stream": true, - "max_tokens": 1024 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "0a655200-aa97-4a41-9083-6bbd2245f9bc", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "user", - "content": "hey" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - } - }, - "20231013213116464103": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "45d6bb72-338c-40b8-b76e-9b1efaad0a01", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "45d6bb72-338c-40b8-b76e-9b1efaad0a01", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "api_key": "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", - "additional_args": { - "headers": null - }, - "log_event_type": "post_api_call", - "original_response": { - "id": "12e06fba-2f03-45ed-a294-1cdc467513df", - "object": "chat.completion", - "choices": [ - { - "finish_reason": "length", - "index": 0, - "message": { - "content": "What is the next term in -973, -1945, -29", - "role": "assistant", - "logprobs": null - } - } - ], - "created": 1697257876, - "model": "mistral-7b-instruct", - "usage": { - "prompt_tokens": 9, - "completion_tokens": 20, - "total_tokens": 29 - } - } - } - }, - "20231013213116947146": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "### Instruction:\nhey\n### Response:\n\n### Response:\n\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: " - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "d4ef9365-c599-4ac2-8410-642ec094e126", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "### Instruction:\nhey\n### Response:\n\n### Response:\n\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: " - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "### Instruction:\nhey\n### Response:\n\n### Response:\n\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. 
The title is: " - }, - { - "role": "user", - "content": "" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "d4ef9365-c599-4ac2-8410-642ec094e126", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "### Instruction:\nhey\n### Response:\n\n### Response:\n\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: " - }, - { - "role": "user", - "content": "" - } - ], - "api_key": "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", - "additional_args": { - "headers": null - }, - "log_event_type": "post_api_call", - "original_response": { - "id": "a55ed8d0-217d-45af-b166-a0768181789c", - "object": "chat.completion", - "choices": [ - { - "finish_reason": "length", - "index": 0, - "message": { - "content": "I'm sorry, it seems like there was an error in the previous response. Can you please", - "role": "assistant", - "logprobs": null - } - } - ], - "created": 1697257876, - "model": "mistral-7b-instruct", - "usage": { - "prompt_tokens": 9, - "completion_tokens": 20, - "total_tokens": 29 - } - } - } - }, - "20231013213150884896": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - } - ], - "optional_params": { - "temperature": 0.5, - "stream": true, - "max_tokens": 1024 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "8ffdaf4b-5eb0-46a4-b523-78efc0ae1484", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - } - }, - "20231013213151554236": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "6824339a-0dba-46d8-9c62-caff71b0c032", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "6824339a-0dba-46d8-9c62-caff71b0c032", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "api_key": "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", - "additional_args": { - "headers": null - }, - "log_event_type": "post_api_call", - "original_response": { - "id": "69e15407-b9fd-4cec-94d2-915465612e82", - "object": "chat.completion", - "choices": [ - { - "finish_reason": "stop", - "index": 0, - "message": { - "content": "Hello! How can I help you today?", - "role": "assistant", - "logprobs": null - } - } - ], - "created": 1697257911, - "model": "mistral-7b-instruct", - "usage": { - "prompt_tokens": 9, - "completion_tokens": 10, - "total_tokens": 19 - } - } - } - }, - "20231013213244723525": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - } - ], - "optional_params": { - "temperature": 0.5, - "stream": true, - "max_tokens": 1024 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "3d42213c-bbd1-4842-85f8-a67caeda8a78", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - } - }, - "20231013213245454481": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "8d5ca9a4-479c-4839-978e-fe71d3087b84", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "8d5ca9a4-479c-4839-978e-fe71d3087b84", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "api_key": "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", - "additional_args": { - "headers": null - }, - "log_event_type": "post_api_call", - "original_response": { - "id": "1300a2b8-ee16-4863-82ad-011a6186e379", - "object": "chat.completion", - "choices": [ - { - "finish_reason": "stop", - "index": 0, - "message": { - "content": "Hello! 
How can I assist you today?", - "role": "assistant", - "logprobs": null - } - } - ], - "created": 1697257965, - "model": "mistral-7b-instruct", - "usage": { - "prompt_tokens": 9, - "completion_tokens": 10, - "total_tokens": 19 - } - } - } - }, - "20231013213341878538": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - } - ], - "optional_params": { - "temperature": 0.5, - "stream": true, - "max_tokens": 1024 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "616a6dab-577f-4fa6-bcdc-bd1c64c561c9", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - } - }, - "20231013213342683953": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "370a2c6a-f3d5-43c4-ab01-ad67466cbc7b", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "370a2c6a-f3d5-43c4-ab01-ad67466cbc7b", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "api_key": "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", - "additional_args": { - "headers": null - }, - "log_event_type": "post_api_call", - "original_response": { - "id": "9dc52b98-b413-472e-a12a-a7649052fb42", - "object": "chat.completion", - "choices": [ - { - "finish_reason": "stop", - "index": 0, - "message": { - "content": "How can I help you today?", - "role": "assistant", - "logprobs": null - } - } - ], - "created": 1697258022, - "model": "mistral-7b-instruct", - "usage": { - "prompt_tokens": 9, - "completion_tokens": 8, - "total_tokens": 17 - } - } - } - }, - "20231013213448796618": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - } - ], - "optional_params": { - "temperature": 0.5, - "stream": true, - "max_tokens": 1024 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "1aed5ecd-db9f-4e14-85c0-76ab2c4267b1", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - } - }, - "20231013213449357105": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "0b701d17-7db0-46e2-86d2-4d1f7ca0650c", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "0b701d17-7db0-46e2-86d2-4d1f7ca0650c", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "api_key": "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", - "additional_args": { - "headers": null - }, - "log_event_type": "post_api_call", - "original_response": { - "id": "1b887188-9148-445e-b711-3d3d0cf6b185", - "object": "chat.completion", - "choices": [ - { - "finish_reason": "stop", - "index": 0, - "message": { - "content": "Hello! How can I help you today?", - "role": "assistant", - "logprobs": null - } - } - ], - "created": 1697258089, - "model": "mistral-7b-instruct", - "usage": { - "prompt_tokens": 9, - "completion_tokens": 10, - "total_tokens": 19 - } - } - } - }, - "20231013213517902785": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - } - ], - "optional_params": { - "temperature": 0.5, - "stream": true, - "max_tokens": 1024 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "e4a66a54-aced-448b-8fc7-af323fc3e06d", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - } - }, - "20231013213518678678": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "08642a2a-51fc-4b38-9e01-b7595bf9a104", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "08642a2a-51fc-4b38-9e01-b7595bf9a104", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "api_key": "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", - "additional_args": { - "headers": null - }, - "log_event_type": "post_api_call", - "original_response": { - "id": "52d91416-c647-44cb-a4ee-1dc70d2072db", - "object": "chat.completion", - "choices": [ - { - "finish_reason": "stop", - "index": 0, - "message": { - "content": "Hello! How can I help you today?", - "role": "assistant", - "logprobs": null - } - } - ], - "created": 1697258118, - "model": "mistral-7b-instruct", - "usage": { - "prompt_tokens": 9, - "completion_tokens": 10, - "total_tokens": 19 - } - } - } - }, - "20231013213539152325": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - } - ], - "optional_params": { - "temperature": 0.5, - "stream": true, - "max_tokens": 1024 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "8de200e3-2125-4f50-82fc-c9b6d12799a4", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - } - }, - "20231013213539846020": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "8ca07d1a-0a55-46d6-8b21-1cf098197b05", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "8ca07d1a-0a55-46d6-8b21-1cf098197b05", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "api_key": "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", - "additional_args": { - "headers": null - }, - "log_event_type": "post_api_call", - "original_response": { - "id": "51410c8c-c7d8-4ba2-9399-02a8acd3e180", - "object": "chat.completion", - "choices": [ - { - "finish_reason": "stop", - "index": 0, - "message": { - "content": "Hello! 
How can I help you today?", - "role": "assistant", - "logprobs": null - } - } - ], - "created": 1697258139, - "model": "mistral-7b-instruct", - "usage": { - "prompt_tokens": 9, - "completion_tokens": 10, - "total_tokens": 19 - } - } - } - }, - "20231013213744078191": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - } - ], - "optional_params": { - "temperature": 0.5, - "stream": true, - "max_tokens": 1024 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "aa5f4e9c-bc99-4f22-9a98-24fe2dbe7249", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - } - }, - "20231013213835712925": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - } - ], - "optional_params": { - "temperature": 0.5, - "stream": true, - "max_tokens": 1024 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "19fe0854-3616-4882-980f-87127e97263a", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - } - }, - "20231013213914253529": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "user", - "content": "hey" - }, - { - "role": 
"user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - } - ], - "optional_params": { - "temperature": 0.5, - "stream": true, - "max_tokens": 1024 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "63e851e8-719b-4810-8f90-8af2d436bc54", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - } - }, - "20231013213915110326": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "d00f6bd9-5e93-4433-be35-9de27d7bac63", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "d00f6bd9-5e93-4433-be35-9de27d7bac63", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "api_key": "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", - "additional_args": { - "headers": null - }, - "log_event_type": "post_api_call", - "original_response": { - "id": "f8eb8660-0c25-4a6c-a933-5490c2393ba1", - "object": "chat.completion", - "choices": [ - { - "finish_reason": "stop", - "index": 0, - "message": { - "content": "Hello! How can I help you today?", - "role": "assistant", - "logprobs": null - } - } - ], - "created": 1697258355, - "model": "mistral-7b-instruct", - "usage": { - "prompt_tokens": 9, - "completion_tokens": 10, - "total_tokens": 19 - } - } - } - }, - "20231013214136176621": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - } - ], - "optional_params": { - "temperature": 0.5, - "stream": true, - "max_tokens": 1024 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "448a5afb-c4a3-4426-89ee-3d9a0bdf0703", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - } - }, - "20231013214136742309": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "1f019c82-11f7-4772-aea7-866e49149605", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "1f019c82-11f7-4772-aea7-866e49149605", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "api_key": "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", - "additional_args": { - "headers": null - }, - "log_event_type": "post_api_call", - "original_response": { - "id": "4dd3e710-cff3-4cc9-970d-1ddc5c733c79", - "object": "chat.completion", - "choices": [ - { - "finish_reason": "length", - "index": 0, - "message": { - "content": "To calculate the remainder when 1584 is divided by 15, we can use", - "role": "assistant", - "logprobs": null - } - } - ], - "created": 1697258496, - "model": "mistral-7b-instruct", - "usage": { - "prompt_tokens": 9, - "completion_tokens": 20, - "total_tokens": 29 - } - } - } - }, - "20231013214238159337": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - } - ], - "optional_params": { - "temperature": 0.5, - "stream": true, - "max_tokens": 1024 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "590120e8-4b90-47c1-bb2a-4e267d6162d9", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - } - ], - "api_key": "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", - 
"additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - } - }, - "20231013214238721319": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "e6783349-3427-46d0-ad71-749f7f088097", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "e6783349-3427-46d0-ad71-749f7f088097", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "api_key": "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", - "additional_args": { - "headers": null - }, - "log_event_type": "post_api_call", - "original_response": { - "id": "24f98227-c801-4d6e-82a0-0ae0a00d5274", - "object": "chat.completion", - "choices": [ - { - "finish_reason": "stop", - "index": 0, - "message": { - "content": "Hello! 
How can I assist you today?", - "role": "assistant", - "logprobs": null - } - } - ], - "created": 1697258558, - "model": "mistral-7b-instruct", - "usage": { - "prompt_tokens": 9, - "completion_tokens": 10, - "total_tokens": 19 - } - } - } - }, - "20231013214340528772": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - } - ], - "optional_params": { - "temperature": 0.5, - "stream": true, - "max_tokens": 1024 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "0f538792-68de-4310-9dcd-5555fcebfb0e", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - }, - { - "role": "user", - "content": "hey" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - } - }, - "20231013214341284390": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "f24d3c00-4895-497e-b638-67b4338909e9", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "f24d3c00-4895-497e-b638-67b4338909e9", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "api_key": "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", - "additional_args": { - "headers": null - }, - "log_event_type": "post_api_call", - "original_response": { - "id": "4e28c3e7-7acf-4e1c-9ef9-3eb6944d1f74", - "object": "chat.completion", - "choices": [ - { - "finish_reason": "stop", - "index": 0, - "message": { - "content": "Hello! How can I assist you today?", - "role": "assistant", - "logprobs": null - } - } - ], - "created": 1697258621, - "model": "mistral-7b-instruct", - "usage": { - "prompt_tokens": 9, - "completion_tokens": 10, - "total_tokens": 19 - } - } - } - }, - "20231013214414518891": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "user", - "content": "hey" - } - ], - "optional_params": { - "temperature": 0.5, - "stream": true, - "max_tokens": 1024 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "6caf4d75-a5c2-488c-a0f2-981966670de4", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "user", - "content": "hey" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "user", - "content": "hey" - } - ], - "optional_params": { - "temperature": 0.5, - "stream": true, - "max_tokens": 1024 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "6caf4d75-a5c2-488c-a0f2-981966670de4", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": null, - "api_key": null, - "additional_args": {}, - "log_event_type": "post_api_call", - "original_response": "", - "complete_streaming_response": { - "id": "chatcmpl-f65e349b-51c8-428a-aaae-052cdda20e33", - "object": "chat.completion.chunk", - "created": 1697258654, - "model": "mistral-7b-instruct", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "Hello! How can I assist you today?" - }, - "finish_reason": "stop" - } - ] - } - } - }, - "20231013214414838837": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"Hello! How can I assist you today?\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "03309753-b23d-40c7-afe3-a2e2a24b0f93", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"Hello! How can I assist you today?\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "\"Hello! How can I assist you today?\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "03309753-b23d-40c7-afe3-a2e2a24b0f93", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "\"Hello! How can I assist you today?\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - }, - { - "role": "user", - "content": "" - } - ], - "api_key": "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", - "additional_args": { - "headers": null - }, - "log_event_type": "post_api_call", - "original_response": { - "id": "675d7305-e5d2-4eae-88d1-1b5c362f636c", - "object": "chat.completion", - "choices": [ - { - "finish_reason": "length", - "index": 0, - "message": { - "content": "Welcome to my language model! I'm here to help you with any language-related task you", - "role": "assistant", - "logprobs": null - } - } - ], - "created": 1697258654, - "model": "mistral-7b-instruct", - "usage": { - "prompt_tokens": 9, - "completion_tokens": 20, - "total_tokens": 29 - } - } - } - }, - "20231013214415390002": { - "pre_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "### Instruction:\nhey\n### Response:\nHello! How can I assist you today?\n### Response:\n\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: " - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "509dc457-8553-4902-8c44-432c53acd3fa", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "### Instruction:\nhey\n### Response:\nHello! How can I assist you today?\n### Response:\n\n\nGive a short title to describe the above chat session. 
Do not put quotes around the title. Do not use more than 6 words. The title is: " - } - ], - "api_key": "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", - "additional_args": { - "headers": null, - "api_base": "https://api.perplexity.ai" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "mistral-7b-instruct", - "messages": [ - { - "role": "system", - "content": "### Instruction:\nhey\n### Response:\nHello! How can I assist you today?\n### Response:\n\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: " - }, - { - "role": "user", - "content": "" - } - ], - "optional_params": { - "temperature": 0.5, - "max_tokens": 20 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "custom_openai", - "api_base": "https://api.perplexity.ai", - "litellm_call_id": "509dc457-8553-4902-8c44-432c53acd3fa", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": [ - { - "role": "system", - "content": "### Instruction:\nhey\n### Response:\nHello! How can I assist you today?\n### Response:\n\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: " - }, - { - "role": "user", - "content": "" - } - ], - "api_key": "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", - "additional_args": { - "headers": null - }, - "log_event_type": "post_api_call", - "original_response": { - "id": "a250438f-1fd0-4acd-a65a-921bb102c956", - "object": "chat.completion", - "choices": [ - { - "finish_reason": "stop", - "index": 0, - "message": { - "content": "Hello! 
[... several thousand deleted lines of litellm/proxy/api_log.json omitted: logged pre_api_call/post_api_call entries for streaming "mistral-7b-instruct" requests against https://api.perplexity.ai (greetings and auto-generated chat-title prompts) and for "anthropic.claude-v2" requests via AWS Bedrock (a conversation about adding a key to a TOML config file, handling a missing file, a tomli AttributeError, and writing the config as JSON instead). Each entry repeats its messages, litellm_params, and serialized request payload, and several include a hard-coded Perplexity API key. ...]
The config will be saved to the file in JSON format.\n\nLet me know if you have any other questions!\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "max_tokens_to_sample": 20, - "temperature": 0.5 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "15d5e700-8347-4c3d-9981-602df847a111", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: \" Yes, you can write the config data to the file without using the tomli module. \n\nSince the config is a python dictionary, you can directly write it to the file in JSON format using the json module:\n\n```python\nimport json \nimport os\n\nuser_config_path = \"litellm.secrets.json\" \n\nif os.path.exists(user_config_path):\n with open(user_config_path) as f:\n config = json.load(f)\nelse:\n config = {}\n\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\n\nwith open(user_config_path, 'w') as f:\n json.dump(config, f)\n```\n\nThe key points:\n\n- Load config using json.load() and write with json.dump() \n- This will store the data in JSON format rather than TOML\n- Change file extension to .json\n\nNo need for the tomli module. The config will be saved to the file in JSON format.\n\nLet me know if you have any other questions!\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: \\\" Yes, you can write the config data to the file without using the tomli module. \\n\\nSince the config is a python dictionary, you can directly write it to the file in JSON format using the json module:\\n\\n```python\\nimport json \\nimport os\\n\\nuser_config_path = \\\"litellm.secrets.json\\\" \\n\\nif os.path.exists(user_config_path):\\n with open(user_config_path) as f:\\n config = json.load(f)\\nelse:\\n config = {}\\n\\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\\n\\nwith open(user_config_path, 'w') as f:\\n json.dump(config, f)\\n```\\n\\nThe key points:\\n\\n- Load config using json.load() and write with json.dump() \\n- This will store the data in JSON format rather than TOML\\n- Change file extension to .json\\n\\nNo need for the tomli module. The config will be saved to the file in JSON format.\\n\\nLet me know if you have any other questions!\\\"\\n\\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "system", - "content": "\" Yes, you can write the config data to the file without using the tomli module. 
\n\nSince the config is a python dictionary, you can directly write it to the file in JSON format using the json module:\n\n```python\nimport json \nimport os\n\nuser_config_path = \"litellm.secrets.json\" \n\nif os.path.exists(user_config_path):\n with open(user_config_path) as f:\n config = json.load(f)\nelse:\n config = {}\n\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\n\nwith open(user_config_path, 'w') as f:\n json.dump(config, f)\n```\n\nThe key points:\n\n- Load config using json.load() and write with json.dump() \n- This will store the data in JSON format rather than TOML\n- Change file extension to .json\n\nNo need for the tomli module. The config will be saved to the file in JSON format.\n\nLet me know if you have any other questions!\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "max_tokens_to_sample": 20, - "temperature": 0.5 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "15d5e700-8347-4c3d-9981-602df847a111", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: \" Yes, you can write the config data to the file without using the tomli module. \n\nSince the config is a python dictionary, you can directly write it to the file in JSON format using the json module:\n\n```python\nimport json \nimport os\n\nuser_config_path = \"litellm.secrets.json\" \n\nif os.path.exists(user_config_path):\n with open(user_config_path) as f:\n config = json.load(f)\nelse:\n config = {}\n\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\n\nwith open(user_config_path, 'w') as f:\n json.dump(config, f)\n```\n\nThe key points:\n\n- Load config using json.load() and write with json.dump() \n- This will store the data in JSON format rather than TOML\n- Change file extension to .json\n\nNo need for the tomli module. The config will be saved to the file in JSON format.\n\nLet me know if you have any other questions!\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: \\\" Yes, you can write the config data to the file without using the tomli module. \\n\\nSince the config is a python dictionary, you can directly write it to the file in JSON format using the json module:\\n\\n```python\\nimport json \\nimport os\\n\\nuser_config_path = \\\"litellm.secrets.json\\\" \\n\\nif os.path.exists(user_config_path):\\n with open(user_config_path) as f:\\n config = json.load(f)\\nelse:\\n config = {}\\n\\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\\n\\nwith open(user_config_path, 'w') as f:\\n json.dump(config, f)\\n```\\n\\nThe key points:\\n\\n- Load config using json.load() and write with json.dump() \\n- This will store the data in JSON format rather than TOML\\n- Change file extension to .json\\n\\nNo need for the tomli module. The config will be saved to the file in JSON format.\\n\\nLet me know if you have any other questions!\\\"\\n\\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:\\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" - }, - "log_event_type": "post_api_call", - "original_response": { - "completion": " Writing config data to file without tomli module", - "stop_reason": "stop_sequence" - } - } - }, - "20231013221146823431": { - "pre_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "assistant", - "content": " Here is a Python script that adds a new dictionary item to the keys dictionary in the user_config_path file:\n\n```python\nimport toml\n\nuser_config_path = \"litellm.secrets.toml\"\n\n# Load the TOML file\nconfig = toml.load(user_config_path)\n\n# Add new key to the 'keys' dictionary \nconfig['keys']['new_key'] = 'new_value'\n\n# Write the updated config back to the file\nwith open(user_config_path, 'w') as f:\n toml.dump(config, f)\n```\n\nThe key steps are:\n\n1. Load the TOML file using `toml.load()`\n2. Add the new key-value pair to the 'keys' dictionary \n3. Write the updated config back to the file using `toml.dump()`\n\nThis will add {'new_key': 'new_value'} to the 'keys' dictionary in the given TOML file." - }, - { - "role": "user", - "content": "handle for the case that the file doesn't exist" - }, - { - "role": "assistant", - "content": " Good point! We should check if the file exists before trying to load it. Here is an updated script that handles the case where the file does not exist:\n\n```python\nimport toml\nimport os\n\nuser_config_path = \"litellm.secrets.toml\"\n\n# Check if file exists\nif os.path.exists(user_config_path):\n # Load existing file\n config = toml.load(user_config_path) \nelse:\n # File doesn't exist, create empty config\n config = {}\n\n# Add new key \nconfig.setdefault('keys', {})['new_key'] = 'new_value'\n\n# Write config to file \nwith open(user_config_path, 'w') as f:\n toml.dump(config, f)\n```\n\nThe key changes:\n\n- Check if file exists using `os.path.exists()`\n- If it doesn't exist, initialize an empty config dict\n- Use `setdefault()` to safely add the 'keys' sub-dict if needed\n- Write the config to file, which will create the file if it didn't exist\n\nThis handles the case where the file doesn't exist yet and creates it with the new key added." - }, - { - "role": "user", - "content": " tomllib.dump(config, f)\n ^^^^^^^^^^^^\nAttributeError: module 'tomli' has no attribute 'dump'" - }, - { - "role": "assistant", - "content": " You're right, my previous code examples had a typo using `toml.dump()` instead of `toml.dump()`. \n\nSince you're getting an AttributeError for `tomllib`, it seems you are using the `tomli` module rather than `toml`.\n\nHere is the fixed code using `tomli` to load and dump the config:\n\n```python\nimport tomli\nimport os\n\nuser_config_path = \"litellm.secrets.toml\"\n\nif os.path.exists(user_config_path):\n config = tomli.load(user_config_path) \nelse:\n config = {}\n\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\n\nwith open(user_config_path, 'w') as f:\n tomli.dump(config, f)\n```\n\nThe key changes:\n\n- Use `tomli` instead of `toml`\n- Call `tomli.dump()` to write out the config\n\nLet me know if this helps resolve the AttributeError!" - }, - { - "role": "user", - "content": "can you write config to the filepath without using tomli?" - }, - { - "role": "assistant", - "content": " Yes, you can write the config data to the file without using the tomli module. 
\n\nSince the config is a python dictionary, you can directly write it to the file in JSON format using the json module:\n\n```python\nimport json \nimport os\n\nuser_config_path = \"litellm.secrets.json\" \n\nif os.path.exists(user_config_path):\n with open(user_config_path) as f:\n config = json.load(f)\nelse:\n config = {}\n\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\n\nwith open(user_config_path, 'w') as f:\n json.dump(config, f)\n```\n\nThe key points:\n\n- Load config using json.load() and write with json.dump() \n- This will store the data in JSON format rather than TOML\n- Change file extension to .json\n\nNo need for the tomli module. The config will be saved to the file in JSON format.\n\nLet me know if you have any other questions!" - }, - { - "role": "user", - "content": "no i need to do it as a toml" - } - ], - "optional_params": { - "max_tokens_to_sample": 1024, - "temperature": 0.5, - "stream": true - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "4ad531ac-dabd-42b6-98eb-5f06f9bab106", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nAssistant: Here is a Python script that adds a new dictionary item to the keys dictionary in the user_config_path file:\n\n```python\nimport toml\n\nuser_config_path = \"litellm.secrets.toml\"\n\n# Load the TOML file\nconfig = toml.load(user_config_path)\n\n# Add new key to the 'keys' dictionary \nconfig['keys']['new_key'] = 'new_value'\n\n# Write the updated config back to the file\nwith open(user_config_path, 'w') as f:\n toml.dump(config, f)\n```\n\nThe key steps are:\n\n1. Load the TOML file using `toml.load()`\n2. Add the new key-value pair to the 'keys' dictionary \n3. Write the updated config back to the file using `toml.dump()`\n\nThis will add {'new_key': 'new_value'} to the 'keys' dictionary in the given TOML file.\n\nHuman: handle for the case that the file doesn't exist\n\nAssistant: Good point! We should check if the file exists before trying to load it. Here is an updated script that handles the case where the file does not exist:\n\n```python\nimport toml\nimport os\n\nuser_config_path = \"litellm.secrets.toml\"\n\n# Check if file exists\nif os.path.exists(user_config_path):\n # Load existing file\n config = toml.load(user_config_path) \nelse:\n # File doesn't exist, create empty config\n config = {}\n\n# Add new key \nconfig.setdefault('keys', {})['new_key'] = 'new_value'\n\n# Write config to file \nwith open(user_config_path, 'w') as f:\n toml.dump(config, f)\n```\n\nThe key changes:\n\n- Check if file exists using `os.path.exists()`\n- If it doesn't exist, initialize an empty config dict\n- Use `setdefault()` to safely add the 'keys' sub-dict if needed\n- Write the config to file, which will create the file if it didn't exist\n\nThis handles the case where the file doesn't exist yet and creates it with the new key added.\n\nHuman: tomllib.dump(config, f)\n ^^^^^^^^^^^^\nAttributeError: module 'tomli' has no attribute 'dump'\n\nAssistant: You're right, my previous code examples had a typo using `toml.dump()` instead of `toml.dump()`. 
\n\nSince you're getting an AttributeError for `tomllib`, it seems you are using the `tomli` module rather than `toml`.\n\nHere is the fixed code using `tomli` to load and dump the config:\n\n```python\nimport tomli\nimport os\n\nuser_config_path = \"litellm.secrets.toml\"\n\nif os.path.exists(user_config_path):\n config = tomli.load(user_config_path) \nelse:\n config = {}\n\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\n\nwith open(user_config_path, 'w') as f:\n tomli.dump(config, f)\n```\n\nThe key changes:\n\n- Use `tomli` instead of `toml`\n- Call `tomli.dump()` to write out the config\n\nLet me know if this helps resolve the AttributeError!\n\nHuman: can you write config to the filepath without using tomli?\n\nAssistant: Yes, you can write the config data to the file without using the tomli module. \n\nSince the config is a python dictionary, you can directly write it to the file in JSON format using the json module:\n\n```python\nimport json \nimport os\n\nuser_config_path = \"litellm.secrets.json\" \n\nif os.path.exists(user_config_path):\n with open(user_config_path) as f:\n config = json.load(f)\nelse:\n config = {}\n\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\n\nwith open(user_config_path, 'w') as f:\n json.dump(config, f)\n```\n\nThe key points:\n\n- Load config using json.load() and write with json.dump() \n- This will store the data in JSON format rather than TOML\n- Change file extension to .json\n\nNo need for the tomli module. The config will be saved to the file in JSON format.\n\nLet me know if you have any other questions!\n\nHuman: no i need to do it as a toml\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nAssistant: Here is a Python script that adds a new dictionary item to the keys dictionary in the user_config_path file:\\n\\n```python\\nimport toml\\n\\nuser_config_path = \\\"litellm.secrets.toml\\\"\\n\\n# Load the TOML file\\nconfig = toml.load(user_config_path)\\n\\n# Add new key to the 'keys' dictionary \\nconfig['keys']['new_key'] = 'new_value'\\n\\n# Write the updated config back to the file\\nwith open(user_config_path, 'w') as f:\\n toml.dump(config, f)\\n```\\n\\nThe key steps are:\\n\\n1. Load the TOML file using `toml.load()`\\n2. Add the new key-value pair to the 'keys' dictionary \\n3. Write the updated config back to the file using `toml.dump()`\\n\\nThis will add {'new_key': 'new_value'} to the 'keys' dictionary in the given TOML file.\\n\\nHuman: handle for the case that the file doesn't exist\\n\\nAssistant: Good point! We should check if the file exists before trying to load it. 
Here is an updated script that handles the case where the file does not exist:\\n\\n```python\\nimport toml\\nimport os\\n\\nuser_config_path = \\\"litellm.secrets.toml\\\"\\n\\n# Check if file exists\\nif os.path.exists(user_config_path):\\n # Load existing file\\n config = toml.load(user_config_path) \\nelse:\\n # File doesn't exist, create empty config\\n config = {}\\n\\n# Add new key \\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\\n\\n# Write config to file \\nwith open(user_config_path, 'w') as f:\\n toml.dump(config, f)\\n```\\n\\nThe key changes:\\n\\n- Check if file exists using `os.path.exists()`\\n- If it doesn't exist, initialize an empty config dict\\n- Use `setdefault()` to safely add the 'keys' sub-dict if needed\\n- Write the config to file, which will create the file if it didn't exist\\n\\nThis handles the case where the file doesn't exist yet and creates it with the new key added.\\n\\nHuman: tomllib.dump(config, f)\\n ^^^^^^^^^^^^\\nAttributeError: module 'tomli' has no attribute 'dump'\\n\\nAssistant: You're right, my previous code examples had a typo using `toml.dump()` instead of `toml.dump()`. \\n\\nSince you're getting an AttributeError for `tomllib`, it seems you are using the `tomli` module rather than `toml`.\\n\\nHere is the fixed code using `tomli` to load and dump the config:\\n\\n```python\\nimport tomli\\nimport os\\n\\nuser_config_path = \\\"litellm.secrets.toml\\\"\\n\\nif os.path.exists(user_config_path):\\n config = tomli.load(user_config_path) \\nelse:\\n config = {}\\n\\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\\n\\nwith open(user_config_path, 'w') as f:\\n tomli.dump(config, f)\\n```\\n\\nThe key changes:\\n\\n- Use `tomli` instead of `toml`\\n- Call `tomli.dump()` to write out the config\\n\\nLet me know if this helps resolve the AttributeError!\\n\\nHuman: can you write config to the filepath without using tomli?\\n\\nAssistant: Yes, you can write the config data to the file without using the tomli module. \\n\\nSince the config is a python dictionary, you can directly write it to the file in JSON format using the json module:\\n\\n```python\\nimport json \\nimport os\\n\\nuser_config_path = \\\"litellm.secrets.json\\\" \\n\\nif os.path.exists(user_config_path):\\n with open(user_config_path) as f:\\n config = json.load(f)\\nelse:\\n config = {}\\n\\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\\n\\nwith open(user_config_path, 'w') as f:\\n json.dump(config, f)\\n```\\n\\nThe key points:\\n\\n- Load config using json.load() and write with json.dump() \\n- This will store the data in JSON format rather than TOML\\n- Change file extension to .json\\n\\nNo need for the tomli module. The config will be saved to the file in JSON format.\\n\\nLet me know if you have any other questions!\\n\\nHuman: no i need to do it as a toml\\n\\nAssistant: \", \"max_tokens_to_sample\": 1024, \"temperature\": 0.5}" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "assistant", - "content": " Here is a Python script that adds a new dictionary item to the keys dictionary in the user_config_path file:\n\n```python\nimport toml\n\nuser_config_path = \"litellm.secrets.toml\"\n\n# Load the TOML file\nconfig = toml.load(user_config_path)\n\n# Add new key to the 'keys' dictionary \nconfig['keys']['new_key'] = 'new_value'\n\n# Write the updated config back to the file\nwith open(user_config_path, 'w') as f:\n toml.dump(config, f)\n```\n\nThe key steps are:\n\n1. 
Load the TOML file using `toml.load()`\n2. Add the new key-value pair to the 'keys' dictionary \n3. Write the updated config back to the file using `toml.dump()`\n\nThis will add {'new_key': 'new_value'} to the 'keys' dictionary in the given TOML file." - }, - { - "role": "user", - "content": "handle for the case that the file doesn't exist" - }, - { - "role": "assistant", - "content": " Good point! We should check if the file exists before trying to load it. Here is an updated script that handles the case where the file does not exist:\n\n```python\nimport toml\nimport os\n\nuser_config_path = \"litellm.secrets.toml\"\n\n# Check if file exists\nif os.path.exists(user_config_path):\n # Load existing file\n config = toml.load(user_config_path) \nelse:\n # File doesn't exist, create empty config\n config = {}\n\n# Add new key \nconfig.setdefault('keys', {})['new_key'] = 'new_value'\n\n# Write config to file \nwith open(user_config_path, 'w') as f:\n toml.dump(config, f)\n```\n\nThe key changes:\n\n- Check if file exists using `os.path.exists()`\n- If it doesn't exist, initialize an empty config dict\n- Use `setdefault()` to safely add the 'keys' sub-dict if needed\n- Write the config to file, which will create the file if it didn't exist\n\nThis handles the case where the file doesn't exist yet and creates it with the new key added." - }, - { - "role": "user", - "content": " tomllib.dump(config, f)\n ^^^^^^^^^^^^\nAttributeError: module 'tomli' has no attribute 'dump'" - }, - { - "role": "assistant", - "content": " You're right, my previous code examples had a typo using `toml.dump()` instead of `toml.dump()`. \n\nSince you're getting an AttributeError for `tomllib`, it seems you are using the `tomli` module rather than `toml`.\n\nHere is the fixed code using `tomli` to load and dump the config:\n\n```python\nimport tomli\nimport os\n\nuser_config_path = \"litellm.secrets.toml\"\n\nif os.path.exists(user_config_path):\n config = tomli.load(user_config_path) \nelse:\n config = {}\n\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\n\nwith open(user_config_path, 'w') as f:\n tomli.dump(config, f)\n```\n\nThe key changes:\n\n- Use `tomli` instead of `toml`\n- Call `tomli.dump()` to write out the config\n\nLet me know if this helps resolve the AttributeError!" - }, - { - "role": "user", - "content": "can you write config to the filepath without using tomli?" - }, - { - "role": "assistant", - "content": " Yes, you can write the config data to the file without using the tomli module. \n\nSince the config is a python dictionary, you can directly write it to the file in JSON format using the json module:\n\n```python\nimport json \nimport os\n\nuser_config_path = \"litellm.secrets.json\" \n\nif os.path.exists(user_config_path):\n with open(user_config_path) as f:\n config = json.load(f)\nelse:\n config = {}\n\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\n\nwith open(user_config_path, 'w') as f:\n json.dump(config, f)\n```\n\nThe key points:\n\n- Load config using json.load() and write with json.dump() \n- This will store the data in JSON format rather than TOML\n- Change file extension to .json\n\nNo need for the tomli module. The config will be saved to the file in JSON format.\n\nLet me know if you have any other questions!" 
- }, - { - "role": "user", - "content": "no i need to do it as a toml" - } - ], - "optional_params": { - "max_tokens_to_sample": 1024, - "temperature": 0.5, - "stream": true - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "4ad531ac-dabd-42b6-98eb-5f06f9bab106", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": null, - "api_key": null, - "additional_args": {}, - "log_event_type": "post_api_call", - "original_response": "", - "complete_streaming_response": { - "id": "chatcmpl-a5ca9cec-d787-48a6-b9ac-96a96f3d4e2a", - "object": "chat.completion.chunk", - "created": 1697260307, - "model": "anthropic.claude-v2", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": " Okay, to write the config dictionary to a TOML file without using the tomli module, you can do:\n\n```python\nimport os\n\nuser_config_path = \"litellm.secrets.toml\"\n\nif os.path.exists(user_config_path):\n with open(user_config_path) as f:\n config = # parse file as toml\nelse:\n config = {} \n\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\n\nwith open(user_config_path, 'w') as f:\n for section, data in config.items():\n f.write('[%s]\\n' % section)\n for k, v in data.items():\n f.write('%s = \"%s\"\\n' % (k, v))\n```\n\nThe key points:\n\n- Manually write the TOML formatting to the file \n- Use [section] headers for each dict key\n- Output key = \"value\" lines for each item\n\nThis handles writing the dict in TOML format without using the toml module. You would still need to parse the existing file as TOML if it exists. Let me know if this helps!" - }, - "finish_reason": "stop_sequence" - } - ] - } - } - }, - "20231013221210098068": { - "pre_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "system", - "content": "\" Okay, to write the config dictionary to a TOML file without using the tomli module, you can do:\n\n```python\nimport os\n\nuser_config_path = \"litellm.secrets.toml\"\n\nif os.path.exists(user_config_path):\n with open(user_config_path) as f:\n config = # parse file as toml\nelse:\n config = {} \n\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\n\nwith open(user_config_path, 'w') as f:\n for section, data in config.items():\n f.write('[%s]\\n' % section)\n for k, v in data.items():\n f.write('%s = \"%s\"\\n' % (k, v))\n```\n\nThe key points:\n\n- Manually write the TOML formatting to the file \n- Use [section] headers for each dict key\n- Output key = \"value\" lines for each item\n\nThis handles writing the dict in TOML format without using the toml module. You would still need to parse the existing file as TOML if it exists. Let me know if this helps!\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:" - } - ], - "optional_params": { - "max_tokens_to_sample": 20, - "temperature": 0.5 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "dcb98526-c8b3-4574-b48a-36e02ef32793", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: \" Okay, to write the config dictionary to a TOML file without using the tomli module, you can do:\n\n```python\nimport os\n\nuser_config_path = \"litellm.secrets.toml\"\n\nif os.path.exists(user_config_path):\n with open(user_config_path) as f:\n config = # parse file as toml\nelse:\n config = {} \n\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\n\nwith open(user_config_path, 'w') as f:\n for section, data in config.items():\n f.write('[%s]\\n' % section)\n for k, v in data.items():\n f.write('%s = \"%s\"\\n' % (k, v))\n```\n\nThe key points:\n\n- Manually write the TOML formatting to the file \n- Use [section] headers for each dict key\n- Output key = \"value\" lines for each item\n\nThis handles writing the dict in TOML format without using the toml module. You would still need to parse the existing file as TOML if it exists. Let me know if this helps!\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: \\\" Okay, to write the config dictionary to a TOML file without using the tomli module, you can do:\\n\\n```python\\nimport os\\n\\nuser_config_path = \\\"litellm.secrets.toml\\\"\\n\\nif os.path.exists(user_config_path):\\n with open(user_config_path) as f:\\n config = # parse file as toml\\nelse:\\n config = {} \\n\\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\\n\\nwith open(user_config_path, 'w') as f:\\n for section, data in config.items():\\n f.write('[%s]\\\\n' % section)\\n for k, v in data.items():\\n f.write('%s = \\\"%s\\\"\\\\n' % (k, v))\\n```\\n\\nThe key points:\\n\\n- Manually write the TOML formatting to the file \\n- Use [section] headers for each dict key\\n- Output key = \\\"value\\\" lines for each item\\n\\nThis handles writing the dict in TOML format without using the toml module. You would still need to parse the existing file as TOML if it exists. Let me know if this helps!\\\"\\n\\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:\\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" - }, - "log_event_type": "pre_api_call" - }, - "post_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "system", - "content": "\" Okay, to write the config dictionary to a TOML file without using the tomli module, you can do:\n\n```python\nimport os\n\nuser_config_path = \"litellm.secrets.toml\"\n\nif os.path.exists(user_config_path):\n with open(user_config_path) as f:\n config = # parse file as toml\nelse:\n config = {} \n\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\n\nwith open(user_config_path, 'w') as f:\n for section, data in config.items():\n f.write('[%s]\\n' % section)\n for k, v in data.items():\n f.write('%s = \"%s\"\\n' % (k, v))\n```\n\nThe key points:\n\n- Manually write the TOML formatting to the file \n- Use [section] headers for each dict key\n- Output key = \"value\" lines for each item\n\nThis handles writing the dict in TOML format without using the toml module. You would still need to parse the existing file as TOML if it exists. Let me know if this helps!\"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:" - } - ], - "optional_params": { - "max_tokens_to_sample": 20, - "temperature": 0.5 - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "dcb98526-c8b3-4574-b48a-36e02ef32793", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: \" Okay, to write the config dictionary to a TOML file without using the tomli module, you can do:\n\n```python\nimport os\n\nuser_config_path = \"litellm.secrets.toml\"\n\nif os.path.exists(user_config_path):\n with open(user_config_path) as f:\n config = # parse file as toml\nelse:\n config = {} \n\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\n\nwith open(user_config_path, 'w') as f:\n for section, data in config.items():\n f.write('[%s]\\n' % section)\n for k, v in data.items():\n f.write('%s = \"%s\"\\n' % (k, v))\n```\n\nThe key points:\n\n- Manually write the TOML formatting to the file \n- Use [section] headers for each dict key\n- Output key = \"value\" lines for each item\n\nThis handles writing the dict in TOML format without using the toml module. You would still need to parse the existing file as TOML if it exists. Let me know if this helps!\"\n\nPlease write a short title summarizing the message quoted above. 
Use no more than 10 words:\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: \\\" Okay, to write the config dictionary to a TOML file without using the tomli module, you can do:\\n\\n```python\\nimport os\\n\\nuser_config_path = \\\"litellm.secrets.toml\\\"\\n\\nif os.path.exists(user_config_path):\\n with open(user_config_path) as f:\\n config = # parse file as toml\\nelse:\\n config = {} \\n\\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\\n\\nwith open(user_config_path, 'w') as f:\\n for section, data in config.items():\\n f.write('[%s]\\\\n' % section)\\n for k, v in data.items():\\n f.write('%s = \\\"%s\\\"\\\\n' % (k, v))\\n```\\n\\nThe key points:\\n\\n- Manually write the TOML formatting to the file \\n- Use [section] headers for each dict key\\n- Output key = \\\"value\\\" lines for each item\\n\\nThis handles writing the dict in TOML format without using the toml module. You would still need to parse the existing file as TOML if it exists. Let me know if this helps!\\\"\\n\\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:\\n\\nAssistant: \", \"max_tokens_to_sample\": 20, \"temperature\": 0.5}" - }, - "log_event_type": "post_api_call", - "original_response": { - "completion": " Writing config dict to TOML file without tomli module", - "stop_reason": "stop_sequence" - } - } - }, - "20231013222038119973": { - "pre_api_call": { - "model": "anthropic.claude-v2", - "messages": [ - { - "role": "user", - "content": "handle for the case that the file doesn't exist" - }, - { - "role": "assistant", - "content": " Good point! We should check if the file exists before trying to load it. Here is an updated script that handles the case where the file does not exist:\n\n```python\nimport toml\nimport os\n\nuser_config_path = \"litellm.secrets.toml\"\n\n# Check if file exists\nif os.path.exists(user_config_path):\n # Load existing file\n config = toml.load(user_config_path) \nelse:\n # File doesn't exist, create empty config\n config = {}\n\n# Add new key \nconfig.setdefault('keys', {})['new_key'] = 'new_value'\n\n# Write config to file \nwith open(user_config_path, 'w') as f:\n toml.dump(config, f)\n```\n\nThe key changes:\n\n- Check if file exists using `os.path.exists()`\n- If it doesn't exist, initialize an empty config dict\n- Use `setdefault()` to safely add the 'keys' sub-dict if needed\n- Write the config to file, which will create the file if it didn't exist\n\nThis handles the case where the file doesn't exist yet and creates it with the new key added." - }, - { - "role": "user", - "content": " tomllib.dump(config, f)\n ^^^^^^^^^^^^\nAttributeError: module 'tomli' has no attribute 'dump'" - }, - { - "role": "assistant", - "content": " You're right, my previous code examples had a typo using `toml.dump()` instead of `toml.dump()`. 
\n\nSince you're getting an AttributeError for `tomllib`, it seems you are using the `tomli` module rather than `toml`.\n\nHere is the fixed code using `tomli` to load and dump the config:\n\n```python\nimport tomli\nimport os\n\nuser_config_path = \"litellm.secrets.toml\"\n\nif os.path.exists(user_config_path):\n config = tomli.load(user_config_path) \nelse:\n config = {}\n\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\n\nwith open(user_config_path, 'w') as f:\n tomli.dump(config, f)\n```\n\nThe key changes:\n\n- Use `tomli` instead of `toml`\n- Call `tomli.dump()` to write out the config\n\nLet me know if this helps resolve the AttributeError!" - }, - { - "role": "user", - "content": "can you write config to the filepath without using tomli?" - }, - { - "role": "assistant", - "content": " Yes, you can write the config data to the file without using the tomli module. \n\nSince the config is a python dictionary, you can directly write it to the file in JSON format using the json module:\n\n```python\nimport json \nimport os\n\nuser_config_path = \"litellm.secrets.json\" \n\nif os.path.exists(user_config_path):\n with open(user_config_path) as f:\n config = json.load(f)\nelse:\n config = {}\n\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\n\nwith open(user_config_path, 'w') as f:\n json.dump(config, f)\n```\n\nThe key points:\n\n- Load config using json.load() and write with json.dump() \n- This will store the data in JSON format rather than TOML\n- Change file extension to .json\n\nNo need for the tomli module. The config will be saved to the file in JSON format.\n\nLet me know if you have any other questions!" - }, - { - "role": "user", - "content": "no i need to do it as a toml" - }, - { - "role": "user", - "content": "no i need to do it as a toml" - } - ], - "optional_params": { - "max_tokens_to_sample": 1024, - "temperature": 0.5, - "stream": true - }, - "litellm_params": { - "return_async": false, - "api_key": null, - "force_timeout": 600, - "logger_fn": null, - "verbose": false, - "custom_llm_provider": "bedrock", - "api_base": null, - "litellm_call_id": "558b3b41-7936-40e4-aafc-dc01c155fe82", - "model_alias_map": {}, - "completion_call_id": null, - "metadata": null, - "stream_response": {} - }, - "input": "\n\nHuman: \n\nHuman: handle for the case that the file doesn't exist\n\nAssistant: Good point! We should check if the file exists before trying to load it. Here is an updated script that handles the case where the file does not exist:\n\n```python\nimport toml\nimport os\n\nuser_config_path = \"litellm.secrets.toml\"\n\n# Check if file exists\nif os.path.exists(user_config_path):\n # Load existing file\n config = toml.load(user_config_path) \nelse:\n # File doesn't exist, create empty config\n config = {}\n\n# Add new key \nconfig.setdefault('keys', {})['new_key'] = 'new_value'\n\n# Write config to file \nwith open(user_config_path, 'w') as f:\n toml.dump(config, f)\n```\n\nThe key changes:\n\n- Check if file exists using `os.path.exists()`\n- If it doesn't exist, initialize an empty config dict\n- Use `setdefault()` to safely add the 'keys' sub-dict if needed\n- Write the config to file, which will create the file if it didn't exist\n\nThis handles the case where the file doesn't exist yet and creates it with the new key added.\n\nHuman: tomllib.dump(config, f)\n ^^^^^^^^^^^^\nAttributeError: module 'tomli' has no attribute 'dump'\n\nAssistant: You're right, my previous code examples had a typo using `toml.dump()` instead of `toml.dump()`. 
\n\nSince you're getting an AttributeError for `tomllib`, it seems you are using the `tomli` module rather than `toml`.\n\nHere is the fixed code using `tomli` to load and dump the config:\n\n```python\nimport tomli\nimport os\n\nuser_config_path = \"litellm.secrets.toml\"\n\nif os.path.exists(user_config_path):\n config = tomli.load(user_config_path) \nelse:\n config = {}\n\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\n\nwith open(user_config_path, 'w') as f:\n tomli.dump(config, f)\n```\n\nThe key changes:\n\n- Use `tomli` instead of `toml`\n- Call `tomli.dump()` to write out the config\n\nLet me know if this helps resolve the AttributeError!\n\nHuman: can you write config to the filepath without using tomli?\n\nAssistant: Yes, you can write the config data to the file without using the tomli module. \n\nSince the config is a python dictionary, you can directly write it to the file in JSON format using the json module:\n\n```python\nimport json \nimport os\n\nuser_config_path = \"litellm.secrets.json\" \n\nif os.path.exists(user_config_path):\n with open(user_config_path) as f:\n config = json.load(f)\nelse:\n config = {}\n\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\n\nwith open(user_config_path, 'w') as f:\n json.dump(config, f)\n```\n\nThe key points:\n\n- Load config using json.load() and write with json.dump() \n- This will store the data in JSON format rather than TOML\n- Change file extension to .json\n\nNo need for the tomli module. The config will be saved to the file in JSON format.\n\nLet me know if you have any other questions!\n\nHuman: no i need to do it as a toml\n\nHuman: no i need to do it as a toml\n\nAssistant: ", - "api_key": "", - "additional_args": { - "complete_input_dict": "{\"prompt\": \"\\n\\nHuman: \\n\\nHuman: handle for the case that the file doesn't exist\\n\\nAssistant: Good point! We should check if the file exists before trying to load it. Here is an updated script that handles the case where the file does not exist:\\n\\n```python\\nimport toml\\nimport os\\n\\nuser_config_path = \\\"litellm.secrets.toml\\\"\\n\\n# Check if file exists\\nif os.path.exists(user_config_path):\\n # Load existing file\\n config = toml.load(user_config_path) \\nelse:\\n # File doesn't exist, create empty config\\n config = {}\\n\\n# Add new key \\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\\n\\n# Write config to file \\nwith open(user_config_path, 'w') as f:\\n toml.dump(config, f)\\n```\\n\\nThe key changes:\\n\\n- Check if file exists using `os.path.exists()`\\n- If it doesn't exist, initialize an empty config dict\\n- Use `setdefault()` to safely add the 'keys' sub-dict if needed\\n- Write the config to file, which will create the file if it didn't exist\\n\\nThis handles the case where the file doesn't exist yet and creates it with the new key added.\\n\\nHuman: tomllib.dump(config, f)\\n ^^^^^^^^^^^^\\nAttributeError: module 'tomli' has no attribute 'dump'\\n\\nAssistant: You're right, my previous code examples had a typo using `toml.dump()` instead of `toml.dump()`. 
\\n\\nSince you're getting an AttributeError for `tomllib`, it seems you are using the `tomli` module rather than `toml`.\\n\\nHere is the fixed code using `tomli` to load and dump the config:\\n\\n```python\\nimport tomli\\nimport os\\n\\nuser_config_path = \\\"litellm.secrets.toml\\\"\\n\\nif os.path.exists(user_config_path):\\n config = tomli.load(user_config_path) \\nelse:\\n config = {}\\n\\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\\n\\nwith open(user_config_path, 'w') as f:\\n tomli.dump(config, f)\\n```\\n\\nThe key changes:\\n\\n- Use `tomli` instead of `toml`\\n- Call `tomli.dump()` to write out the config\\n\\nLet me know if this helps resolve the AttributeError!\\n\\nHuman: can you write config to the filepath without using tomli?\\n\\nAssistant: Yes, you can write the config data to the file without using the tomli module. \\n\\nSince the config is a python dictionary, you can directly write it to the file in JSON format using the json module:\\n\\n```python\\nimport json \\nimport os\\n\\nuser_config_path = \\\"litellm.secrets.json\\\" \\n\\nif os.path.exists(user_config_path):\\n with open(user_config_path) as f:\\n config = json.load(f)\\nelse:\\n config = {}\\n\\nconfig.setdefault('keys', {})['new_key'] = 'new_value'\\n\\nwith open(user_config_path, 'w') as f:\\n json.dump(config, f)\\n```\\n\\nThe key points:\\n\\n- Load config using json.load() and write with json.dump() \\n- This will store the data in JSON format rather than TOML\\n- Change file extension to .json\\n\\nNo need for the tomli module. The config will be saved to the file in JSON format.\\n\\nLet me know if you have any other questions!\\n\\nHuman: no i need to do it as a toml\\n\\nHuman: no i need to do it as a toml\\n\\nAssistant: \", \"max_tokens_to_sample\": 1024, \"temperature\": 0.5}" - }, - "log_event_type": "pre_api_call" - } - } -} \ No newline at end of file + "end_time": \ No newline at end of file diff --git a/litellm/proxy/proxy_cli.py b/litellm/proxy/proxy_cli.py index e1f3fc5879..bd8ef8cc73 100644 --- a/litellm/proxy/proxy_cli.py +++ b/litellm/proxy/proxy_cli.py @@ -88,13 +88,15 @@ def is_port_in_use(port): @click.option('--port', default=8000, help='Port to bind the server to.') @click.option('--api_base', default=None, help='API base URL.') @click.option('--model', default=None, help='The model name to pass to litellm expects') +@click.option('--alias', default=None, help='The alias for the model - use this to give a litellm model name (e.g. 
"huggingface/codellama/CodeLlama-7b-Instruct-hf") a more user-friendly name ("codellama")') @click.option('--add_key', default=None, help='The model name to pass to litellm expects') +@click.option('--headers', default=None, help='headers for the API call') @click.option('--deploy', is_flag=True, type=bool, help='Get a deployed proxy endpoint - api.litellm.ai') +@click.option('--save', is_flag=True, type=bool, help='Save the model-specific config') @click.option('--debug', default=False, is_flag=True, type=bool, help='To debug the input') @click.option('--temperature', default=None, type=float, help='Set temperature for the model') @click.option('--max_tokens', default=None, type=int, help='Set max tokens for the model') @click.option('--drop_params', is_flag=True, help='Drop any unmapped params') -@click.option('--save', is_flag=True, help='Save params to config, to persist across restarts') @click.option('--create_proxy', is_flag=True, help='Creates a local OpenAI-compatible server template') @click.option('--add_function_to_prompt', is_flag=True, help='If function passed but unsupported, pass it as prompt') @click.option('--config', '-c', is_flag=True, help='Configure Litellm') @@ -105,7 +107,7 @@ def is_port_in_use(port): @click.option('--test', flag_value=True, help='proxy chat completions url to make a test request to') @click.option('--local', is_flag=True, default=False, help='for local debugging') @click.option('--cost', is_flag=True, default=False, help='for viewing cost logs') -def run_server(host, port, api_base, model, add_key, deploy, debug, temperature, max_tokens, drop_params, create_proxy, add_function_to_prompt, config, file, max_budget, telemetry, logs, test, local, cost, save): +def run_server(host, port, api_base, model, alias, add_key, headers, deploy, save, debug, temperature, max_tokens, drop_params, create_proxy, add_function_to_prompt, config, file, max_budget, telemetry, logs, test, local, cost): global feature_telemetry args = locals() if local: @@ -133,19 +135,22 @@ def run_server(host, port, api_base, model, add_key, deploy, debug, temperature, if logs is not None: if logs == 0: # default to 1 logs = 1 - with open('api_log.json') as f: - data = json.load(f) + try: + with open('api_log.json') as f: + data = json.load(f) - # convert keys to datetime objects - log_times = {datetime.strptime(k, "%Y%m%d%H%M%S%f"): v for k, v in data.items()} + # convert keys to datetime objects + log_times = {datetime.strptime(k, "%Y%m%d%H%M%S%f"): v for k, v in data.items()} - # sort by timestamp - sorted_times = sorted(log_times.items(), key=operator.itemgetter(0), reverse=True) + # sort by timestamp + sorted_times = sorted(log_times.items(), key=operator.itemgetter(0), reverse=True) - # get n recent logs - recent_logs = {k.strftime("%Y%m%d%H%M%S%f"): v for k, v in sorted_times[:logs]} + # get n recent logs + recent_logs = {k.strftime("%Y%m%d%H%M%S%f"): v for k, v in sorted_times[:logs]} - print(json.dumps(recent_logs, indent=4)) + print(json.dumps(recent_logs, indent=4)) + except: + print("LiteLLM: No logs saved!") return if add_key: key_name, key_value = add_key.split("=") @@ -200,7 +205,9 @@ def run_server(host, port, api_base, model, add_key, deploy, debug, temperature, click.echo(f'LiteLLM: streaming response from proxy {chunk}') return else: - initialize(model, api_base, debug, temperature, max_tokens, max_budget, telemetry, drop_params, add_function_to_prompt) + if headers: + headers = json.loads(headers) + initialize(model=model, alias=alias, api_base=api_base, 
diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index 5bae9548cf..5db6022cf1 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -11,15 +11,17 @@ try:
     import fastapi
     import tomli as tomllib
     import appdirs
+    import tomli_w
 except ImportError:
     import subprocess
     import sys

-    subprocess.check_call([sys.executable, "-m", "pip", "install", "uvicorn", "fastapi", "tomli", "appdirs"])
+    subprocess.check_call([sys.executable, "-m", "pip", "install", "uvicorn", "fastapi", "tomli", "appdirs", "tomli-w"])
     import uvicorn
     import fastapi
     import tomli as tomllib
     import appdirs
+    import tomli_w

 import random
@@ -88,6 +90,7 @@ user_max_tokens = None
 user_temperature = None
 user_telemetry = True
 user_config = None
+user_headers = None
 config_filename = "litellm.secrets.toml"
 config_dir = os.getcwd()
 config_dir = appdirs.user_config_dir("litellm")
@@ -120,12 +123,41 @@ def add_keys_to_config(key, value):
     config.setdefault('keys', {})[key] = value

     # Write config to file
-    with open(user_config_path, 'w') as f:
-        for section, data in config.items():
-            f.write('[%s]\n' % section)
-            for k, v in data.items():
-                f.write('%s = "%s"\n' % (k, v))
+    with open(user_config_path, 'wb') as f:
+        tomli_w.dump(config, f)
+def save_params_to_config(data: dict):
+    # Check if file exists
+    if os.path.exists(user_config_path):
+        # Load existing file
+        with open(user_config_path, "rb") as f:
+            config = tomllib.load(f)
+    else:
+        # File doesn't exist, create empty config
+        config = {}
+
+    config.setdefault('general', {})
+
+    ## general config
+    general_settings = data["general"]
+
+    for key, value in general_settings.items():
+        config["general"][key] = value
+
+    ## model-specific config
+    config.setdefault("model", {})
+    config["model"].setdefault(user_model, {})
+
+    user_model_config = data[user_model]
+    model_key = user_model_config.pop("alias", user_model)
+    config["model"].setdefault(model_key, {})
+    for key, value in user_model_config.items():
+        config["model"][model_key][key] = value
+
+    # Write config to file
+    with open(user_config_path, 'wb') as f:
+        tomli_w.dump(config, f)
+
 def load_config():
     try:
@@ -138,7 +170,6 @@ def load_config():
         if "keys" in user_config:
             for key in user_config["keys"]:
                 os.environ[key] = user_config["keys"][key] # litellm can read keys from the environment
-
         ## settings
         if "general" in user_config:
             litellm.add_function_to_prompt = user_config["general"].get("add_function_to_prompt", True) # by default add function to prompt if unsupported by provider
@@ -191,24 +222,42 @@ def load_config():
     except Exception as e:
         pass

-def initialize(model, api_base, debug, temperature, max_tokens, max_budget, telemetry, drop_params, add_function_to_prompt):
-    global user_model, user_api_base, user_debug, user_max_tokens, user_temperature, user_telemetry
+def initialize(model, alias, api_base, debug, temperature, max_tokens, max_budget, telemetry, drop_params, add_function_to_prompt, headers, save):
+    global user_model, user_api_base, user_debug, user_max_tokens, user_temperature, user_telemetry, user_headers
     user_model = model
     user_debug = debug
-    load_config()
-    user_api_base = api_base
-    user_max_tokens = max_tokens
-    user_temperature = temperature
+    dynamic_config = {"general": {}, user_model: {}}
+    if headers: # model-specific param
+        user_headers = headers
+        dynamic_config[user_model]["headers"] = headers
+    if api_base: # model-specific param
+        user_api_base = api_base
+        dynamic_config[user_model]["api_base"] = api_base
+    if max_tokens: # model-specific param
+        user_max_tokens = max_tokens
+        dynamic_config[user_model]["max_tokens"] = max_tokens
+    if temperature: # model-specific param
+        user_temperature = temperature
+        dynamic_config[user_model]["temperature"] = temperature
+    if alias: # model-specific param
+        dynamic_config[user_model]["alias"] = alias
+    if drop_params == True: # litellm-specific param
+        litellm.drop_params = True
+        dynamic_config["general"]["drop_params"] = True
+    if add_function_to_prompt == True: # litellm-specific param
+        litellm.add_function_to_prompt = True
+        dynamic_config["general"]["add_function_to_prompt"] = True
+    if max_budget: # litellm-specific param
+        litellm.max_budget = max_budget
+        dynamic_config["general"]["max_budget"] = max_budget
+    if save:
+        save_params_to_config(dynamic_config)
+        with open(user_config_path) as f:
+            print(f.read())
+        print("\033[1;32mDone successfully\033[0m")
     user_telemetry = telemetry
     usage_telemetry(feature="local_proxy_server")
-    if drop_params == True:
-        litellm.drop_params = True
-    if add_function_to_prompt == True:
-        litellm.add_function_to_prompt = True
-    if max_budget:
-        litellm.max_budget = max_budget
-
 def deploy_proxy(model, api_base, debug, temperature, max_tokens, telemetry, deploy):
     import requests
@@ -354,9 +403,12 @@ def logger(
                 existing_data = {}

             existing_data.update(log_data)
-
-            with open(log_file, 'w') as f:
-                json.dump(existing_data, f, indent=2)
+            def write_to_log():
+                with open(log_file, 'w') as f:
+                    json.dump(existing_data, f, indent=2)
+
+            thread = threading.Thread(target=write_to_log, daemon=True)
+            thread.start()
         elif log_event_type == 'post_api_call':
             if "stream" not in kwargs["optional_params"] or kwargs["optional_params"]["stream"] is False or kwargs.get("complete_streaming_response", False):
                 inference_params = copy.deepcopy(kwargs)
@@ -367,9 +419,13 @@ def logger(
                     existing_data = json.load(f)

                 existing_data[dt_key]['post_api_call'] = inference_params
-
-                with open(log_file, 'w') as f:
-                    json.dump(existing_data, f, indent=2)
+
+                def write_to_log():
+                    with open(log_file, 'w') as f:
+                        json.dump(existing_data, f, indent=2)
+
+                thread = threading.Thread(target=write_to_log, daemon=True)
+                thread.start()
     except:
         traceback.print_exc()
@@ -388,6 +444,8 @@ def litellm_completion(data, type):
             data["max_tokens"] = user_max_tokens
         if user_api_base:
             data["api_base"] = user_api_base
+        if user_headers:
+            data["headers"] = user_headers
         if type == "completion":
             response = litellm.text_completion(**data)
         elif type == "chat_completion":
@@ -397,6 +455,7 @@ def litellm_completion(data, type):
         print_verbose(f"response: {response}")
         return response
     except Exception as e:
+        traceback.print_exc()
         if "Invalid response object from API" in str(e):
             completion_call_details = {}
             if user_model:
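Note on the proxy_server.py changes above: config writing now round-trips through tomli (read) and tomli-w (write) instead of hand-formatting [section] headers, and --save persists per-model flags via save_params_to_config. A minimal sketch of that TOML round-trip, assuming the same litellm.secrets.toml layout (the key name and value below are placeholders):

    import os
    import tomli as tomllib
    import tomli_w

    user_config_path = "litellm.secrets.toml"

    if os.path.exists(user_config_path):
        with open(user_config_path, "rb") as f:   # tomli only reads binary files
            config = tomllib.load(f)
    else:
        config = {}

    config.setdefault("keys", {})["SOME_API_KEY"] = "placeholder-value"

    with open(user_config_path, "wb") as f:       # tomli-w likewise writes bytes
        tomli_w.dump(config, f)
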
data_json and "text" in data_json["token"]: - text = data_json["token"]["text"] - if data_json.get("details", False) and data_json["details"].get("finish_reason", False): - is_finished = True - finish_reason = data_json["details"]["finish_reason"] - elif data_json.get("generated_text", False): # if full generated text exists, then stream is complete - text = "" # don't return the final bos token - is_finished = True - finish_reason = "stop" + try: + chunk = chunk.decode("utf-8") + text = "" + is_finished = False + finish_reason = "" + print_verbose(f"chunk: {chunk}") + if chunk.startswith("data:"): + data_json = json.loads(chunk[5:]) + print_verbose(f"data json: {data_json}") + if "token" in data_json and "text" in data_json["token"]: + text = data_json["token"]["text"] + if data_json.get("details", False) and data_json["details"].get("finish_reason", False): + is_finished = True + finish_reason = data_json["details"]["finish_reason"] + elif data_json.get("generated_text", False): # if full generated text exists, then stream is complete + text = "" # don't return the final bos token + is_finished = True + finish_reason = "stop" + return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason} + elif "error" in chunk: + raise ValueError(chunk) return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason} - elif "error" in chunk: - raise ValueError(chunk) - return {"text": text, "is_finished": is_finished, "finish_reason": finish_reason} + except Exception as e: + traceback.print_exc() + # raise(e) def handle_ai21_chunk(self, chunk): # fake streaming chunk = chunk.decode("utf-8") diff --git a/poetry.lock b/poetry.lock index 16dd209f4e..d3960c8fce 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. [[package]] name = "aiohttp" diff --git a/pyproject.toml b/pyproject.toml index 1803baf130..7d0a5dd6ff 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "litellm" -version = "0.8.4" +version = "0.8.5" description = "Library to easily interface with LLM API providers" authors = ["BerriAI"] license = "MIT License" @@ -26,7 +26,7 @@ requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" [tool.commitizen] -version = "0.8.4" +version = "0.8.5" version_files = [ "pyproject.toml:^version" ]