From fda6dba1ec2439015f317fc5c88c5c6d53d3a812 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Fri, 1 Sep 2023 11:51:49 -0700
Subject: [PATCH] fixes to core logging

---
 litellm/__pycache__/main.cpython-311.pyc  | Bin 28175 -> 28175 bytes
 litellm/__pycache__/utils.cpython-311.pyc | Bin 80294 -> 80382 bytes
 litellm/tests/test_logging.py             | 297 +++++++++++++++++++---
 litellm/utils.py                          |  22 +-
 pyproject.toml                            |   2 +-
 5 files changed, 274 insertions(+), 47 deletions(-)

diff --git a/litellm/__pycache__/main.cpython-311.pyc b/litellm/__pycache__/main.cpython-311.pyc
index d9973c270e607d3b26d039dfc4f8e3de9df5f56d..f89ab450209acf37fb1b076adf9f96d018ff5634 100644
Binary files a/litellm/__pycache__/main.cpython-311.pyc and b/litellm/__pycache__/main.cpython-311.pyc differ
diff --git a/litellm/__pycache__/utils.cpython-311.pyc b/litellm/__pycache__/utils.cpython-311.pyc
index 27f57b6daeff67388a2f6fb1bf5dd81fc425bf05..0cd1f95f6930b7e677cbe09c0b08d7f5e15afc76 100644
Binary files a/litellm/__pycache__/utils.cpython-311.pyc and b/litellm/__pycache__/utils.cpython-311.pyc differ
diff --git a/litellm/tests/test_logging.py b/litellm/tests/test_logging.py
index 37caeffa97..567103c85b 100644
--- a/litellm/tests/test_logging.py
+++ b/litellm/tests/test_logging.py
@@ -1,66 +1,285 @@
 #### What this tests ####
 # This tests error logging (with custom user functions) for the raw `completion` + `embedding` endpoints
 
-import sys, os
-import traceback
+# Test Scenarios (test across completion, streaming, embedding)
+## 1: Pre-API-Call
+## 2: Post-API-Call
+## 3: On LiteLLM Call success
+## 4: On LiteLLM Call failure
+
+import sys, os, io
+import traceback, logging
+import pytest
+import dotenv
+dotenv.load_dotenv()
+
+# Create logger
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+# Create a stream handler
+stream_handler = logging.StreamHandler(sys.stdout)
+logger.addHandler(stream_handler)
+
+# Create a function to log information
+def logger_fn(message):
+    logger.info(message)
 
 sys.path.insert(
     0, os.path.abspath("../..")
 )  # Adds the parent directory to the system path
 import litellm
 from litellm import embedding, completion
-
-litellm.set_verbose = False
+from openai.error import AuthenticationError
+litellm.set_verbose = True
 
 score = 0
-
-def logger_fn(model_call_object: dict):
-    print(f"model call details: {model_call_object}")
-
-
 user_message = "Hello, how are you?"
 messages = [{"content": user_message, "role": "user"}]
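+# All of the checks below follow the same capture-and-assert pattern: redirect
+# stdout, run the call, restore stdout, then grep the captured text for the
+# expected log lines. A minimal sketch of the same idea with contextlib
+# (hypothetical helper, not used by the tests below):
+#
+#   from contextlib import redirect_stdout
+#
+#   def call_and_capture(fn, *args, **kwargs):
+#       buf = io.StringIO()
+#       with redirect_stdout(buf):  # litellm's print_verbose writes to stdout
+#           fn(*args, **kwargs)
+#       return buf.getvalue()
+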
-# test on openai completion call
+# 1. On Call Success
+# normal completion
+## test on openai completion call
 try:
-    response = completion(model="gpt-3.5-turbo", messages=messages, logger_fn=logger_fn)
+    # Redirect stdout
+    old_stdout = sys.stdout
+    sys.stdout = new_stdout = io.StringIO()
+
+    response = completion(model="gpt-3.5-turbo", messages=messages)
+
+    # Restore stdout
+    sys.stdout = old_stdout
+    output = new_stdout.getvalue().strip()
+
+    if "Logging Details Pre-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details Post-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details LiteLLM-Success Call" not in output:
+        raise Exception("Required log message not found!")
     score += 1
-except:
-    print(f"error occurred: {traceback.format_exc()}")
+except Exception as e:
+    pytest.fail(f"Error occurred: {e}")
     pass
 
-# test on non-openai completion call
+## test on non-openai completion call
 try:
-    response = completion(
-        model="claude-instant-1", messages=messages, logger_fn=logger_fn
-    )
-    print(f"claude response: {response}")
+    # Redirect stdout
+    old_stdout = sys.stdout
+    sys.stdout = new_stdout = io.StringIO()
+
+    response = completion(model="claude-instant-1", messages=messages)
+
+    # Restore stdout
+    sys.stdout = old_stdout
+    output = new_stdout.getvalue().strip()
+
+    if "Logging Details Pre-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details Post-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details LiteLLM-Success Call" not in output:
+        raise Exception("Required log message not found!")
     score += 1
-except:
-    print(f"error occurred: {traceback.format_exc()}")
+except Exception as e:
+    pytest.fail(f"Error occurred: {e}")
     pass
 
-# # test on openai embedding call
-# try:
-#     response = embedding(model='text-embedding-ada-002', input=[user_message], logger_fn=logger_fn)
-#     score +=1
-# except:
-#     traceback.print_exc()
-
-# # test on bad azure openai embedding call -> missing azure flag and this isn't an embedding model
-# try:
-#     response = embedding(model='chatgpt-test', input=[user_message], logger_fn=logger_fn)
-# except:
-#     score +=1 # expect this to fail
-#     traceback.print_exc()
-
-# # test on good azure openai embedding call
-# try:
-#     response = embedding(model='azure-embedding-model', input=[user_message], azure=True, logger_fn=logger_fn)
-#     score +=1
-# except:
-#     traceback.print_exc()
-
-# print(f"Score: {score}, Overall score: {score/5}")
+# streaming completion
+## test on openai completion call
+try:
+    # Redirect stdout
+    old_stdout = sys.stdout
+    sys.stdout = new_stdout = io.StringIO()
+
+    response = completion(model="gpt-3.5-turbo", messages=messages, stream=True)
+    for chunk in response:
+        pass  # consume the stream so the streaming code path actually runs
+
+    # Restore stdout
+    sys.stdout = old_stdout
+    output = new_stdout.getvalue().strip()
+
+    if "Logging Details Pre-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details Post-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details LiteLLM-Success Call" not in output:
+        raise Exception("Required log message not found!")
+    score += 1
+except Exception as e:
+    pytest.fail(f"Error occurred: {e}")
 
+## test on non-openai completion call
+try:
+    # Redirect stdout
+    old_stdout = sys.stdout
+    sys.stdout = new_stdout = io.StringIO()
+
+    response = completion(model="claude-instant-1", messages=messages, stream=True)
+    for chunk in response:
+        pass  # consume the stream so the streaming code path actually runs
+
+    # Restore stdout
+    sys.stdout = old_stdout
+    output = new_stdout.getvalue().strip()
+
+    if "Logging Details Pre-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details Post-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details LiteLLM-Success Call" not in output:
+        raise Exception("Required log message not found!")
+    score += 1
+except Exception as e:
+    pytest.fail(f"Error occurred: {e}")
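+
+# Streams are lazy: the stream wrapper only does its per-chunk work as the
+# stream is consumed, which is why the streaming tests above iterate the
+# response. Each chunk from LiteLLM's stream wrapper is shaped like
+# {"choices": [{"delta": <content>}]} (see CustomStreamWrapper in utils.py).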
+
+# embedding
+try:
+    # Redirect stdout
+    old_stdout = sys.stdout
+    sys.stdout = new_stdout = io.StringIO()
+
+    response = embedding(model="text-embedding-ada-002", input=["good morning from litellm"])
+
+    # Restore stdout
+    sys.stdout = old_stdout
+    output = new_stdout.getvalue().strip()
+
+    if "Logging Details Pre-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details Post-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details LiteLLM-Success Call" not in output:
+        raise Exception("Required log message not found!")
+except Exception as e:
+    pytest.fail(f"Error occurred: {e}")
+
+## 2. On LiteLLM Call failure
+## TEST BAD KEY
+
+temporary_oai_key = os.environ["OPENAI_API_KEY"]
+os.environ["OPENAI_API_KEY"] = "bad-key"
+
+temporary_anthropic_key = os.environ["ANTHROPIC_API_KEY"]
+os.environ["ANTHROPIC_API_KEY"] = "bad-key"
+
+# normal completion
+## test on openai completion call
+try:
+    # Redirect stdout
+    old_stdout = sys.stdout
+    sys.stdout = new_stdout = io.StringIO()
+
+    response = completion(model="gpt-3.5-turbo", messages=messages)
+except Exception as e:
+    # Restore stdout first - the expected AuthenticationError skips the rest
+    # of the try block, so the restore and the log checks have to happen here
+    sys.stdout = old_stdout
+    output = new_stdout.getvalue().strip()
+    print(f"exception type: {type(e).__name__}")
+    if not isinstance(e, AuthenticationError):
+        pytest.fail(f"Error occurred: {e}")
+    if "Logging Details Pre-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details Post-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details LiteLLM-Failure Call" not in output:
+        raise Exception("Required log message not found!")
+    score += 1
+
+## test on non-openai completion call
+try:
+    # Redirect stdout
+    old_stdout = sys.stdout
+    sys.stdout = new_stdout = io.StringIO()
+
+    response = completion(model="claude-instant-1", messages=messages)
+except Exception as e:
+    # Restore stdout before checking the captured logs
+    sys.stdout = old_stdout
+    output = new_stdout.getvalue().strip()
+    if not isinstance(e, AuthenticationError):
+        pytest.fail(f"Error occurred: {e}")
+    if "Logging Details Pre-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details Post-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details LiteLLM-Failure Call" not in output:
+        raise Exception("Required log message not found!")
+    score += 1
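+
+# NOTE: with a bad key the exception should be raised on the initial request,
+# before any stream chunks arrive, so the streaming failure cases below expect
+# the same captured log lines as the non-streaming ones.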
+
+# streaming completion
+## test on openai completion call
+try:
+    # Redirect stdout
+    old_stdout = sys.stdout
+    sys.stdout = new_stdout = io.StringIO()
+
+    response = completion(model="gpt-3.5-turbo", messages=messages, stream=True)
+except Exception as e:
+    # Restore stdout before checking the captured logs
+    sys.stdout = old_stdout
+    output = new_stdout.getvalue().strip()
+    if not isinstance(e, AuthenticationError):
+        pytest.fail(f"Error occurred: {e}")
+    if "Logging Details Pre-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details Post-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details LiteLLM-Failure Call" not in output:
+        raise Exception("Required log message not found!")
+    score += 1
+
+## test on non-openai completion call
+try:
+    # Redirect stdout
+    old_stdout = sys.stdout
+    sys.stdout = new_stdout = io.StringIO()
+
+    response = completion(model="claude-instant-1", messages=messages, stream=True)
+except Exception as e:
+    # Restore stdout before checking the captured logs
+    sys.stdout = old_stdout
+    output = new_stdout.getvalue().strip()
+    if not isinstance(e, AuthenticationError):
+        pytest.fail(f"Error occurred: {e}")
+    if "Logging Details Pre-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details Post-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details LiteLLM-Failure Call" not in output:
+        raise Exception("Required log message not found!")
+    score += 1
+
+# embedding
+try:
+    # Redirect stdout
+    old_stdout = sys.stdout
+    sys.stdout = new_stdout = io.StringIO()
+
+    response = embedding(model="text-embedding-ada-002", input=["good morning from litellm"])
+except Exception as e:
+    # Restore stdout before checking the captured logs
+    sys.stdout = old_stdout
+    output = new_stdout.getvalue().strip()
+    if not isinstance(e, AuthenticationError):
+        pytest.fail(f"Error occurred: {e}")
+    if "Logging Details Pre-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details Post-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details LiteLLM-Failure Call" not in output:
+        raise Exception("Required log message not found!")
+
+# Restore the real keys for anything that runs after this module
+os.environ["OPENAI_API_KEY"] = temporary_oai_key
+os.environ["ANTHROPIC_API_KEY"] = temporary_anthropic_key
\ No newline at end of file
diff --git a/litellm/utils.py b/litellm/utils.py
index 3aaf00b9ec..053aa19d17 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -180,8 +180,10 @@ class Logging:
         }
 
     def pre_call(self, input, api_key, model=None, additional_args={}):
+        # Log the exact input to the LLM API
+        print_verbose("Logging Details Pre-API Call")
         try:
-            print_verbose(f"logging pre call for model: {self.model} with call type: {self.call_type}")
+            # print_verbose(f"logging pre call for model: {self.model} with call type: {self.call_type}")
             self.model_call_details["input"] = input
             self.model_call_details["api_key"] = api_key
             self.model_call_details["additional_args"] = additional_args
@@ -193,9 +195,6 @@ class Logging:
 
             # User Logging -> if you pass in a custom logging function
             print_verbose(f"model call details: {self.model_call_details}")
-            print_verbose(
-                f"Logging Details: logger_fn - {self.logger_fn} | callable(logger_fn) - {callable(self.logger_fn)}"
-            )
             if self.logger_fn and callable(self.logger_fn):
                 try:
                     self.logger_fn(
@@ -257,7 +256,7 @@ class Logging:
             capture_exception(e)
 
     def post_call(self, original_response, input=None, api_key=None, additional_args={}):
-        # Do something here
+        # Log the exact result from the LLM API; for streaming, log the type of response received
         try:
             self.model_call_details["input"] = input
             self.model_call_details["api_key"] = api_key
@@ -266,7 +265,7 @@ class Logging:
 
             # User Logging -> if you pass in a custom logging function
             print_verbose(
-                f"Logging Details: logger_fn - {self.logger_fn} | callable(logger_fn) - {callable(self.logger_fn)}"
+                f"Logging Details Post-API Call: logger_fn - {self.logger_fn} | callable(logger_fn) - {callable(self.logger_fn)}"
             )
             if self.logger_fn and callable(self.logger_fn):
                 try:
@@ -331,6 +330,9 @@ class Logging:
 
     def success_handler(self, result, start_time, end_time):
+        print_verbose("Logging Details LiteLLM-Success Call")
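+        # the test suite greps captured stdout for this exact string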
         try:
             for callback in litellm.success_callback:
                 try:
@@ -364,6 +366,9 @@ class Logging:
         pass
 
     def failure_handler(self, exception, traceback_exception, start_time, end_time):
+        print_verbose("Logging Details LiteLLM-Failure Call")
         try:
             for callback in litellm.failure_callback:
                 if callback == "lite_debugger":
@@ -1699,6 +1704,9 @@ class CustomStreamWrapper:
         self.model = model
         self.custom_llm_provider = custom_llm_provider
         self.logging_obj = logging_obj
+        if self.logging_obj:
+            # Log the type of the received stream object (the payload of a
+            # stream isn't known up front, so only its type can be logged here)
+            self.logging_obj.post_call(str(type(completion_stream)))
         if model in litellm.cohere_models:
             # cohere does not return an iterator, so we need to wrap it in one
             self.completion_stream = iter(completion_stream)
@@ -1825,7 +1833,7 @@ class CustomStreamWrapper:
                 completion_obj["content"] = self.handle_openai_chat_completion_chunk(chunk)
                 # LOGGING
-                self.logging_obj.post_call(completion_obj["content"])
+                # self.logging_obj.post_call(completion_obj["content"])  # disabled: post_call now runs once per stream in __init__
             # return this for all models
             return {"choices": [{"delta": completion_obj}]}
         except:
diff --git a/pyproject.toml b/pyproject.toml
index d226ddfeba..1eefe547fc 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.512"
+version = "0.1.513"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"