From be0cd1ccf8ac95e57fc024a4f104ff730a18552a Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Tue, 1 Aug 2023 05:26:08 -0700
Subject: [PATCH 1/4] Update setup.py

---
 setup.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/setup.py b/setup.py
index 4d8ed7b017..8390a05190 100644
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,11 @@ setup(
     install_requires=[
         'openai',
         'cohere',
-        'func_timeout'
-        'pytest'
+        'func_timeout',
+        'pytest',
+        'anthropic',
+        'replicate',
+        'python-dotenv',
+        'openai[datalib]'
     ],
 )
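A note on PATCH 1/4: the old install_requires relied on two adjacent string literals with no comma between them, and Python's implicit string-literal concatenation silently merges those into one bogus requirement. A minimal illustration of the failure mode the added comma fixes:

    # Adjacent string literals concatenate at parse time:
    deps = [
        'func_timeout'  # <-- missing comma
        'pytest'
    ]
    print(deps)  # ['func_timeoutpytest'] -- pip would look for this nonexistent package
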
From b6dcddefdf8833202c511e5898e636a675c6bc47 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Tue, 1 Aug 2023 11:01:47 -0700
Subject: [PATCH 2/4] exception mapping

---
 litellm/__init__.py                          |   2 +-
 litellm/__pycache__/__init__.cpython-311.pyc | Bin 690 -> 721 bytes
 litellm/__pycache__/main.cpython-311.pyc     | Bin 11380 -> 11665 bytes
 litellm/__pycache__/utils.cpython-311.pyc    | Bin 12674 -> 15050 bytes
 litellm/main.py                              |  13 +-
 litellm/tests/test_exceptions.py             | 129 +++++++++++++++++++
 litellm/utils.py                             |  45 ++++++-
 7 files changed, 182 insertions(+), 7 deletions(-)
 create mode 100644 litellm/tests/test_exceptions.py

diff --git a/litellm/__init__.py b/litellm/__init__.py
index 38697c7519..7ed52d7cd3 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -25,6 +25,6 @@ open_ai_embedding_models = [
     'text-embedding-ada-002'
 ]
 
-from .utils import client, logging # Import all the symbols from main.py
+from .utils import client, logging, exception_type # Import all the symbols from main.py
 from .main import * # Import all the symbols from main.py

[GIT binary patch deltas for litellm/__pycache__/__init__.cpython-311.pyc, main.cpython-311.pyc, and utils.cpython-311.pyc omitted: compiled bytecode, not human-readable]
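The __init__.py change above re-exports exception_type at the package level, which is what lets main.py (next diff) pull it in alongside the other utils symbols. A quick sketch of the new import surface:

    # exception_type now ships alongside client and logging:
    from litellm import client, logging, exception_type
    print(callable(exception_type))  # True
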
diff --git a/litellm/main.py b/litellm/main.py
index b5a6b1c8af..48d3dbed95 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -6,7 +6,7 @@ import traceback
 import dotenv
 import traceback
 import litellm
-from litellm import client, logging
+from litellm import client, logging, exception_type
 from litellm import success_callback, failure_callback
 import random
 ####### ENVIRONMENT VARIABLES ###################
@@ -76,6 +76,7 @@ def completion(
       temperature=temperature, top_p=top_p, n=n, stream=stream, stop=stop, max_tokens=max_tokens,
       presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, logit_bias=logit_bias, user=user
     )
+    print_verbose(f"os environment variables: {os.environ}")
     if azure == True:
       # azure configs
       openai.api_type = "azure"
@@ -120,7 +121,7 @@ def completion(
   elif "replicate" in model:
     # replicate defaults to os.environ.get("REPLICATE_API_TOKEN")
     # checking in case user set it to REPLICATE_API_KEY instead
-    if not os.environ.get("REPLICATE_API_TOKEN") and os.environ.get("REPLICATE_API_KEY"):
+    if not os.environ.get("REPLICATE_API_TOKEN") and os.environ.get("REPLICATE_API_KEY"):
       replicate_api_token = os.environ.get("REPLICATE_API_KEY")
       os.environ["REPLICATE_API_TOKEN"] = replicate_api_token
     prompt = " ".join([message["content"] for message in messages])
@@ -207,7 +208,7 @@ def completion(
         "finish_reason": "stop",
         "index": 0,
         "message": {
-          "content": response[0],
+          "content": response[0].text,
           "role": "assistant"
         }
       }
@@ -246,8 +247,10 @@ def completion(
       raise ValueError(f"No valid completion model args passed in - {args}")
     return response
   except Exception as e:
-    logging(model=model, input=messages, azure=azure, additional_args={"max_tokens": max_tokens}, logger_fn=logger_fn)
-    raise e
+    # log the original exception
+    logging(model=model, input=messages, azure=azure, additional_args={"max_tokens": max_tokens}, logger_fn=logger_fn, exception=e)
+    ## Map to OpenAI Exception
+    raise exception_type(model=model, original_exception=e)
 
 
 ### EMBEDDING ENDPOINTS ####################
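With the final hunk above, completion() logs the original provider error and then re-raises it through exception_type, so callers can handle every provider with OpenAI's exception classes. A minimal sketch of the calling pattern this enables (assumes provider API keys are configured in the environment):

    from openai.error import AuthenticationError, RateLimitError, OpenAIError
    from litellm import completion

    messages = [{"content": "Hello, how are you?", "role": "user"}]
    try:
        response = completion(model="claude-instant-1", messages=messages)
    except (AuthenticationError, RateLimitError) as e:
        print(f"mapped provider error: {e}")  # Anthropic errors surface as OpenAI types
    except OpenAIError as e:
        print(f"other OpenAI-style error: {e}")
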
diff --git a/litellm/tests/test_exceptions.py b/litellm/tests/test_exceptions.py
new file mode 100644
index 0000000000..38be0e2c15
--- /dev/null
+++ b/litellm/tests/test_exceptions.py
@@ -0,0 +1,129 @@
+from openai.error import AuthenticationError, InvalidRequestError, RateLimitError, OpenAIError
+import os
+import sys
+import traceback
+sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
+import litellm
+from litellm import embedding, completion
+from concurrent.futures import ThreadPoolExecutor
+#### What this tests ####
+# This tests exception mapping -> trigger an exception from an llm provider -> assert if output is of the expected type
+
+
+# 5 providers -> OpenAI, Azure, Anthropic, Cohere, Replicate
+
+# 3 main types of exceptions -> Rate Limit Errors, Context Window Errors, Auth errors (incorrect/rotated key, etc.)
+
+# Approach: Run each model through the test -> assert if the correct error (always the same one) is triggered
+
+models = ["gpt-3.5-turbo", "chatgpt-test", "claude-instant-1", "command-nightly", "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"]
+
+# Test 1: Rate Limit Errors
+def test_model(model):
+    try:
+        sample_text = "how does a court case get to the Supreme Court?" * 50000
+        messages = [{ "content": sample_text,"role": "user"}]
+        azure = False
+        if model == "chatgpt-test":
+            azure = True
+        print(f"model: {model}")
+        response = completion(model=model, messages=messages, azure=azure)
+    except RateLimitError:
+        return True
+    except OpenAIError: # is at least an openai error -> in case of random model errors - e.g. overloaded server
+        return True
+    except Exception as e:
+        print(f"Uncaught Exception {model}: {type(e).__name__} - {e}")
+        pass
+    return False
+
+# Repeat each model 250 times (1,250 calls total) to trigger rate limits
+extended_models = [model for model in models for _ in range(250)]
+
+def worker(model):
+    return test_model(model)
+
+# Create a dictionary to store the results
+counts = {True: 0, False: 0}
+
+# Use Thread Pool Executor
+with ThreadPoolExecutor(max_workers=500) as executor:
+    # Use map to start the operation in thread pool
+    results = executor.map(worker, extended_models)
+
+    # Iterate over results and count True/False
+    for result in results:
+        counts[result] += 1
+
+accuracy_score = counts[True]/(counts[True] + counts[False])
+print(f"accuracy_score: {accuracy_score}")
+
+# Test 2: Context Window Errors
+print("Testing Context Window Errors")
+def test_model(model): # pass extremely long input
+    sample_text = "how does a court case get to the Supreme Court?" * 100000
+    messages = [{ "content": sample_text,"role": "user"}]
+    try:
+        azure = False
+        if model == "chatgpt-test":
+            azure = True
+        print(f"model: {model}")
+        response = completion(model=model, messages=messages, azure=azure)
+    except InvalidRequestError:
+        return True
+    except OpenAIError: # is at least an openai error -> in case of random model errors - e.g. overloaded server
+        return True
+    except Exception as e:
+        print(f"Error Type: {type(e).__name__}")
+        print(f"Uncaught Exception - {e}")
+        pass
+    return False
+
+## TEST SCORE
+true_val = 0
+for model in models:
+    if test_model(model=model) == True:
+        true_val += 1
+accuracy_score = true_val/len(models)
+print(f"CTX WINDOW accuracy_score: {accuracy_score}")
+
+# Test 3: InvalidAuth Errors
+def logger_fn(model_call_object: dict):
+    print(f"model call details: {model_call_object}")
+
+
+def test_model(model): # set the model key to an invalid key, depending on the model
+    messages = [{ "content": "Hello, how are you?","role": "user"}]
+    try:
+        azure = False
+        if model == "gpt-3.5-turbo":
+            os.environ["OPENAI_API_KEY"] = "bad-key"
+        elif model == "chatgpt-test":
+            os.environ["AZURE_API_KEY"] = "bad-key"
+            azure = True
+        elif model == "claude-instant-1":
+            os.environ["ANTHROPIC_API_KEY"] = "bad-key"
+        elif model == "command-nightly":
+            os.environ["COHERE_API_KEY"] = "bad-key"
+        elif model == "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1":
+            os.environ["REPLICATE_API_KEY"] = "bad-key"
+            os.environ["REPLICATE_API_TOKEN"] = "bad-key"
+        print(f"model: {model}")
+        response = completion(model=model, messages=messages, azure=azure, logger_fn=logger_fn)
+        print(f"response: {response}")
+    except AuthenticationError as e:
+        return True
+    except OpenAIError: # is at least an openai error -> in case of random model errors - e.g. overloaded server
+        return True
+    except Exception as e:
+        print(f"Uncaught Exception - {e}")
+        pass
+    return False
+
+## TEST SCORE
+true_val = 0
+for model in models:
+    if test_model(model=model) == True:
+        true_val += 1
+accuracy_score = true_val/len(models)
+print(f"INVALID AUTH accuracy_score: {accuracy_score}")
\ No newline at end of file
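One caveat on the invalid-auth test above: it overwrites the real keys in os.environ with "bad-key" and never restores them, so anything that runs later in the same process also sees the bad keys. A hedged sketch of a save-and-restore guard (the helper name is illustrative, not part of the patch):

    import os
    from contextlib import contextmanager

    @contextmanager
    def scoped_env(key, value):  # hypothetical helper
        old = os.environ.get(key)
        os.environ[key] = value
        try:
            yield
        finally:
            if old is None:            # key did not exist before: remove it
                os.environ.pop(key, None)
            else:                      # otherwise put the original value back
                os.environ[key] = old

    # usage: with scoped_env("ANTHROPIC_API_KEY", "bad-key"): test_model(...)
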
diff --git a/litellm/utils.py b/litellm/utils.py
index 593b754f03..c7eaa96d2b 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -9,6 +9,7 @@ import litellm
 import os
 import openai
 import random
+from openai.error import AuthenticationError, InvalidRequestError, RateLimitError, ServiceUnavailableError, OpenAIError
 ####### ENVIRONMENT VARIABLES ###################
 dotenv.load_dotenv() # Loading env variables using dotenv
 sentry_sdk_instance = None
@@ -29,12 +30,15 @@ def print_verbose(print_statement):
 
 ####### LOGGING ###################
 #Logging function -> log the exact model details + what's being sent | Non-Blocking
-def logging(model, input, azure=False, additional_args={}, logger_fn=None):
+def logging(model, input, azure=False, additional_args={}, logger_fn=None, exception=None):
   try:
     model_call_details = {}
     model_call_details["model"] = model
     model_call_details["input"] = input
     model_call_details["azure"] = azure
+    # log exception details
+    if exception:
+      model_call_details["original_exception"] = exception
     # log additional call details -> api key, etc.
     if azure == True or model in litellm.open_ai_chat_completion_models or model in litellm.open_ai_embedding_models:
       model_call_details["api_type"] = openai.api_type
@@ -222,3 +226,42 @@ def handle_success(*args, **kwargs):
         success_handler(args, kwargs)
     pass
 
+
+def exception_type(model, original_exception):
+    if isinstance(original_exception, OpenAIError):
+        # Handle the OpenAIError
+        raise original_exception
+    elif model:
+        error_str = str(original_exception)
+        if isinstance(original_exception, BaseException):
+            exception_type = type(original_exception).__name__
+        else:
+            exception_type = ""
+        if "claude" in model: # one of the anthropic models
+            print_verbose(f"status_code: {original_exception.status_code}")
+            if original_exception.status_code == 401:
+                raise AuthenticationError(f"AnthropicException - {original_exception.message}")
+            elif original_exception.status_code == 400:
+                raise InvalidRequestError(f"AnthropicException - {original_exception.message}", f"{model}")
+            elif original_exception.status_code == 429:
+                raise RateLimitError(f"AnthropicException - {original_exception.message}")
+        elif "replicate" in model:
+            if "Incorrect authentication token" in error_str:
+                raise AuthenticationError(f"ReplicateException - {error_str}")
+            elif exception_type == "ModelError":
+                raise InvalidRequestError(f"ReplicateException - {error_str}", f"{model}")
+            elif "Request was throttled" in error_str:
+                raise RateLimitError(f"ReplicateException - {error_str}")
+            elif exception_type == "ReplicateError": ## ReplicateError implies an error on Replicate's side, not the user's
+                raise ServiceUnavailableError(f"ReplicateException - {error_str}")
+        elif model == "command-nightly": # Cohere
+            if "invalid api token" in error_str or "No API key provided." in error_str:
+                raise AuthenticationError(f"CohereException - {error_str}")
+            elif "too many tokens" in error_str:
+                raise InvalidRequestError(f"CohereException - {error_str}", f"{model}")
+            elif "CohereConnectionError" in exception_type: # cohere seems to fire these errors when we load test it (1k+ messages / min)
+                raise RateLimitError(f"CohereException - {original_exception.message}")
+        raise original_exception # base case - re-raise the original exception
+    else:
+        raise original_exception
\ No newline at end of file
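Note that the three test blocks in test_exceptions.py print accuracy scores rather than asserting, so pytest reports them as passing even when the mapping regresses. A hedged sketch of an assert-based variant, written as it would appear inside test_exceptions.py (test_model is the invalid-auth helper defined just above it; models is the list at the top of the file):

    import pytest

    @pytest.mark.parametrize("model", models)
    def test_invalid_auth(model):
        # fails the pytest run if the provider error was not mapped
        assert test_model(model) is True
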
From d77d8b7cd786e747b6aeaf3c9cf6ff9a98d16941 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Tue, 1 Aug 2023 11:29:03 -0700
Subject: [PATCH 3/4] cleanup

---
 litellm/main.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/litellm/main.py b/litellm/main.py
index 48d3dbed95..ec2de634f9 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -76,7 +76,6 @@ def completion(
       temperature=temperature, top_p=top_p, n=n, stream=stream, stop=stop, max_tokens=max_tokens,
       presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, logit_bias=logit_bias, user=user
     )
-    print_verbose(f"os environment variables: {os.environ}")
     if azure == True:
       # azure configs
       openai.api_type = "azure"

From c0502585303ad06c866d2d1953cedc307a6ee2fc Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Tue, 1 Aug 2023 11:29:24 -0700
Subject: [PATCH 4/4] cleanup

---
 litellm/__pycache__/main.cpython-311.pyc      | Bin 11665 -> 11556 bytes
 litellm/__pycache__/utils.cpython-311.pyc     | Bin 15050 -> 14845 bytes
 ...st_bad_params.cpython-311-pytest-7.4.0.pyc | Bin 0 -> 1580 bytes
 .../test_client.cpython-311-pytest-7.4.0.pyc  | Bin 0 -> 3738 bytes
 ...st_completion.cpython-311-pytest-7.4.0.pyc | Bin 0 -> 6216 bytes
 ...st_exceptions.cpython-311-pytest-7.4.0.pyc | Bin 0 -> 6040 bytes
 .../test_logging.cpython-311-pytest-7.4.0.pyc | Bin 0 -> 2352 bytes
 ...odel_fallback.cpython-311-pytest-7.4.0.pyc | Bin 0 -> 1468 bytes
 litellm/tests/test_bad_params.py              |  4 ++--
 litellm/tests/test_completion.py              |  3 +++
 litellm/tests/test_logging.py                 | 15 +++++++++++----
 litellm/tests/test_model_fallback.py          |  3 +--
 12 files changed, 17 insertions(+), 8 deletions(-)
 create mode 100644 litellm/tests/__pycache__/test_bad_params.cpython-311-pytest-7.4.0.pyc
 create mode 100644 litellm/tests/__pycache__/test_client.cpython-311-pytest-7.4.0.pyc
 create mode 100644 litellm/tests/__pycache__/test_completion.cpython-311-pytest-7.4.0.pyc
 create mode 100644 litellm/tests/__pycache__/test_exceptions.cpython-311-pytest-7.4.0.pyc
 create mode 100644 litellm/tests/__pycache__/test_logging.cpython-311-pytest-7.4.0.pyc
 create mode 100644 litellm/tests/__pycache__/test_model_fallback.cpython-311-pytest-7.4.0.pyc

[GIT binary patch data for the compiled .pyc files above omitted: bytecode deltas and literals, not human-readable]
diff --git a/litellm/tests/test_bad_params.py b/litellm/tests/test_bad_params.py
index dd1e8d5093..53872e5fad 100644
--- a/litellm/tests/test_bad_params.py
+++ b/litellm/tests/test_bad_params.py
@@ -26,7 +26,7 @@ litellm.failure_callback = ["slack", "sentry", "posthog"]
 
 user_message = "Hello, how are you?"
 messages = [{ "content": user_message,"role": "user"}]
-model_val = "krrish is a model"
+model_val = None
 
 
 def test_completion_with_empty_model():
@@ -35,4 +35,4 @@ def test_completion_with_empty_model():
         response = completion(model=model_val, messages=messages)
     except Exception as e:
         print(f"error occurred: {e}")
-        pass
+        pass
\ No newline at end of file
diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py
index adb55a45e3..b9bbbebe4b 100644
--- a/litellm/tests/test_completion.py
+++ b/litellm/tests/test_completion.py
@@ -1,5 +1,8 @@
 import sys, os
 import traceback
+from dotenv import load_dotenv
+load_dotenv()
+import os
 sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
 import pytest
 import litellm
diff --git a/litellm/tests/test_logging.py b/litellm/tests/test_logging.py
index 21e4a879c6..dbacf8b472 100644
--- a/litellm/tests/test_logging.py
+++ b/litellm/tests/test_logging.py
@@ -7,7 +7,9 @@ sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the
 import litellm
 from litellm import embedding, completion
 
-litellm.set_verbose = True
+litellm.set_verbose = False
+
+score = 0
 
 def logger_fn(model_call_object: dict):
     print(f"model call details: {model_call_object}")
@@ -18,6 +20,7 @@ messages = [{ "content": user_message,"role": "user"}]
 # test on openai completion call
 try:
     response = completion(model="gpt-3.5-turbo", messages=messages)
+    score += 1
 except:
     print(f"error occurred: {traceback.format_exc()}")
     pass
@@ -25,6 +28,7 @@ except:
 # test on non-openai completion call
 try:
     response = completion(model="claude-instant-1", messages=messages, logger_fn=logger_fn)
+    score += 1
 except:
     print(f"error occurred: {traceback.format_exc()}")
     pass
@@ -32,20 +36,23 @@ except:
 # test on openai embedding call
 try:
     response = embedding(model='text-embedding-ada-002', input=[user_message], logger_fn=logger_fn)
-    print(f"response: {str(response)[:50]}")
+    score += 1
 except:
     traceback.print_exc()
 
 # test on bad azure openai embedding call -> missing azure flag and this isn't an embedding model
 try:
     response = embedding(model='chatgpt-test', input=[user_message], logger_fn=logger_fn)
-    print(f"response: {str(response)[:50]}")
 except:
+    score += 1 # expect this to fail
     traceback.print_exc()
 
 # test on good azure openai embedding call
 try:
     response = embedding(model='azure-embedding-model', input=[user_message], azure=True, logger_fn=logger_fn)
-    print(f"response: {str(response)[:50]}")
+    score += 1
 except:
     traceback.print_exc()
+
+
+print(f"Score: {score}, Overall score: {score/5}")
\ No newline at end of file
diff --git a/litellm/tests/test_model_fallback.py b/litellm/tests/test_model_fallback.py
index b389e9f6ac..69dc1f68dd 100644
--- a/litellm/tests/test_model_fallback.py
+++ b/litellm/tests/test_model_fallback.py
@@ -12,7 +12,7 @@ litellm.failure_callback = ["slack", "sentry", "posthog"]
 
 litellm.set_verbose = True
 
-model_fallback_list = ["replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1", "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1", "chatgpt-test"]
+model_fallback_list = ["claude-instant-1", "gpt-3.5-turbo", "chatgpt-test"]
 
 user_message = "Hello, how are you?"
 messages = [{ "content": user_message,"role": "user"}]
@@ -21,6 +21,5 @@ for model in model_fallback_list:
     try:
         response = embedding(model="text-embedding-ada-002", input=[user_message])
         response = completion(model=model, messages=messages)
-        print(response)
    except Exception as e:
        print(f"error occurred: {traceback.format_exc()}")
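A closing note on test_model_fallback.py: the loop above exercises every model in model_fallback_list in turn, whereas a production fallback would stop at the first success. A minimal sketch of that pattern, reusing the same names (assumes a non-empty model list):

    from litellm import completion

    def completion_with_fallback(messages, models):
        last_err = None
        for model in models:
            try:
                return completion(model=model, messages=messages)  # first success wins
            except Exception as e:  # surfaces as OpenAI error types via exception_type
                last_err = e        # remember the failure, try the next model
        raise last_err              # every model failed

    # response = completion_with_fallback(messages, model_fallback_list)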