From f753aac5aae4acf33eaba294919698ccbb2ee98a Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Mon, 31 Jul 2023 18:33:29 -0700
Subject: [PATCH] handled merge conflicts

---
 litellm/__pycache__/__init__.cpython-311.pyc | Bin 690 -> 690 bytes
 litellm/__pycache__/main.cpython-311.pyc     | Bin 8872 -> 11380 bytes
 litellm/__pycache__/utils.cpython-311.pyc    | Bin 12674 -> 12674 bytes
 litellm/main.py                              | 16 ++--------------
 litellm/tests/test_completion.py             |  8 ++++----
 litellm/tests/test_model_fallback.py         | 10 ----------
 6 files changed, 6 insertions(+), 28 deletions(-)

diff --git a/litellm/__pycache__/__init__.cpython-311.pyc b/litellm/__pycache__/__init__.cpython-311.pyc
index ab63061a31852414278c10cac48252df6aa50a01..73352d569b34079e00484ef045a889fb1748fcec 100644
Binary files a/litellm/__pycache__/__init__.cpython-311.pyc and b/litellm/__pycache__/__init__.cpython-311.pyc differ
diff --git a/litellm/__pycache__/main.cpython-311.pyc b/litellm/__pycache__/main.cpython-311.pyc
index eefa5f283275bb85e59ca8f7f526232bc65524d0..11eb85d841f7d983653af7afa7b2b1cf73f994e8 100644
Binary files a/litellm/__pycache__/main.cpython-311.pyc and b/litellm/__pycache__/main.cpython-311.pyc differ
diff --git a/litellm/__pycache__/utils.cpython-311.pyc b/litellm/__pycache__/utils.cpython-311.pyc
Binary files a/litellm/__pycache__/utils.cpython-311.pyc and b/litellm/__pycache__/utils.cpython-311.pyc differ
diff --git a/litellm/main.py b/litellm/main.py
--- a/litellm/main.py
+++ b/litellm/main.py
@@ ... @@ def completion(model, messages, max_tokens=None, *, forceTimeout=60, azure=False
->>>>>>> bd42ec8 (clean up code files)
     if azure == True:
       # azure configs
       openai.api_type = "azure"
@@ -97,7 +90,7 @@ def completion(model, messages, max_tokens=None, *, forceTimeout=60, azure=False
         messages = messages,
         **optional_params
       )
-    elif model in open_ai_chat_completion_models:
+    elif model in litellm.open_ai_chat_completion_models:
       openai.api_type = "openai"
       openai.api_base = "https://api.openai.com/v1"
       openai.api_version = None
@@ -111,7 +104,7 @@ def completion(model, messages, max_tokens=None, *, forceTimeout=60, azure=False
         messages = messages,
         **optional_params
       )
-    elif model in open_ai_text_completion_models:
+    elif model in litellm.open_ai_text_completion_models:
       openai.api_type = "openai"
       openai.api_base = "https://api.openai.com/v1"
       openai.api_version = None
@@ -221,10 +214,6 @@ def completion(model, messages, max_tokens=None, *, forceTimeout=60, azure=False
         ],
       }
       response = new_response
-<<<<<<< HEAD
-    else:
-      raise Exception(f"Model '{model}' not found. Please check your model name and try again.")
-=======
     elif model in litellm.open_ai_chat_completion_models:
       openai.api_type = "openai"
       openai.api_base = "https://api.openai.com/v1"
@@ -255,7 +244,6 @@ def completion(model, messages, max_tokens=None, *, forceTimeout=60, azure=False
       logging(model=model, input=messages, azure=azure, logger_fn=logger_fn)
       args = locals()
       raise ValueError(f"No valid completion model args passed in - {args}")
->>>>>>> bd42ec8 (clean up code files)
     return response
   except Exception as e:
     logging(model=model, input=messages, azure=azure, additional_args={"max_tokens": max_tokens}, logger_fn=logger_fn)
diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py
index ee64099ce..88574ff2e 100644
--- a/litellm/tests/test_completion.py
+++ b/litellm/tests/test_completion.py
@@ -1,10 +1,10 @@
 import sys, os
 import traceback
-sys.path.append('..') # Adds the parent directory to the system path
-import main
-from main import completion
+sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
+import litellm
+from litellm import embedding, completion

-main.set_verbose = True
+litellm.set_verbose = True

 user_message = "Hello, whats the weather in San Francisco??"
 messages = [{ "content": user_message,"role": "user"}]
diff --git a/litellm/tests/test_model_fallback.py b/litellm/tests/test_model_fallback.py
index 8e031a980..b389e9f6a 100644
--- a/litellm/tests/test_model_fallback.py
+++ b/litellm/tests/test_model_fallback.py
@@ -17,20 +17,10 @@ model_fallback_list = ["replicate/llama-2-70b-chat:2c1608e18606fad2812020dc54193
 user_message = "Hello, how are you?"
 messages = [{ "content": user_message,"role": "user"}]

-# for _ in range(10):
 for model in model_fallback_list:
   try:
     response = embedding(model="text-embedding-ada-002", input=[user_message])
     response = completion(model=model, messages=messages)
     print(response)
-<<<<<<< HEAD
-    if response != None:
-      break
   except Exception as e:
-=======
-    # if response != None:
-    #   break
-  except:
->>>>>>> bd42ec8 (clean up code files)
     print(f"error occurred: {traceback.format_exc()}")
-    raise e
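
Note on the main.py hunks: besides dropping leftover conflict markers, they qualify
the model-list lookups through the package namespace (litellm.open_ai_chat_completion_models
instead of the bare open_ai_chat_completion_models). A minimal sketch of the dispatch this
leaves behind; the helper name route_openai_config is hypothetical, and only the constants
and openai config assignments visible in this diff are assumed:

    import litellm
    import openai

    def route_openai_config(model, azure=False):
        # Mirrors the if/elif chain in completion(): pick the API surface
        # based on which litellm model list the model name appears in.
        if azure == True:
            # azure configs
            openai.api_type = "azure"
        elif model in litellm.open_ai_chat_completion_models:
            openai.api_type = "openai"
            openai.api_base = "https://api.openai.com/v1"
            openai.api_version = None
        elif model in litellm.open_ai_text_completion_models:
            openai.api_type = "openai"
            openai.api_base = "https://api.openai.com/v1"
            openai.api_version = None
        else:
            # no branch matched: same failure mode as the patched completion()
            args = locals()
            raise ValueError(f"No valid completion model args passed in - {args}")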
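
The test updates swap the ad-hoc "import main" for the installed package
(import litellm / from litellm import embedding, completion). A short, self-contained
sketch of the fallback-test pattern after this patch; the two model names in the list
are illustrative stand-ins, not taken from this diff:

    import traceback
    import litellm
    from litellm import embedding, completion

    litellm.set_verbose = True

    # illustrative fallback list; the real test mixes replicate and openai models
    model_fallback_list = ["gpt-3.5-turbo", "gpt-4"]

    user_message = "Hello, how are you?"
    messages = [{"content": user_message, "role": "user"}]

    for model in model_fallback_list:
        try:
            response = embedding(model="text-embedding-ada-002", input=[user_message])
            response = completion(model=model, messages=messages)
            print(response)
        except Exception:
            # the patched test logs and moves on instead of re-raising
            print(f"error occurred: {traceback.format_exc()}")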