From f944eaee4bf66473d9aa7cdfabdcd95958191759 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Mon, 11 Sep 2023 16:30:29 -0700
Subject: [PATCH] mock responses for streaming

---
 litellm/__pycache__/main.cpython-311.pyc  | Bin 32049 -> 33013 bytes
 litellm/__pycache__/utils.cpython-311.pyc | Bin 90371 -> 90371 bytes
 litellm/main.py                           | 24 ++++++++++++++++++++++
 litellm/tests/test_mock_request.py        | 18 ++++++++++++++--
 pyproject.toml                            |  2 +-
 5 files changed, 41 insertions(+), 3 deletions(-)

diff --git a/litellm/__pycache__/main.cpython-311.pyc b/litellm/__pycache__/main.cpython-311.pyc
index 62fec350c1a8c9f1b244b63bee086e5dcbcd08ba..4745a44d98e2714f96ad30af658dbf937761c639 100644
Binary files a/litellm/__pycache__/main.cpython-311.pyc and b/litellm/__pycache__/main.cpython-311.pyc differ
diff --git a/litellm/__pycache__/utils.cpython-311.pyc b/litellm/__pycache__/utils.cpython-311.pyc
index 58e8830f85c59fe765556e98fd0b8fdc71e9eb70..4977702a255d7491a205eaf7bf07a7b57b584686 100644
Binary files a/litellm/__pycache__/utils.cpython-311.pyc and b/litellm/__pycache__/utils.cpython-311.pyc differ
diff --git a/litellm/main.py b/litellm/main.py
index 3d63f7e36b..a4803b209f 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -954,6 +954,30 @@ def batch_completion(
     return results
 
 
+def mock_completion(model: str, messages: List, stream: bool = False, mock_response: str = "This is a mock request"):
+    try:
+        model_response = ModelResponse()
+        if stream: # return a generator, iterating through the text in chunks of 3 chars / chunk
+            def mock_completion_streaming(): # nested generator, so mock_completion itself stays a plain function and the non-stream path can still return a ModelResponse
+                for i in range(0, len(mock_response), 3):
+                    completion_obj = {"role": "assistant", "content": mock_response[i: i + 3]}
+                    yield {
+                        "choices": [
+                            {
+                                "delta": completion_obj,
+                                "finish_reason": None
+                            },
+                        ]
+                    }
+            return mock_completion_streaming()
+        else:
+            ## RESPONSE OBJECT - echo the mock_response parameter rather than a hard-coded string
+            model_response["choices"][0]["message"]["content"] = mock_response
+            model_response["created"] = time.time()
+            model_response["model"] = "MockResponse"
+            return model_response
+    except Exception:
+        raise Exception("Mock completion response failed")
 ### EMBEDDING ENDPOINTS ####################
 @client
 @timeout( # type: ignore
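Note (not part of the patch): a minimal sketch of how the streaming branch above
is consumed. It assumes `mock_completion` is exported at the package level as
`litellm.mock_completion`, which is how the updated tests below call it:

    import litellm

    # stream=True returns a generator (via the nested mock_completion_streaming
    # helper), yielding the mock text in 3-character "delta" chunks.
    response = litellm.mock_completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hey, I'm a mock request"}],
        stream=True,
    )
    # Reassembling the chunks recovers the full mock_response text.
    text = "".join(chunk["choices"][0]["delta"]["content"] for chunk in response)
    print(text)  # "This is a mock request"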
diff --git a/litellm/tests/test_mock_request.py b/litellm/tests/test_mock_request.py
index 637aed3e7b..33a1ac8ba0 100644
--- a/litellm/tests/test_mock_request.py
+++ b/litellm/tests/test_mock_request.py
@@ -13,9 +13,23 @@ def test_mock_request():
     try:
         model = "gpt-3.5-turbo"
         messages = [{"role": "user", "content": "Hey, I'm a mock request"}]
-        response = litellm.completion(model=model, messages=messages, mock_request=True)
+        response = litellm.mock_completion(model=model, messages=messages)
         print(response)
     except:
         traceback.print_exc()
 
-test_mock_request()
\ No newline at end of file
+def test_streaming_mock_request():
+    complete_response = ""
+    try:
+        model = "gpt-3.5-turbo"
+        messages = [{"role": "user", "content": "Hey, I'm a mock request"}]
+        response = litellm.mock_completion(model=model, messages=messages, stream=True)
+        for chunk in response:
+            print(f"chunk: {chunk}")
+            complete_response += chunk["choices"][0]["delta"]["content"]
+    except Exception:
+        traceback.print_exc()
+    if complete_response == "": # checked outside the try block so an empty stream actually fails the test
+        raise Exception("Empty response received")
+
+test_streaming_mock_request()
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
index 13f83e81e4..d1c37d7bf0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.597"
+version = "0.1.598"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"
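Note (not part of the patch): the non-streaming path can be sanity-checked the
same way; this sketch assumes the same package-level export:

    import litellm

    response = litellm.mock_completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hey, I'm a mock request"}],
    )
    # With stream=False, mock_completion returns a populated ModelResponse,
    # accessible with the same dict-style indexing used in main.py above.
    print(response["choices"][0]["message"]["content"])  # "This is a mock request"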