diff --git a/litellm/__pycache__/main.cpython-311.pyc b/litellm/__pycache__/main.cpython-311.pyc
index 6841fe91c9..62fec350c1 100644
Binary files a/litellm/__pycache__/main.cpython-311.pyc and b/litellm/__pycache__/main.cpython-311.pyc differ
diff --git a/litellm/__pycache__/utils.cpython-311.pyc b/litellm/__pycache__/utils.cpython-311.pyc
index 70a03ceefe..58e8830f85 100644
Binary files a/litellm/__pycache__/utils.cpython-311.pyc and b/litellm/__pycache__/utils.cpython-311.pyc differ
diff --git a/litellm/main.py b/litellm/main.py
index d5e5fc6409..3d63f7e36b 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -93,6 +93,7 @@ def completion(
     deployment_id = None,
     # Optional liteLLM function params
     *,
+    mock_request=False, # to mock an LLM request
     return_async=False,
     api_key: Optional[str] = None,
     api_version: Optional[str] = None,
@@ -134,7 +135,13 @@ def completion(
         ): # allow custom provider to be passed in via the model name "azure/chatgpt-test"
             custom_llm_provider = model.split("/", 1)[0]
             model = model.split("/", 1)[1]
-
+        if mock_request == True:
+            ## RESPONSE OBJECT
+            completion_response = "This is a mock request"
+            model_response["choices"][0]["message"]["content"] = completion_response
+            model_response["created"] = time.time()
+            model_response["model"] = "MockResponse"
+            return model_response
         # check if user passed in any of the OpenAI optional params
         optional_params = get_optional_params(
             functions=functions,
diff --git a/litellm/tests/test_mock_request.py b/litellm/tests/test_mock_request.py
new file mode 100644
index 0000000000..637aed3e7b
--- /dev/null
+++ b/litellm/tests/test_mock_request.py
@@ -0,0 +1,21 @@
+#### What this tests ####
+# This tests mock request calls to litellm
+
+import sys, os
+import traceback
+
+sys.path.insert(
+    0, os.path.abspath("../..")
+) # Adds the parent directory to the system path
+import litellm
+
+def test_mock_request():
+    try:
+        model = "gpt-3.5-turbo"
+        messages = [{"role": "user", "content": "Hey, I'm a mock request"}]
+        response = litellm.completion(model=model, messages=messages, mock_request=True)
+        print(response)
+    except:
+        traceback.print_exc()
+
+test_mock_request()
\ No newline at end of file
diff --git a/litellm/utils.py b/litellm/utils.py
index d901eadfba..2a19e885b2 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -1938,8 +1938,7 @@ def exception_type(model, original_exception, custom_llm_provider):
                         llm_provider="vllm",
                         model=model
                     )
-            else:
-                raise original_exception
+        raise original_exception
     except Exception as e:
         # LOGGING
         exception_logging(
diff --git a/pyproject.toml b/pyproject.toml
index 63c955461f..0dfbbbefa5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.590"
+version = "0.1.591"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"
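
For reference, a minimal sketch of how a caller might exercise the new flag once this patch is applied (assuming litellm 0.1.591 with the mock_request parameter added above); the test file in the diff does the same thing inside a try/except:

import litellm

# mock_request=True makes completion() return early with a canned response,
# so no API key or network call is needed (per the change to litellm/main.py).
response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hey, I'm a mock request"}],
    mock_request=True,
)
print(response["model"])  # "MockResponse"
print(response["choices"][0]["message"]["content"])  # "This is a mock request"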