add mock request to docs

This commit is contained in:
Krrish Dholakia 2023-09-11 12:19:13 -07:00
parent d4806cd686
commit 6da500c6e0
6 changed files with 31 additions and 4 deletions

View file

@ -93,6 +93,7 @@ def completion(
deployment_id = None, deployment_id = None,
# Optional liteLLM function params # Optional liteLLM function params
*, *,
mock_request=False, # to mock an LLM request
return_async=False, return_async=False,
api_key: Optional[str] = None, api_key: Optional[str] = None,
api_version: Optional[str] = None, api_version: Optional[str] = None,
@ -134,7 +135,13 @@ def completion(
): # allow custom provider to be passed in via the model name "azure/chatgpt-test" ): # allow custom provider to be passed in via the model name "azure/chatgpt-test"
custom_llm_provider = model.split("/", 1)[0] custom_llm_provider = model.split("/", 1)[0]
model = model.split("/", 1)[1] model = model.split("/", 1)[1]
if mock_request == True:
## RESPONSE OBJECT
completion_response = "This is a mock request"
model_response["choices"][0]["message"]["content"] = completion_response
model_response["created"] = time.time()
model_response["model"] = "MockResponse"
return model_response
# check if user passed in any of the OpenAI optional params # check if user passed in any of the OpenAI optional params
optional_params = get_optional_params( optional_params = get_optional_params(
functions=functions, functions=functions,

View file

@ -0,0 +1,21 @@
#### What this tests ####
# This tests mock request calls to litellm
import sys, os
import traceback
sys.path.insert(
0, os.path.abspath("../..")
) # Adds the parent directory to the system path
import litellm
def test_mock_request():
    """Smoke-test litellm's mock_request flag.

    Calls litellm.completion with mock_request=True and verifies the canned
    mock response is returned without hitting any real LLM provider.

    Raises:
        AssertionError: if the mocked content or model tag is wrong.
        Exception: any error from litellm.completion is logged and re-raised
            so the test actually fails instead of silently passing.
    """
    try:
        model = "gpt-3.5-turbo"
        messages = [{"role": "user", "content": "Hey, I'm a mock request"}]
        response = litellm.completion(model=model, messages=messages, mock_request=True)
        print(response)
        # Pin the mock contract introduced alongside this test: the completion
        # path short-circuits and returns a fixed payload when mock_request=True.
        assert response["choices"][0]["message"]["content"] == "This is a mock request"
        assert response["model"] == "MockResponse"
    except Exception:
        # Log for debugging, then re-raise -- a swallowed exception would make
        # this test pass unconditionally.
        traceback.print_exc()
        raise
test_mock_request()

View file

@ -1938,8 +1938,7 @@ def exception_type(model, original_exception, custom_llm_provider):
llm_provider="vllm", llm_provider="vllm",
model=model model=model
) )
else: raise original_exception
raise original_exception
except Exception as e: except Exception as e:
# LOGGING # LOGGING
exception_logging( exception_logging(

View file

@ -1,6 +1,6 @@
[tool.poetry] [tool.poetry]
name = "litellm" name = "litellm"
version = "0.1.590" version = "0.1.591"
description = "Library to easily interface with LLM API providers" description = "Library to easily interface with LLM API providers"
authors = ["BerriAI"] authors = ["BerriAI"]
license = "MIT License" license = "MIT License"