mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 10:44:24 +00:00)
add mock request to docs
commit 6da500c6e0 (parent d4806cd686)
6 changed files with 31 additions and 4 deletions
2 binary files changed (contents not shown).
litellm/main.py

@@ -93,6 +93,7 @@ def completion(
     deployment_id = None,
     # Optional liteLLM function params
     *,
+    mock_request=False, # to mock an LLM request
     return_async=False,
     api_key: Optional[str] = None,
     api_version: Optional[str] = None,
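Note that the new parameter lands after the bare `*` in the signature, so it is keyword-only: callers must spell out mock_request=True and cannot pass it positionally. A quick illustrative call (model name and message are placeholders, not from the diff):

import litellm

# mock_request must be passed by keyword; the bare * in completion()'s
# signature rejects positional use.
response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "ping"}],
    mock_request=True,
)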
@@ -134,7 +135,13 @@ def completion(
     ): # allow custom provider to be passed in via the model name "azure/chatgpt-test"
         custom_llm_provider = model.split("/", 1)[0]
         model = model.split("/", 1)[1]
+    if mock_request == True:
+        ## RESPONSE OBJECT
+        completion_response = "This is a mock request"
+        model_response["choices"][0]["message"]["content"] = completion_response
+        model_response["created"] = time.time()
+        model_response["model"] = "MockResponse"
+        return model_response
     # check if user passed in any of the OpenAI optional params
     optional_params = get_optional_params(
         functions=functions,
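Together, these two hunks short-circuit completion(): when mock_request is truthy, the pre-built model_response object is filled with a canned message and returned before any provider is contacted. A minimal standalone sketch of the same control flow (the dict here is a stand-in for litellm's real response object, which the diff does not show):

import time

def completion_sketch(model, messages, mock_request=False):
    # Stand-in for litellm's model_response object.
    model_response = {"choices": [{"message": {"content": ""}}]}
    if mock_request:
        # Mirror the diff: fill the response and return before any network call.
        model_response["choices"][0]["message"]["content"] = "This is a mock request"
        model_response["created"] = time.time()
        model_response["model"] = "MockResponse"
        return model_response
    raise NotImplementedError("real provider call elided in this sketch")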
litellm/tests/test_mock_request.py (new file, 21 lines)
@@ -0,0 +1,21 @@
+#### What this tests ####
+# This tests mock request calls to litellm
+
+import sys, os
+import traceback
+
+sys.path.insert(
+    0, os.path.abspath("../..")
+)  # Adds the parent directory to the system path
+import litellm
+
+def test_mock_request():
+    try:
+        model = "gpt-3.5-turbo"
+        messages = [{"role": "user", "content": "Hey, I'm a mock request"}]
+        response = litellm.completion(model=model, messages=messages, mock_request=True)
+        print(response)
+    except:
+        traceback.print_exc()
+
+test_mock_request()
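The new test only prints the response, so it would pass even if the mock content regressed. If you want pytest to fail loudly, an assertion-based variant could look like this (assuming the response is dict-indexable, as the mock branch above suggests):

import litellm

def test_mock_request_content():
    response = litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hey, I'm a mock request"}],
        mock_request=True,
    )
    # These fields are set by the mock branch added in this commit.
    assert response["choices"][0]["message"]["content"] == "This is a mock request"
    assert response["model"] == "MockResponse"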
litellm/utils.py

@@ -1938,8 +1938,7 @@ def exception_type(model, original_exception, custom_llm_provider):
                     llm_provider="vllm",
                     model=model
                 )
-            else:
-                raise original_exception
+            raise original_exception
     except Exception as e:
         # LOGGING
         exception_logging(
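This hunk drops an else: branch in the vllm section of exception_type(): because the mapped-error path already raises, the re-raise of the original exception can sit as the unconditional fall-through. The control-flow change, reduced to a sketch (the mapped error and its trigger string are placeholders, not litellm's actual mapping):

def exception_type_sketch(original_exception):
    # After this commit the re-raise is the unconditional fall-through,
    # not an else: paired with the mapping check.
    if "context length" in str(original_exception):
        raise ValueError("mapped provider error (placeholder)")
    raise original_exception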
pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.590"
+version = "0.1.591"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"