Mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-27 11:43:54 +00:00
add kwargs to mock_completion
Allows replacing `completion` with `mock_completion` without encountering errors.
This commit is contained in:
parent
c0e3a47650
commit
90df061ffb
1 changed file with 1 addition and 1 deletion
|
@ -952,7 +952,7 @@ def batch_completion(
|
||||||
return results
|
return results
|
||||||
|
|
||||||
## Use this in your testing pipeline, if you need to mock an LLM response
|
## Use this in your testing pipeline, if you need to mock an LLM response
|
||||||
def mock_completion(model: str, messages: List, stream: bool = False, mock_response: str = "This is a mock request"):
|
def mock_completion(model: str, messages: List, stream: bool = False, mock_response: str = "This is a mock request", **kwargs):
|
||||||
try:
|
try:
|
||||||
model_response = ModelResponse()
|
model_response = ModelResponse()
|
||||||
if stream: # return a generator object, iterate through the text in chunks of 3 char / chunk
|
if stream: # return a generator object, iterate through the text in chunks of 3 char / chunk
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue