mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-26 11:14:04 +00:00
mock responses for streaming
This commit is contained in:
parent
3c1901216d
commit
f944eaee4b
5 changed files with 41 additions and 3 deletions
|
@ -954,6 +954,30 @@ def batch_completion(
|
|||
return results
|
||||
|
||||
|
||||
def _mock_completion_streaming_obj(mock_response: str):
    """Yield streaming-style chunks of *mock_response*, 3 characters per chunk.

    Each chunk mimics the OpenAI streaming delta format:
    {"choices": [{"delta": {"role": ..., "content": ...}, "finish_reason": None}]}
    """
    for i in range(0, len(mock_response), 3):
        completion_obj = {"role": "assistant", "content": mock_response[i: i + 3]}
        yield {
            "choices":
                [
                    {
                        "delta": completion_obj,
                        "finish_reason": None
                    },
                ]
        }


def mock_completion(model: str, messages: List, stream: bool = False, mock_response: str = "This is a mock request"):
    """Return a canned completion without calling any provider.

    Args:
        model: Model name (recorded for interface parity; not used to route).
        messages: Chat messages (accepted for interface parity; not inspected).
        stream: When True, return a generator that yields the mock response
            in 3-character chunks mimicking the streaming delta format.
        mock_response: The text to return/stream back to the caller.

    Returns:
        A generator of chunk dicts when ``stream`` is True, otherwise a
        ``ModelResponse`` whose first choice contains ``mock_response``.

    Raises:
        Exception: If building the mock response fails.
    """
    try:
        if stream:
            # Delegate to a helper generator: keeping `yield` out of this
            # function's body means the non-streaming branch can actually
            # return a ModelResponse (a `yield` anywhere would turn the whole
            # function into a generator and make `return` useless to callers).
            return _mock_completion_streaming_obj(mock_response)
        ## RESPONSE OBJECT
        model_response = ModelResponse()
        # Use the caller-supplied mock text (previously hardcoded, which
        # silently ignored a custom mock_response).
        model_response["choices"][0]["message"]["content"] = mock_response
        model_response["created"] = time.time()
        model_response["model"] = "MockResponse"
        return model_response
    except Exception as e:
        # Narrowed from bare `except:` so KeyboardInterrupt/SystemExit pass
        # through; chain the cause for debuggability.
        raise Exception("Mock completion response failed") from e
|
||||
### EMBEDDING ENDPOINTS ####################
|
||||
@client
|
||||
@timeout( # type: ignore
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue