Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 10:44:24 +00:00)
fix(test_parallel_request_limiter.py): use mock responses for streaming
parent 1ef7ad3416
commit b9393fb769

5 changed files with 35 additions and 5 deletions
@@ -1576,7 +1576,7 @@ class Logging:
                         # only add to cache once we have a complete streaming response
                         litellm.cache.add_cache(result, **kwargs)
                 if isinstance(callback, CustomLogger):  # custom logger class
-                    print_verbose(f"Async success callbacks: CustomLogger")
+                    print_verbose(f"Async success callbacks: {callback}")
                     if self.stream:
                         if "complete_streaming_response" in self.model_call_details:
                             await callback.async_log_success_event(
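For orientation, the hunk above runs the async success path for custom logger callbacks: once a complete streaming response has been assembled, async_log_success_event is awaited on each CustomLogger. Below is a standalone stand-in sketch of that hook; the exact hook signature and how the logger is registered with litellm are assumptions for illustration, not shown by this diff.

import asyncio
import datetime


# Standalone stand-in for a CustomLogger-style callback (assumption: the hook
# signature mirrors litellm's async_log_success_event; it is not shown in this diff).
class StubSuccessLogger:
    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
        # For streaming calls, this should only fire once the complete
        # streaming response has been assembled (see the hunk above).
        print("logged success for model:", kwargs.get("model"))


async def main():
    logger = StubSuccessLogger()
    now = datetime.datetime.now()
    await logger.async_log_success_event(
        kwargs={"model": "gpt-3.5-turbo"},
        response_obj={"choices": []},
        start_time=now,
        end_time=now,
    )


asyncio.run(main())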
@@ -8819,6 +8819,14 @@ def mock_completion_streaming_obj(model_response, mock_response, model):
         yield model_response
 
 
+async def async_mock_completion_streaming_obj(model_response, mock_response, model):
+    for i in range(0, len(mock_response), 3):
+        completion_obj = Delta(role="assistant", content=mock_response)
+        model_response.choices[0].delta = completion_obj
+        model_response.choices[0].finish_reason = "stop"
+        yield model_response
+
+
 ########## Reading Config File ############################
 def read_config_args(config_path) -> dict:
     try:
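The new async_mock_completion_streaming_obj mirrors the existing synchronous mock generator so async streaming calls can also be served from a mock_response. A rough, self-contained sketch of how such a generator is consumed follows; the SimpleNamespace stand-ins replace litellm's real ModelResponse/Delta objects purely for illustration.

import asyncio
from types import SimpleNamespace


# Copy of the generator added above, but with plain objects so the sketch runs
# on its own; litellm's real ModelResponse/Delta types are not imported here.
async def async_mock_completion_streaming_obj(model_response, mock_response, model):
    for i in range(0, len(mock_response), 3):
        completion_obj = SimpleNamespace(role="assistant", content=mock_response)
        model_response.choices[0].delta = completion_obj
        model_response.choices[0].finish_reason = "stop"
        yield model_response


async def main():
    # Minimal stand-in for a ModelResponse with a single streaming choice.
    model_response = SimpleNamespace(
        choices=[SimpleNamespace(delta=None, finish_reason=None)]
    )
    async for chunk in async_mock_completion_streaming_obj(
        model_response, mock_response="This is a mock reply", model="gpt-3.5-turbo"
    ):
        print(chunk.choices[0].delta.content, chunk.choices[0].finish_reason)


asyncio.run(main())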