Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 18:54:30 +00:00)
(refactor) use helper function _assemble_complete_response_from_streaming_chunks to assemble complete responses in caching and logging callbacks (#6220)

* (refactor) use _assemble_complete_response_from_streaming_chunks
* add unit test for test_assemble_complete_response_from_streaming_chunks_1
* fix assemble complete_streaming_response
* config add logging_testing
* add logging_coverage in codecov
* test test_assemble_complete_response_from_streaming_chunks_3
* add unit tests for _assemble_complete_response_from_streaming_chunks
* fix remove unused / junk function
* add test for streaming_chunks when error assembling
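The refactor these changes test centralizes one pattern: callbacks collect streaming chunks and then build a single complete response from them, returning None instead of raising when assembly fails (see the last bullet above). Below is a minimal sketch of that contract, assuming litellm's public `stream_chunk_builder` utility as the assembler; the actual private helper may differ in detail:

```python
# Minimal sketch of the chunk-assembly contract, not litellm's actual helper.
from typing import List, Optional

import litellm
from litellm import ModelResponse


def assemble_complete_response(
    chunks: List[ModelResponse],
    messages: Optional[list] = None,
) -> Optional[ModelResponse]:
    """Combine collected streaming chunks into one complete response."""
    if len(chunks) == 0:
        return None
    try:
        # stream_chunk_builder is litellm's public chunk-assembly utility.
        return litellm.stream_chunk_builder(chunks=chunks, messages=messages)
    except Exception:
        # Caching/logging callbacks must not break the request path, so
        # assembly errors yield None rather than propagating.
        return None
```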
This commit is contained in:

Parent: e9a46b992c
Commit: a69c670baa

9 changed files with 571 additions and 90 deletions
```diff
@@ -280,6 +280,9 @@ class CompletionCustomHandler(
     async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
         try:
+            print(
+                "in async_log_success_event", kwargs, response_obj, start_time, end_time
+            )
             self.states.append("async_success")
             ## START TIME
             assert isinstance(start_time, datetime)
```
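The hunk above adds a debug print inside the test handler's `async_log_success_event` hook. For orientation, here is a minimal sketch of such a callback using litellm's documented `CustomLogger` interface; the class name and body are illustrative, not the test's actual handler:

```python
import litellm
from litellm.integrations.custom_logger import CustomLogger


class MinimalSuccessLogger(CustomLogger):
    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
        # For streaming calls, response_obj is the complete response that
        # litellm assembled from the collected stream chunks.
        print("async success:", response_obj, "duration:", end_time - start_time)


# Registering the handler makes litellm invoke it after each successful call.
litellm.callbacks = [MinimalSuccessLogger()]
```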
```diff
@@ -522,6 +525,7 @@ async def test_async_chat_azure_stream():
 @pytest.mark.asyncio
 async def test_async_chat_openai_stream_options():
     try:
         litellm.set_verbose = True
         customHandler = CompletionCustomHandler()
         litellm.callbacks = [customHandler]
         with patch.object(
```
```diff
@@ -536,7 +540,7 @@ async def test_async_chat_openai_stream_options():
             async for chunk in response:
                 continue

             print("mock client args list=", mock_client.await_args_list)
             mock_client.assert_awaited_once()
     except Exception as e:
         pytest.fail(f"An exception occurred: {str(e)}")
```
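For context, a hedged usage sketch of the call path this test exercises: an async streaming completion with `stream_options`, whose chunks are drained so that the registered callback later fires with the assembled complete response. Model and message values are placeholders:

```python
import asyncio

import litellm


async def main():
    response = await litellm.acompletion(
        model="gpt-3.5-turbo",  # placeholder model
        messages=[{"role": "user", "content": "Hi"}],
        stream=True,
        # include_usage asks the provider to append a final usage chunk.
        stream_options={"include_usage": True},
    )
    async for chunk in response:
        pass  # drain the stream; callbacks fire after it completes


asyncio.run(main())
```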