(refactor) use helper function _assemble_complete_response_from_streaming_chunks to assemble complete responses in caching and logging callbacks (#6220)

* (refactor) use _assemble_complete_response_from_streaming_chunks

* add unit test for test_assemble_complete_response_from_streaming_chunks_1

* fix assembly of complete_streaming_response

* add logging_testing to config

* add logging_coverage to codecov

* add test_assemble_complete_response_from_streaming_chunks_3

* add unit tests for _assemble_complete_response_from_streaming_chunks

* remove unused / junk function

* add test for streaming_chunks when assembly fails
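For context on the refactor: the helper centralizes the chunk-joining logic that the caching and logging callbacks previously each reimplemented. Below is a minimal sketch of what such a helper plausibly does, assuming a buffer-then-fold design; the function name suffix, signature, parameter names, and fallback behavior are illustrative assumptions, not the code shipped in this commit (litellm.stream_chunk_builder is LiteLLM's public API for joining stream chunks):

from typing import Any, List, Optional

import litellm


def _assemble_complete_response_sketch(
    result: Any,
    streaming_chunks: List[Any],
    end_of_stream: bool,
) -> Optional[Any]:
    """Illustrative sketch, not the helper from this commit.

    Buffers each streamed chunk; once the stream ends, folds the buffer
    into one complete response so caching/logging callbacks can treat
    streaming and non-streaming calls uniformly.
    """
    streaming_chunks.append(result)
    if not end_of_stream:
        return None  # mid-stream: nothing to assemble yet
    try:
        return litellm.stream_chunk_builder(chunks=streaming_chunks)
    except Exception:
        # Assumed failure mode: a malformed chunk degrades to "no complete
        # response" rather than raising inside the callback hot path, which
        # is the behavior the "assembly fails" test bullet above exercises.
        return None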
Ishaan Jaff 2024-10-15 12:45:12 +05:30 committed by GitHub
parent e9a46b992c
commit a69c670baa
9 changed files with 571 additions and 90 deletions


@@ -280,6 +280,9 @@ class CompletionCustomHandler(
    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
        try:
            print(
                "in async_log_success_event", kwargs, response_obj, start_time, end_time
            )
            self.states.append("async_success")
            ## START TIME
            assert isinstance(start_time, datetime)
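Aside: CompletionCustomHandler is the test suite's CustomLogger subclass, and for streaming calls LiteLLM surfaces the response assembled from the chunks to success callbacks under kwargs["complete_streaming_response"]. A hedged, minimal handler of the same shape (the class name and print are illustrative, not the test's handler):

import litellm
from litellm.integrations.custom_logger import CustomLogger


class LogAssembledResponse(CustomLogger):
    # Illustrative handler, not CompletionCustomHandler from the tests.
    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
        # For streaming calls, the response assembled from the individual
        # chunks is surfaced under this kwargs key.
        complete = kwargs.get("complete_streaming_response")
        if complete is not None:
            print("assembled streaming response:", complete)


litellm.callbacks = [LogAssembledResponse()]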
@@ -522,6 +525,7 @@ async def test_async_chat_azure_stream():
@pytest.mark.asyncio
async def test_async_chat_openai_stream_options():
    try:
        litellm.set_verbose = True
        customHandler = CompletionCustomHandler()
        litellm.callbacks = [customHandler]
        with patch.object(
@@ -536,7 +540,7 @@ async def test_async_chat_openai_stream_options():
            async for chunk in response:
                continue
            print("mock client args list=", mock_client.await_args_list)
            mock_client.assert_awaited_once()
    except Exception as e:
        pytest.fail(f"An exception occurred: {str(e)}")
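The test pattern in this last hunk (patch the underlying client, drain the stream, then assert exactly one await) is plain unittest.mock. A self-contained sketch of the same pattern against a toy client, with all names illustrative:

import asyncio
from unittest.mock import AsyncMock, patch


class ToyClient:
    async def complete(self, prompt: str) -> str:
        return "real response"


async def call_once(client: ToyClient) -> None:
    await client.complete("hi")


def test_complete_awaited_once():
    # patch.object swaps the coroutine method for an AsyncMock, so the
    # test can count awaits without hitting a real backend.
    with patch.object(ToyClient, "complete", new_callable=AsyncMock) as mock_client:
        asyncio.run(call_once(ToyClient()))
        mock_client.assert_awaited_once()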