Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 10:44:24 +00:00)
fix(utils.py): enable streaming cache logging
parent b011c8b93a
commit 2d62dee712
2 changed files with 12 additions and 7 deletions
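The change below is small but behavioral: both the sync and async success handlers iterate over the configured callbacks, and when the cache callback ran on a stream that had not yet produced a complete_streaming_response, the old "return" aborted the entire loop, silently skipping every remaining callback. Swapping "return" for "pass" skips only the cache write. A minimal, self-contained sketch of that difference follows; the run_callbacks helper and the callback names are hypothetical stand-ins, not litellm's actual code:

# Hypothetical, self-contained sketch (not litellm's actual code): why
# swapping "return" for "pass" inside the callback loop matters.
def run_callbacks(callbacks, model_call_details, abort_on_partial_stream):
    ran = []
    for callback in callbacks:
        if callback == "cache":
            if "complete_streaming_response" not in model_call_details:
                if abort_on_partial_stream:
                    return ran  # old behavior: "return" kills the whole loop
                pass  # new behavior: skip the cache write, keep looping
            else:
                ran.append("cache-write")
            continue
        ran.append(callback)
    return ran

details = {"stream": True}  # streaming response not complete yet
print(run_callbacks(["cache", "langfuse"], details, True))   # []
print(run_callbacks(["cache", "langfuse"], details, False))  # ['langfuse']

With "return", a second configured callback (here the hypothetical "langfuse") never fires for in-flight streams; with "pass" it still runs.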
utils.py

@@ -1411,7 +1411,7 @@ class Logging:
                     print_verbose(
                         f"success_callback: reaches cache for logging, there is no complete_streaming_response. Kwargs={kwargs}\n\n"
                     )
-                    return
+                    pass
                 else:
                     print_verbose(
                         "success_callback: reaches cache for logging, there is a complete_streaming_response. Adding to cache"
@@ -1616,7 +1616,7 @@ class Logging:
                     print_verbose(
                         f"async success_callback: reaches cache for logging, there is no complete_streaming_response. Kwargs={kwargs}\n\n"
                     )
-                    return
+                    pass
                 else:
                     print_verbose(
                         "async success_callback: reaches cache for logging, there is a complete_streaming_response. Adding to cache"
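Both hunks above preserve the same invariant: the cache is written only once, after the stream has been fully assembled into complete_streaming_response. A self-contained sketch of that pattern, with a plain dict standing in for litellm.cache and string chunks standing in for real response chunks (all names here are illustrative assumptions):

# Hypothetical sketch: cache a streaming response only once it is complete.
cache = {}

def add_cache(result, **kwargs):
    # stand-in for litellm.cache.add_cache(result, **kwargs)
    cache[kwargs["cache_key"]] = result

def handle_stream(chunks, model_call_details, **kwargs):
    collected = []
    for chunk in chunks:
        collected.append(chunk)  # per-chunk handling runs here; no cache write yet
    # only add to cache once we have a complete streaming response
    model_call_details["complete_streaming_response"] = "".join(collected)
    add_cache(model_call_details["complete_streaming_response"], **kwargs)

details = {"stream": True}
handle_stream(["Hel", "lo ", "world"], details, cache_key="demo-key")
print(cache)  # {'demo-key': 'Hello world'}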
@@ -1625,8 +1625,10 @@ class Logging:
                             # only add to cache once we have a complete streaming response
                             litellm.cache.add_cache(result, **kwargs)
             if isinstance(callback, CustomLogger):  # custom logger class
-                print_verbose(f"Async success callbacks: {callback}")
-                if self.stream:
+                print_verbose(
+                    f"Async success callbacks: {callback}; self.stream: {self.stream}; complete_streaming_response: {self.model_call_details.get('complete_streaming_response', None)}"
+                )
+                if self.stream == True:
                     if "complete_streaming_response" in self.model_call_details:
                         await callback.async_log_success_event(
                             kwargs=self.model_call_details,
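For CustomLogger instances, the async handler now prints the stream state and the (possibly absent) complete_streaming_response before dispatching, and it still fires async_log_success_event only once the full response exists. A self-contained sketch of that dispatch follows; the CustomLogger base below is a stand-in for litellm's class, and only the kwargs= parameter of async_log_success_event is confirmed by the diff:

import asyncio

class CustomLogger:
    # Stand-in for litellm's CustomLogger; only the kwargs= parameter
    # is confirmed by the diff above, the rest is assumed.
    async def async_log_success_event(self, kwargs):
        raise NotImplementedError

class PrintLogger(CustomLogger):
    async def async_log_success_event(self, kwargs):
        print(f"logged: {kwargs['complete_streaming_response']}")

async def async_success_handler(callback, model_call_details, stream):
    if isinstance(callback, CustomLogger):  # custom logger class
        if stream:
            # mirrors the new, more verbose print_verbose in the diff
            print(
                f"Async success callbacks: {callback}; stream: {stream}; "
                f"complete_streaming_response: "
                f"{model_call_details.get('complete_streaming_response', None)}"
            )
        if stream and "complete_streaming_response" in model_call_details:
            # fire the custom logger only for fully assembled streams
            await callback.async_log_success_event(kwargs=model_call_details)

details = {"stream": True, "complete_streaming_response": "Hello world"}
asyncio.run(async_success_handler(PrintLogger(), details, stream=True))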