Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 03:04:13 +00:00)
fix(caching.py): dual cache async_batch_get_cache fix + testing
This fixes a bug in usage-based-routing-v2 that was caused by how the result was being returned from the dual cache's async_batch_get_cache. It also adds unit tests for that function (and its sync equivalent).
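For context, below is a minimal sketch of the kind of unit test this commit describes. The DualCache construction, the set_cache/batch_get_cache/async_batch_get_cache call shapes, and the assumption that results come back as a list aligned with the requested keys (with None for misses) are inferred from the commit description; the actual tests added in this commit may differ.

import pytest
from litellm.caching import DualCache, InMemoryCache


@pytest.mark.asyncio
async def test_dual_cache_async_batch_get_cache():
    # Assumption: DualCache can be built with only an in-memory cache.
    cache = DualCache(in_memory_cache=InMemoryCache())

    # Seed one key so the batch lookup sees a mix of hits and misses.
    cache.set_cache("hit_key", "hit_value")

    # Assumption: async_batch_get_cache returns values aligned with the
    # requested keys, using None for keys that are not cached.
    result = await cache.async_batch_get_cache(keys=["hit_key", "miss_key"])
    assert result == ["hit_value", None]


def test_dual_cache_batch_get_cache():
    # Sync equivalent of the async test above, same assumptions.
    cache = DualCache(in_memory_cache=InMemoryCache())
    cache.set_cache("hit_key", "hit_value")

    result = cache.batch_get_cache(keys=["hit_key", "miss_key"])
    assert result == ["hit_value", None]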
This commit is contained in:
parent 3c6b6355c7
commit 01a1a8f731
8 changed files with 149 additions and 10 deletions
@@ -152,7 +152,6 @@ class PrometheusServicesLogger:
        if self.mock_testing:
            self.mock_testing_success_calls += 1
        print(f"LOGS SUCCESSFUL CALL TO PROMETHEUS - payload={payload}")
        if payload.service.value in self.payload_to_prometheus_map:
            prom_objects = self.payload_to_prometheus_map[payload.service.value]
            for obj in prom_objects: