Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-27 03:34:10 +00:00)
feat(prometheus_services.py): emit proxy latency for successful llm api requests
uses prometheus histogram for this
parent 1b98503be3
commit 7f5bcf38b7
6 changed files with 87 additions and 20 deletions
@@ -30,6 +30,7 @@ class PrometheusServicesLogger:
             raise Exception(
                 "Missing prometheus_client. Run `pip install prometheus-client`"
             )
+        print("INITIALIZES PROMETHEUS SERVICE LOGGER!")
 
         self.Histogram = Histogram
         self.Counter = Counter
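For context, the hunk above keeps references to the prometheus_client metric classes on the instance (self.Histogram, self.Counter) so metrics can be constructed after the import check has succeeded. A minimal sketch of building a per-service latency histogram from such a stored reference; the helper name, metric name, and help text are illustrative, not taken from this commit:

from prometheus_client import Histogram

class MetricFactory:
    def __init__(self):
        # Stored class reference, as in the diff above; lets metric
        # construction happen only after the import check passed.
        self.Histogram = Histogram

    def latency_histogram(self, service: str):
        # Hypothetical helper: builds a per-service latency histogram.
        return self.Histogram(
            f"litellm_{service}_latency_seconds",
            f"Latency of {service} requests, in seconds",
        )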
@@ -151,6 +152,7 @@ class PrometheusServicesLogger:
         if self.mock_testing:
             self.mock_testing_success_calls += 1
 
+        print(f"LOGS SUCCESSFUL CALL TO PROMETHEUS - payload={payload}")
         if payload.service.value in self.payload_to_prometheus_map:
             prom_objects = self.payload_to_prometheus_map[payload.service.value]
             for obj in prom_objects:
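The hunks above add debug prints around the path that records latency; the mechanism itself is the Prometheus Histogram named in the commit message. A minimal, self-contained sketch of that pattern with prometheus_client (the metric name, label, and observed value are assumptions for illustration, not taken from this commit):

import time
from prometheus_client import Histogram

# Illustrative metric; the name and label are assumptions, not from the commit.
PROXY_LATENCY = Histogram(
    "litellm_proxy_latency_seconds",
    "Latency of successful LLM API requests through the proxy",
    labelnames=["service"],
)

start = time.monotonic()
# ... make the LLM API call here ...
PROXY_LATENCY.labels(service="openai").observe(time.monotonic() - start)

A histogram is a natural fit here because it records a distribution (request count, summed latency, and per-bucket counts) rather than a single gauge value, which lets Prometheus compute latency percentiles across proxy instances at query time.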