Fix batches API cost tracking + Log batch models in spend logs / standard logging payload (#9077)

* feat(batches/): fix batch cost calculation - ensure it's accurate

use the correct batch cost value; previously this defaulted to the non-batch cost

* feat(batch_utils.py): log batch models to spend logs + standard logging payload

makes it easy to understand how the cost was calculated

* fix: fix stored payload for test

* test: fix test
Krish Dholakia authored on 2025-03-08 11:47:25 -08:00; committed by GitHub
parent 8c049dfffc
commit 4330ef8e81
8 changed files with 110 additions and 7 deletions
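To make the cost fix above concrete, here is a minimal illustrative sketch of why the batch flag matters when picking per-token rates. The function, field names, and prices below are hypothetical and are not LiteLLM's actual pricing code or model cost map; they only illustrate the "defaulting to non-batch cost" bug described in the commit message.

from typing import TypedDict


class ModelRates(TypedDict):
    # Hypothetical rate fields; real model cost maps may use different keys.
    input_cost_per_token: float
    output_cost_per_token: float
    batch_input_cost_per_token: float
    batch_output_cost_per_token: float


def calculate_cost(
    rates: ModelRates, prompt_tokens: int, completion_tokens: int, is_batch: bool
) -> float:
    # The bug described above: batch requests were priced with the
    # non-batch rates, so the tracked spend did not match batch pricing.
    input_rate = rates["batch_input_cost_per_token"] if is_batch else rates["input_cost_per_token"]
    output_rate = rates["batch_output_cost_per_token"] if is_batch else rates["output_cost_per_token"]
    return prompt_tokens * input_rate + completion_tokens * output_rate


rates = ModelRates(
    input_cost_per_token=1e-6,
    output_cost_per_token=2e-6,
    batch_input_cost_per_token=5e-7,
    batch_output_cost_per_token=1e-6,
)
# 1,000 prompt + 500 completion tokens: 0.002 at normal rates, 0.001 at batch rates.
print(calculate_cost(rates, 1000, 500, is_batch=False))  # 0.002
print(calculate_cost(rates, 1000, 500, is_batch=True))   # 0.001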


@@ -35,7 +35,9 @@ def _is_master_key(api_key: str, _master_key: Optional[str]) -> bool:
 def _get_spend_logs_metadata(
-    metadata: Optional[dict], applied_guardrails: Optional[List[str]] = None
+    metadata: Optional[dict],
+    applied_guardrails: Optional[List[str]] = None,
+    batch_models: Optional[List[str]] = None,
 ) -> SpendLogsMetadata:
     if metadata is None:
         return SpendLogsMetadata(
@@ -52,6 +54,7 @@ def _get_spend_logs_metadata(
             status=None or "success",
             error_information=None,
             proxy_server_request=None,
+            batch_models=None,
         )
     verbose_proxy_logger.debug(
         "getting payload for SpendLogs, available keys in metadata: "
@@ -67,7 +70,7 @@ def _get_spend_logs_metadata(
         }
     )
     clean_metadata["applied_guardrails"] = applied_guardrails
+    clean_metadata["batch_models"] = batch_models
     return clean_metadata
@@ -192,6 +195,11 @@ def get_logging_payload(  # noqa: PLR0915
             if standard_logging_payload is not None
             else None
         ),
+        batch_models=(
+            standard_logging_payload.get("hidden_params", {}).get("batch_models", None)
+            if standard_logging_payload is not None
+            else None
+        ),
     )
     special_usage_fields = ["completion_tokens", "prompt_tokens", "total_tokens"]
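Taken together, the hunks above thread a batch_models list from the standard logging payload's hidden_params into the spend-log metadata. Below is a rough sketch of how a caller could exercise the new parameter; _get_spend_logs_metadata and the batch_models field come from the diff, while the import path, payload values, and request metadata are assumptions made up for illustration.

# Assumed import path; the diff above does not show the file name.
from litellm.proxy.spend_tracking.spend_tracking_utils import _get_spend_logs_metadata

# Illustrative values only; the real standard_logging_payload is built by
# LiteLLM's logging code, and the metadata dict comes from the request.
standard_logging_payload = {
    "metadata": {"applied_guardrails": None},
    "hidden_params": {"batch_models": ["gpt-4o-mini"]},
}

clean_metadata = _get_spend_logs_metadata(
    metadata={"user_api_key_user_id": "user-123"},  # hypothetical request metadata
    applied_guardrails=(
        standard_logging_payload["metadata"].get("applied_guardrails", None)
        if standard_logging_payload is not None
        else None
    ),
    batch_models=(
        standard_logging_payload.get("hidden_params", {}).get("batch_models", None)
        if standard_logging_payload is not None
        else None
    ),
)

# The spend-log row now records which models served the batch, which is what
# makes the batch cost auditable after the fact.
assert clean_metadata["batch_models"] == ["gpt-4o-mini"]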