(refactor) - migrate router.deployment_callback_on_success to use StandardLoggingPayload (#7015)

* migrate deployment_callback_on_success to use StandardLoggingPayload (SLP)
* test_deployment_callback_on_success
This commit is contained in:
parent 93c419868e
commit 54407b4592

3 changed files with 145 additions and 9 deletions
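In short, the callback now sources model_group, model_id, and total_tokens from the standard logging payload rather than re-deriving them from litellm_params metadata and model_info. A minimal sketch of the new lookup, condensed into a hypothetical free-standing helper (the real logic lives inline in Router.deployment_callback_on_success, shown in the diff below):

from typing import Optional

from litellm.types.utils import StandardLoggingPayload


def extract_tracking_fields(kwargs: dict):
    # Hypothetical helper, not part of this commit: condenses the new
    # SLP-based lookup that the migrated callback performs inline.
    slp: Optional[StandardLoggingPayload] = kwargs.get("standard_logging_object", None)
    if slp is None:
        raise ValueError("standard_logging_object is None")
    model_group = slp.get("model_group", None)
    model_id = slp.get("model_id", None)
    if model_group is None or model_id is None:
        return None  # nothing to track
    return model_group, str(model_id), slp.get("total_tokens", 0)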
litellm/router.py

@@ -136,6 +136,7 @@ from litellm.types.router import (
 from litellm.types.services import ServiceLoggerPayload, ServiceTypes
 from litellm.types.utils import OPENAI_RESPONSE_HEADERS
 from litellm.types.utils import ModelInfo as ModelMapInfo
+from litellm.types.utils import StandardLoggingPayload
 from litellm.utils import (
     CustomStreamWrapper,
     ModelResponse,
@@ -3297,26 +3298,26 @@ class Router:
         Track remaining tpm/rpm quota for model in model_list
         """
         try:
+            standard_logging_object: Optional[StandardLoggingPayload] = kwargs.get(
+                "standard_logging_object", None
+            )
+            if standard_logging_object is None:
+                raise ValueError("standard_logging_object is None")
             if kwargs["litellm_params"].get("metadata") is None:
                 pass
             else:
                 deployment_name = kwargs["litellm_params"]["metadata"].get(
                     "deployment", None
                 )  # stable name - works for wildcard routes as well
-                model_group = kwargs["litellm_params"]["metadata"].get(
-                    "model_group", None
-                )
-                model_info = kwargs["litellm_params"].get("model_info", {}) or {}
-                id = model_info.get("id", None)
+                model_group = standard_logging_object.get("model_group", None)
+                id = standard_logging_object.get("model_id", None)
                 if model_group is None or id is None:
                     return
                 elif isinstance(id, int):
                     id = str(id)

                 parent_otel_span = _get_parent_otel_span_from_kwargs(kwargs)
-                _usage_obj = completion_response.get("usage")
-                total_tokens = _usage_obj.get("total_tokens", 0) if _usage_obj else 0
+                total_tokens: float = standard_logging_object.get("total_tokens", 0)

                 # ------------
                 # Setup values
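Worth noting in this hunk: when the payload is absent, the callback now raises inside its try block, so tpm/rpm tracking fails loudly instead of silently reading usage off completion_response. A standalone illustration of that guard (hypothetical snippet, not from the diff):

# With no standard_logging_object in kwargs, tracking aborts via ValueError.
kwargs = {"litellm_params": {"metadata": {"model_group": "gpt-3.5-turbo"}}}

try:
    standard_logging_object = kwargs.get("standard_logging_object", None)
    if standard_logging_object is None:
        raise ValueError("standard_logging_object is None")
except ValueError as exc:
    print(f"tpm/rpm tracking skipped: {exc}")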
tests/router_unit_tests/create_mock_standard_logging_payload.py (new file, 131 lines)
@@ -0,0 +1,131 @@
import io
import os
import sys


sys.path.insert(0, os.path.abspath("../.."))

import asyncio
import gzip
import json
import logging
import time
from unittest.mock import AsyncMock, patch

import pytest

import litellm
from litellm import completion
from litellm._logging import verbose_logger
from litellm.integrations.datadog.datadog import *
from datetime import datetime, timedelta
from litellm.types.utils import (
    StandardLoggingPayload,
    StandardLoggingModelInformation,
    StandardLoggingMetadata,
    StandardLoggingHiddenParams,
)

verbose_logger.setLevel(logging.DEBUG)


def create_standard_logging_payload() -> StandardLoggingPayload:
    return StandardLoggingPayload(
        id="test_id",
        call_type="completion",
        response_cost=0.1,
        response_cost_failure_debug_info=None,
        status="success",
        total_tokens=30,
        prompt_tokens=20,
        completion_tokens=10,
        startTime=1234567890.0,
        endTime=1234567891.0,
        completionStartTime=1234567890.5,
        model_map_information=StandardLoggingModelInformation(
            model_map_key="gpt-3.5-turbo", model_map_value=None
        ),
        model="gpt-3.5-turbo",
        model_id="model-123",
        model_group="openai-gpt",
        api_base="https://api.openai.com",
        metadata=StandardLoggingMetadata(
            user_api_key_hash="test_hash",
            user_api_key_org_id=None,
            user_api_key_alias="test_alias",
            user_api_key_team_id="test_team",
            user_api_key_user_id="test_user",
            user_api_key_team_alias="test_team_alias",
            spend_logs_metadata=None,
            requester_ip_address="127.0.0.1",
            requester_metadata=None,
        ),
        cache_hit=False,
        cache_key=None,
        saved_cache_cost=0.0,
        request_tags=[],
        end_user=None,
        requester_ip_address="127.0.0.1",
        messages=[{"role": "user", "content": "Hello, world!"}],
        response={"choices": [{"message": {"content": "Hi there!"}}]},
        error_str=None,
        model_parameters={"stream": True},
        hidden_params=StandardLoggingHiddenParams(
            model_id="model-123",
            cache_key=None,
            api_base="https://api.openai.com",
            response_cost="0.1",
            additional_headers=None,
        ),
    )


def create_standard_logging_payload_with_long_content() -> StandardLoggingPayload:
    return StandardLoggingPayload(
        id="test_id",
        call_type="completion",
        response_cost=0.1,
        response_cost_failure_debug_info=None,
        status="success",
        total_tokens=30,
        prompt_tokens=20,
        completion_tokens=10,
        startTime=1234567890.0,
        endTime=1234567891.0,
        completionStartTime=1234567890.5,
        model_map_information=StandardLoggingModelInformation(
            model_map_key="gpt-3.5-turbo", model_map_value=None
        ),
        model="gpt-3.5-turbo",
        model_id="model-123",
        model_group="openai-gpt",
        api_base="https://api.openai.com",
        metadata=StandardLoggingMetadata(
            user_api_key_hash="test_hash",
            user_api_key_org_id=None,
            user_api_key_alias="test_alias",
            user_api_key_team_id="test_team",
            user_api_key_user_id="test_user",
            user_api_key_team_alias="test_team_alias",
            spend_logs_metadata=None,
            requester_ip_address="127.0.0.1",
            requester_metadata=None,
        ),
        cache_hit=False,
        cache_key=None,
        saved_cache_cost=0.0,
        request_tags=[],
        end_user=None,
        requester_ip_address="127.0.0.1",
        messages=[{"role": "user", "content": "Hello, world!" * 80000}],
        response={"choices": [{"message": {"content": "Hi there!" * 80000}}]},
        error_str="error_str" * 80000,
        model_parameters={"stream": True},
        hidden_params=StandardLoggingHiddenParams(
            model_id="model-123",
            cache_key=None,
            api_base="https://api.openai.com",
            response_cost="0.1",
            additional_headers=None,
        ),
    )
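StandardLoggingPayload is a TypedDict, so the factory's return value is a plain dict at runtime and tests can override individual fields by key. A short usage sketch (assuming it runs from tests/router_unit_tests/ so the module import resolves):

from create_mock_standard_logging_payload import create_standard_logging_payload

payload = create_standard_logging_payload()
payload["total_tokens"] = 100  # dict-style per-test override
assert payload["model_group"] == "openai-gpt"
assert payload["model_id"] == "model-123"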
tests/router_unit_tests/test_router_helper_utils.py

@@ -12,6 +12,8 @@ from litellm import Router
 import pytest
 import litellm
 from unittest.mock import patch, MagicMock, AsyncMock
+from create_mock_standard_logging_payload import create_standard_logging_payload
+from litellm.types.utils import StandardLoggingPayload


 @pytest.fixture
@@ -366,7 +368,8 @@ async def test_deployment_callback_on_success(model_list, sync_mode):
     import time

     router = Router(model_list=model_list)
+    standard_logging_payload = create_standard_logging_payload()
+    standard_logging_payload["total_tokens"] = 100
     kwargs = {
         "litellm_params": {
             "metadata": {
@@ -374,6 +377,7 @@ async def test_deployment_callback_on_success(model_list, sync_mode):
             },
             "model_info": {"id": 100},
         },
+        "standard_logging_object": standard_logging_payload,
     }
     response = litellm.ModelResponse(
         model="gpt-3.5-turbo",
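The visible hunks end with the kwargs setup; for orientation, a hedged sketch of how the test presumably invokes the migrated callback with these values (the call shape is inferred from the commit title and litellm's standard success-callback signature; the test's assertions are not shown in this view):

import time

# Assumed invocation; deployment_callback_on_success is an async method on Router.
await router.deployment_callback_on_success(
    kwargs=kwargs,
    completion_response=response,
    start_time=time.time(),
    end_time=time.time(),
)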