fix testing spend_tracking

Ishaan Jaff 2024-06-21 16:52:42 -07:00
parent 29c2155796
commit fff928b10b
4 changed files with 86 additions and 72 deletions


@@ -2,9 +2,14 @@
 ## This tests the blocked user pre call hook for the proxy server
-import sys, os, asyncio, time, random
-from datetime import datetime
+import asyncio
+import os
+import random
+import sys
+import time
+import traceback
+from datetime import datetime
 from dotenv import load_dotenv
 from fastapi import Request
@@ -14,57 +19,53 @@ import os
 sys.path.insert(
     0, os.path.abspath("../..")
 )  # Adds the parent directory to the system path
+import asyncio
+import logging
+import pytest
+import litellm
+from litellm import Router, mock_completion
+from litellm._logging import verbose_proxy_logger
+from litellm.caching import DualCache
+from litellm.proxy._types import UserAPIKeyAuth
 from litellm.proxy.enterprise.enterprise_hooks.blocked_user_list import (
     _ENTERPRISE_BlockedUserList,
 )
-from litellm import Router, mock_completion
-from litellm.proxy.utils import ProxyLogging, hash_token
-from litellm.proxy._types import UserAPIKeyAuth
-from litellm.caching import DualCache
-from litellm.proxy.utils import PrismaClient, ProxyLogging, hash_token
-import pytest, logging, asyncio
-import litellm, asyncio
-from litellm.proxy.proxy_server import (
-    user_api_key_auth,
-    block_user,
+from litellm.proxy.management_endpoints.internal_user_endpoints import (
+    new_user,
+    user_info,
+    user_update,
 )
 from litellm.proxy.management_endpoints.key_management_endpoints import (
     delete_key_fn,
-    info_key_fn,
-    update_key_fn,
     generate_key_fn,
     generate_key_helper_fn,
+    info_key_fn,
+    update_key_fn,
 )
-from litellm.proxy.management_endpoints.internal_user_endpoints import (
-    new_user,
-    user_update,
-    user_info,
-)
-from litellm.proxy.spend_reporting_endpoints.spend_management_endpoints import (
-    spend_user_fn,
+from litellm.proxy.proxy_server import block_user, user_api_key_auth
+from litellm.proxy.spend_tracking.spend_management_endpoints import (
     spend_key_fn,
+    spend_user_fn,
     view_spend_logs,
 )
 from litellm.proxy.utils import PrismaClient, ProxyLogging, hash_token
-from litellm._logging import verbose_proxy_logger
 verbose_proxy_logger.setLevel(level=logging.DEBUG)
 from starlette.datastructures import URL
 from litellm.caching import DualCache
 from litellm.proxy._types import (
-    NewUserRequest,
-    GenerateKeyRequest,
-    DynamoDBArgs,
-    KeyRequest,
-    UpdateKeyRequest,
-    GenerateKeyRequest,
     BlockUsers,
+    DynamoDBArgs,
+    GenerateKeyRequest,
+    KeyRequest,
+    NewUserRequest,
+    UpdateKeyRequest,
 )
 from litellm.proxy.utils import DBClient
 from starlette.datastructures import URL
 from litellm.caching import DualCache
 proxy_logging_obj = ProxyLogging(user_api_key_cache=DualCache())


@@ -75,7 +75,7 @@ from litellm.proxy.proxy_server import (
     new_end_user,
     user_api_key_auth,
 )
-from litellm.proxy.spend_reporting_endpoints.spend_management_endpoints import (
+from litellm.proxy.spend_tracking.spend_management_endpoints import (
     spend_key_fn,
     spend_user_fn,
     view_spend_logs,


@@ -1,26 +1,32 @@
-import sys, os
-import traceback, uuid
+import os
+import sys
+import traceback
+import uuid
 from dotenv import load_dotenv
 from fastapi import Request
 from fastapi.routing import APIRoute
 load_dotenv()
-import os, io, time
+import io
+import os
+import time
 # this file is to test litellm/proxy
 sys.path.insert(
     0, os.path.abspath("../..")
 )  # Adds the parent directory to the system path
-import pytest, logging, asyncio
-import litellm, asyncio
-import json
+import asyncio
 import datetime
-from litellm.proxy.utils import (
-    get_logging_payload,
-    SpendLogsPayload,
-    SpendLogsMetadata,
-)  # noqa: E402
+import json
+import logging
+import pytest
+import litellm
+from litellm.proxy.spend_tracking.spend_tracking_utils import get_logging_payload
+from litellm.proxy.utils import SpendLogsMetadata, SpendLogsPayload  # noqa: E402
 def test_spend_logs_payload():
@@ -53,6 +59,7 @@ def test_spend_logs_payload():
 "model_alias_map": {},
 "completion_call_id": None,
 "metadata": {
+    "tags": ["model-anthropic-claude-v2.1", "app-ishaan-prod"],
     "user_api_key": "88dc28d0f030c55ed4ab77ed8faf098196cb1c05df778539800c9f1243fe6b4b",
     "user_api_key_alias": None,
     "user_api_end_user_max_budget": None,
@@ -193,3 +200,8 @@ def test_spend_logs_payload():
     assert isinstance(payload["metadata"], str)
     payload["metadata"] = json.loads(payload["metadata"])
     assert set(payload["metadata"].keys()) == set(expected_metadata_keys)
+
+    # This is crucial - used in PROD, it should pass, related issue: https://github.com/BerriAI/litellm/issues/4334
+    assert (
+        payload["request_tags"] == '["model-anthropic-claude-v2.1", "app-ishaan-prod"]'
+    )


@@ -2,9 +2,14 @@
 ## This tests the batch update spend logic on the proxy server
-import sys, os, asyncio, time, random
-from datetime import datetime
+import asyncio
+import os
+import random
+import sys
+import time
+import traceback
+from datetime import datetime
 from dotenv import load_dotenv
 from fastapi import Request
@@ -14,54 +19,50 @@ import os
 sys.path.insert(
     0, os.path.abspath("../..")
 )  # Adds the parent directory to the system path
+import asyncio
+import logging
+import pytest
+import litellm
 from litellm import Router, mock_completion
-from litellm.proxy.utils import ProxyLogging
-from litellm.proxy._types import UserAPIKeyAuth
 from litellm._logging import verbose_proxy_logger
 from litellm.caching import DualCache
-from litellm.proxy.utils import PrismaClient, ProxyLogging, hash_token
-import pytest, logging, asyncio
-import litellm, asyncio
-from litellm.proxy.proxy_server import (
-    user_api_key_auth,
-    block_user,
-)
-from litellm.proxy.spend_reporting_endpoints.spend_management_endpoints import (
-    spend_user_fn,
-    spend_key_fn,
-    view_spend_logs,
-)
 from litellm.proxy._types import UserAPIKeyAuth
 from litellm.proxy.management_endpoints.internal_user_endpoints import (
     new_user,
-    user_update,
     user_info,
+    user_update,
 )
 from litellm.proxy.management_endpoints.key_management_endpoints import (
     delete_key_fn,
-    info_key_fn,
-    update_key_fn,
     generate_key_fn,
     generate_key_helper_fn,
+    info_key_fn,
+    update_key_fn,
 )
+from litellm.proxy.proxy_server import block_user, user_api_key_auth
+from litellm.proxy.spend_tracking.spend_management_endpoints import (
+    spend_key_fn,
+    spend_user_fn,
+    view_spend_logs,
+)
 from litellm.proxy.utils import PrismaClient, ProxyLogging, hash_token, update_spend
-from litellm._logging import verbose_proxy_logger
 verbose_proxy_logger.setLevel(level=logging.DEBUG)
 from starlette.datastructures import URL
 from litellm.caching import DualCache
 from litellm.proxy._types import (
-    NewUserRequest,
-    GenerateKeyRequest,
-    DynamoDBArgs,
-    KeyRequest,
-    UpdateKeyRequest,
-    GenerateKeyRequest,
     BlockUsers,
+    DynamoDBArgs,
+    GenerateKeyRequest,
+    KeyRequest,
+    NewUserRequest,
+    UpdateKeyRequest,
 )
 from litellm.proxy.utils import DBClient
 from starlette.datastructures import URL
 from litellm.caching import DualCache
 proxy_logging_obj = ProxyLogging(user_api_key_cache=DualCache())