diff --git a/litellm/tests/test_key_generate_prisma.py b/litellm/tests/test_key_generate_prisma.py
index 2eb693cf45..75379a58ab 100644
--- a/litellm/tests/test_key_generate_prisma.py
+++ b/litellm/tests/test_key_generate_prisma.py
@@ -23,6 +23,7 @@ import sys, os
 import traceback
 from dotenv import load_dotenv
 from fastapi import Request
+from fastapi.routing import APIRoute
 from datetime import datetime
 
 load_dotenv()
@@ -51,6 +52,7 @@ from litellm.proxy.proxy_server import (
     user_info,
     info_key_fn,
     new_team,
+    chat_completion,
 )
 from litellm.proxy.utils import PrismaClient, ProxyLogging, hash_token, update_spend
 from litellm._logging import verbose_proxy_logger
@@ -146,7 +148,13 @@ async def test_new_user_response(prisma_client):
         pytest.fail(f"Got exception {e}")
 
 
-def test_generate_and_call_with_valid_key(prisma_client):
+@pytest.mark.parametrize(
+    "api_route", [
+        APIRoute(path="/chat/completions", endpoint=chat_completion),
+        APIRoute(path="/engines/gpt-35-turbo-0125/chat/completions", endpoint=chat_completion),
+    ],
+)
+def test_generate_and_call_with_valid_key(prisma_client, api_route):
     # 1. Generate a Key, and use it to make a call
     print("prisma client=", prisma_client)
 
@@ -181,8 +189,12 @@ def test_generate_and_call_with_valid_key(prisma_client):
             )
             print("token from prisma", value_from_prisma)
 
-            request = Request(scope={"type": "http"})
-            request._url = URL(url="/chat/completions")
+            request = Request({
+                "type": "http",
+                "route": api_route,
+                "path": api_route.path,
+                "headers": [("Authorization", bearer_token)]
+            })
 
             # use generated key to auth in
             result = await user_api_key_auth(request=request, api_key=bearer_token)