Make test_generate_and_call_with_valid_key parametrized

This allows us to test the same code with different routes.
For example, it lets us test the `/engines/{model}/chat/completions`
route, which https://github.com/BerriAI/litellm/pull/3663 fixes.
Marc Abramowitz 2024-05-16 09:54:10 -07:00
parent 79e2ff1b7a
commit a42fec803c

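Background, as a minimal standalone sketch (not part of the diff below): pytest runs the test once per `APIRoute` listed in `@pytest.mark.parametrize`, so the same test body can exercise both the `/chat/completions` path and an `/engines/{model}/chat/completions` path. The `dummy_endpoint` and `test_route_paths` names here are illustrative stand-ins for litellm's `chat_completion` handler and the real test.

import pytest
from fastapi.routing import APIRoute


async def dummy_endpoint():
    # Stand-in for litellm.proxy.proxy_server.chat_completion
    return {"ok": True}


@pytest.mark.parametrize(
    "api_route",
    [
        APIRoute(path="/chat/completions", endpoint=dummy_endpoint),
        APIRoute(
            path="/engines/gpt-35-turbo-0125/chat/completions",
            endpoint=dummy_endpoint,
        ),
    ],
)
def test_route_paths(api_route):
    # pytest invokes this once per APIRoute; the route path is what varies.
    assert api_route.path.endswith("/chat/completions")
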
@@ -23,6 +23,7 @@ import sys, os
 import traceback
 from dotenv import load_dotenv
 from fastapi import Request
+from fastapi.routing import APIRoute
 from datetime import datetime

 load_dotenv()
@@ -51,6 +52,7 @@ from litellm.proxy.proxy_server import (
     user_info,
     info_key_fn,
     new_team,
+    chat_completion,
 )
 from litellm.proxy.utils import PrismaClient, ProxyLogging, hash_token, update_spend
 from litellm._logging import verbose_proxy_logger
@@ -146,7 +148,13 @@ async def test_new_user_response(prisma_client):
         pytest.fail(f"Got exception {e}")


-def test_generate_and_call_with_valid_key(prisma_client):
+@pytest.mark.parametrize(
+    "api_route", [
+        APIRoute(path="/chat/completions", endpoint=chat_completion),
+        APIRoute(path="/engines/gpt-35-turbo-0125/chat/completions", endpoint=chat_completion),
+    ],
+)
+def test_generate_and_call_with_valid_key(prisma_client, api_route):
     # 1. Generate a Key, and use it to make a call

     print("prisma client=", prisma_client)
@@ -181,8 +189,12 @@ def test_generate_and_call_with_valid_key(prisma_client):
             )
             print("token from prisma", value_from_prisma)

-            request = Request(scope={"type": "http"})
-            request._url = URL(url="/chat/completions")
+            request = Request({
+                "type": "http",
+                "route": api_route,
+                "path": api_route.path,
+                "headers": [("Authorization", bearer_token)]
+            })

             # use generated key to auth in
             result = await user_api_key_auth(request=request, api_key=bearer_token)
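For reference, a minimal sketch of why the scope dict replaces the old `request._url` hack: Starlette's `Request` reads the path, route, and headers straight from the ASGI scope, so the parametrized `APIRoute` flows through to downstream checks without touching private attributes. The `dummy_endpoint` function and the `sk-test-123` token below are illustrative, not taken from the commit; per the ASGI spec, header pairs are byte strings, so the sketch uses bytes.

from fastapi.routing import APIRoute
from starlette.requests import Request


async def dummy_endpoint():
    return {"ok": True}


api_route = APIRoute(
    path="/engines/gpt-35-turbo-0125/chat/completions", endpoint=dummy_endpoint
)

request = Request(
    {
        "type": "http",
        "method": "POST",
        "route": api_route,
        "path": api_route.path,
        # ASGI headers are (name, value) byte pairs
        "headers": [(b"authorization", b"Bearer sk-test-123")],
    }
)

print(request.url.path)                  # /engines/gpt-35-turbo-0125/chat/completions
print(request.headers["authorization"])  # Bearer sk-test-123
print(request.scope["route"].path)       # same path, visible to code that inspects the route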