Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 02:34:29 +00:00)

Commit summary:
* fix(route_llm_request.py): move to using the common router, even for client-side credentials — ensures fallbacks / cooldown logic still works
* test(test_route_llm_request.py): add a unit test for route_request
* feat(router.py): generate a unique model id when a client-side credential is passed in — prevents cooldowns for API key 1 from impacting API key 2
* test(test_router.py): update testing to ensure original litellm params are not mutated
* fix(router.py): upsert the client-side call into the llm router model list — enables cooldown logic to work accurately
* fix: fix linting error
* test(test_router_utils.py): add a direct test for the new util on the router

File: 46 lines, 1.2 KiB, Python
import json
import os
import sys

import pytest
from fastapi.testclient import TestClient

# Make the repository root importable so `litellm` resolves when this test
# file runs from its own directory (three levels below the project root).
sys.path.insert(
    0, os.path.abspath("../../..")
)  # Adds the parent directory to the system path

from unittest.mock import MagicMock

from litellm.proxy.route_llm_request import route_request
|
@pytest.mark.parametrize(
    "route_type",
    [
        "atext_completion",
        "acompletion",
        "aembedding",
        "aimage_generation",
        "aspeech",
        "atranscription",
        "amoderation",
        "arerank",
    ],
)
@pytest.mark.asyncio
async def test_route_request_dynamic_credentials(route_type):
    """Verify that route_request forwards client-side (dynamic) credentials
    to the router unchanged, for every supported route type.

    The router is a MagicMock, so this asserts only the contract visible
    here: the dynamic route method is invoked exactly once with the
    original `data` kwargs, and its return value is surfaced back to the
    caller untouched.
    """
    data = {
        "model": "openai/gpt-4o-mini-2024-07-18",
        "api_key": "my-bad-key",
        # NOTE(review): the trailing space below looks unintentional —
        # confirm whether route_request is expected to pass it through
        # verbatim before "fixing" it.
        "api_base": "https://api.openai.com/v1 ",
    }
    llm_router = MagicMock()
    # Give the dynamic route method a sentinel return value so we can
    # confirm route_request returns it unchanged.
    getattr(llm_router, route_type).return_value = "fake_response"

    response = await route_request(data, llm_router, None, route_type)

    # The router's response must be surfaced unchanged ...
    assert response == "fake_response"
    # ... and the dynamic method must receive the exact client-side
    # credentials (i.e. the original params were not mutated or dropped).
    getattr(llm_router, route_type).assert_called_once_with(**data)