# Mirror of https://github.com/BerriAI/litellm.git
# Synced 2025-04-25 18:54:30 +00:00
#
# Relevant upstream changes:
# - fix(route_llm_request.py): move to using common router, even for client-side
#   credentials — ensures fallbacks / cooldown logic still works
# - feat(router.py): generate unique model id when clientside credential passed in;
#   prevents cooldowns for api key 1 from impacting api key 2
# - fix(router.py): upsert clientside call into llm router model list so
#   cooldown logic works accurately
"""
|
|
Utils for handling clientside credentials
|
|
|
|
Supported clientside credentials:
|
|
- api_key
|
|
- api_base
|
|
- base_url
|
|
|
|
If given, generate a unique model_id for the deployment.
|
|
|
|
Ensures cooldowns are applied correctly.
|
|
"""
|
|
|
|
clientside_credential_keys = ["api_key", "api_base", "base_url"]


def is_clientside_credential(request_kwargs: dict) -> bool:
    """
    Return True if the request carries its own (clientside) credential.

    A credential is considered clientside when any of the keys in
    ``clientside_credential_keys`` is present in ``request_kwargs``.
    """
    for credential_key in clientside_credential_keys:
        if credential_key in request_kwargs:
            return True
    return False
|
def get_dynamic_litellm_params(litellm_params: dict, request_kwargs: dict) -> dict:
    """
    Merge any clientside credentials from the request into the deployment's
    litellm params.

    Mutates ``litellm_params`` in place and returns the same dict.
    Per the module docstring, the updated params are used downstream to
    generate a unique model_id for the deployment.

    Returns
    - litellm_params: dict — updated with any of ``clientside_credential_keys``
      found in ``request_kwargs``.
    """
    # In-place upsert: overwrite/insert each clientside credential supplied
    # on the request.
    litellm_params.update(
        {
            credential_key: request_kwargs[credential_key]
            for credential_key in clientside_credential_keys
            if credential_key in request_kwargs
        }
    )
    return litellm_params