# What is this?
## Common auth checks between jwt + key based auth
"""
Got Valid Token from Cache, DB
Run checks for:

1. If user can call model
2. If user is in budget
3. If end_user ('user' passed to /chat/completions, /embeddings endpoint) is in budget
"""
import time
import traceback
from datetime import datetime
from typing import TYPE_CHECKING, Any, List, Literal, Optional

import httpx
from pydantic import BaseModel

import litellm
from litellm._logging import verbose_proxy_logger
from litellm.caching.caching import DualCache
from litellm.caching.dual_cache import LimitedSizeOrderedDict
from litellm.proxy._types import (
    LiteLLM_EndUserTable,
    LiteLLM_JWTAuth,
    LiteLLM_OrganizationTable,
    LiteLLM_TeamTable,
    LiteLLM_TeamTableCachedObj,
    LiteLLM_UserTable,
    LiteLLMRoutes,
    LitellmUserRoles,
    UserAPIKeyAuth,
)
from litellm.proxy.auth.route_checks import RouteChecks
from litellm.proxy.utils import PrismaClient, ProxyLogging, log_db_metrics
from litellm.types.services import ServiceLoggerPayload, ServiceTypes

from .auth_checks_organization import organization_role_based_access_check

if TYPE_CHECKING:
    from opentelemetry.trace import Span as _Span

    Span = _Span
else:
    Span = Any


last_db_access_time = LimitedSizeOrderedDict(max_size=100)
db_cache_expiry = 5  # refresh every 5s

all_routes = LiteLLMRoutes.openai_routes.value + LiteLLMRoutes.management_routes.value


def common_checks(  # noqa: PLR0915
    request_body: dict,
    team_object: Optional[LiteLLM_TeamTable],
    user_object: Optional[LiteLLM_UserTable],
    end_user_object: Optional[LiteLLM_EndUserTable],
    global_proxy_spend: Optional[float],
    general_settings: dict,
    route: str,
) -> bool:
    """
    Common checks across jwt + key-based auth.

    1. If team is blocked
    2. If team can call model
    3. If team is in budget
    4. If user passed in (JWT or key.user_id) - is in budget
    5. If end_user (either via JWT or 'user' passed to /chat/completions, /embeddings endpoint) is in budget
    6. [OPTIONAL] If 'enforce_user_param' enabled - did developer pass in 'user' param for openai endpoints
    7. [OPTIONAL] If 'litellm.max_budget' is set (>0), is proxy under budget
    8. [OPTIONAL] If guardrails modified - is request allowed to change this
    9. Check if request body is safe
    10. [OPTIONAL] Organization checks - if user_object.organization_id is set, run these checks
    """
    _model = request_body.get("model", None)
    # 1. If team is blocked
    if team_object is not None and team_object.blocked is True:
        raise Exception(
            f"Team={team_object.team_id} is blocked. Update via `/team/unblock` if you're an admin."
        )
    # 2. If team can call model
    if (
        _model is not None
        and team_object is not None
        and team_object.models is not None
        and len(team_object.models) > 0
        and _model not in team_object.models
    ):
        if (
            "all-proxy-models" in team_object.models
            or "*" in team_object.models
            or "openai/*" in team_object.models
        ):
            # this means the team has access to all models on the proxy
            pass
        # check if the team model is an access_group
        elif model_in_access_group(_model, team_object.models) is True:
            pass
        elif _model and "*" in _model:
            pass
        else:
            raise Exception(
                f"Team={team_object.team_id} not allowed to call model={_model}. Allowed team models = {team_object.models}"
            )
    # 3. If team is in budget
    if (
        team_object is not None
        and team_object.max_budget is not None
        and team_object.spend is not None
        and team_object.spend > team_object.max_budget
    ):
        raise litellm.BudgetExceededError(
            current_cost=team_object.spend,
            max_budget=team_object.max_budget,
            message=f"Team={team_object.team_id} over budget. Spend={team_object.spend}, Budget={team_object.max_budget}",
        )
    # 4. If user is in budget
    ## 4.1 check personal budget, if personal key
    if (
        (team_object is None or team_object.team_id is None)
        and user_object is not None
        and user_object.max_budget is not None
    ):
        user_budget = user_object.max_budget
        if user_budget < user_object.spend:
            raise litellm.BudgetExceededError(
                current_cost=user_object.spend,
                max_budget=user_budget,
                message=f"ExceededBudget: User={user_object.user_id} over budget. Spend={user_object.spend}, Budget={user_budget}",
            )
    ## 4.2 check team member budget, if team key
    # 5. If end_user ('user' passed to /chat/completions, /embeddings endpoint) is in budget
    if end_user_object is not None and end_user_object.litellm_budget_table is not None:
        end_user_budget = end_user_object.litellm_budget_table.max_budget
        if end_user_budget is not None and end_user_object.spend > end_user_budget:
            raise litellm.BudgetExceededError(
                current_cost=end_user_object.spend,
                max_budget=end_user_budget,
                message=f"ExceededBudget: End User={end_user_object.user_id} over budget. Spend={end_user_object.spend}, Budget={end_user_budget}",
            )
    # 6. [OPTIONAL] If 'enforce_user_param' enabled - did developer pass in 'user' param for openai endpoints
    if (
        general_settings.get("enforce_user_param", None) is not None
        and general_settings["enforce_user_param"] is True
    ):
        if RouteChecks.is_llm_api_route(route=route) and "user" not in request_body:
            raise Exception(
                f"'user' param not passed in. 'enforce_user_param'={general_settings['enforce_user_param']}"
            )
    if general_settings.get("enforced_params", None) is not None:
        # Enterprise ONLY Feature
        # we already validate if user is premium_user when reading the config
        # add an extra premium_user check here too, just in case
        from litellm.proxy.proxy_server import CommonProxyErrors, premium_user

        if premium_user is not True:
            raise ValueError(
                "Trying to use `enforced_params`"
                + CommonProxyErrors.not_premium_user.value
            )

        if RouteChecks.is_llm_api_route(route=route):
            # loop through each enforced param
            # example enforced_params ['user', 'metadata', 'metadata.generation_name']
            for enforced_param in general_settings["enforced_params"]:
                _enforced_params = enforced_param.split(".")
                if len(_enforced_params) == 1:
                    if _enforced_params[0] not in request_body:
                        raise ValueError(
                            f"BadRequest please pass param={_enforced_params[0]} in request body. This is a required param"
                        )
                elif len(_enforced_params) == 2:
                    # this is a scenario where user requires request['metadata']['generation_name'] to exist
                    if _enforced_params[0] not in request_body:
                        raise ValueError(
                            f"BadRequest please pass param={_enforced_params[0]} in request body. This is a required param"
                        )
                    if _enforced_params[1] not in request_body[_enforced_params[0]]:
                        raise ValueError(
                            f"BadRequest please pass param=[{_enforced_params[0]}][{_enforced_params[1]}] in request body. This is a required param"
                        )

        pass
    # 7. [OPTIONAL] If 'litellm.max_budget' is set (>0), is proxy under budget
    if (
        litellm.max_budget > 0
        and global_proxy_spend is not None
        # only run global budget checks for OpenAI routes
        # Reason - the Admin UI should continue working if the proxy crosses its global budget
        and RouteChecks.is_llm_api_route(route=route)
        and route != "/v1/models"
        and route != "/models"
    ):
        if global_proxy_spend > litellm.max_budget:
            raise litellm.BudgetExceededError(
                current_cost=global_proxy_spend, max_budget=litellm.max_budget
            )

    _request_metadata: dict = request_body.get("metadata", {}) or {}
    if _request_metadata.get("guardrails"):
        # 8. check if team is allowed to modify guardrails
        from litellm.proxy.guardrails.guardrail_helpers import can_modify_guardrails

        can_modify: bool = can_modify_guardrails(team_object)
        if can_modify is False:
            from fastapi import HTTPException

            raise HTTPException(
                status_code=403,
                detail={
                    "error": "Your team does not have permission to modify guardrails."
                },
            )

    # 10. [OPTIONAL] Organization RBAC checks
    organization_role_based_access_check(
        user_object=user_object, route=route, request_body=request_body
    )

    return True


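# Illustrative usage sketch (not from the original source): the proxy's auth layer is
# expected to call `common_checks` with the parsed request body plus whatever
# team/user/end-user records it has already resolved. All values below are
# hypothetical placeholders.
#
#     common_checks(
#         request_body={"model": "gpt-4o", "user": "customer-123"},
#         team_object=None,
#         user_object=None,
#         end_user_object=None,
#         global_proxy_spend=None,
#         general_settings={"enforce_user_param": True},
#         route="/chat/completions",
#     )  # returns True, or raises (e.g. litellm.BudgetExceededError)

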
def _allowed_routes_check(user_route: str, allowed_routes: list) -> bool:
    """
    Return whether a user is allowed to access a route. Helper function for `allowed_routes_check`.

    Parameters:
    - user_route: str - the route the user is trying to call
    - allowed_routes: List[str|LiteLLMRoutes] - the list of allowed routes for the user.
    """
    for allowed_route in allowed_routes:
        if (
            allowed_route in LiteLLMRoutes.__members__
            and user_route in LiteLLMRoutes[allowed_route].value
        ):
            return True
        elif allowed_route == user_route:
            return True
    return False


def allowed_routes_check(
    user_role: Literal[
        LitellmUserRoles.PROXY_ADMIN,
        LitellmUserRoles.TEAM,
        LitellmUserRoles.INTERNAL_USER,
    ],
    user_route: str,
    litellm_proxy_roles: LiteLLM_JWTAuth,
) -> bool:
    """
    Check whether a user - based on their role - is allowed to access the requested route.
    """

    if user_role == LitellmUserRoles.PROXY_ADMIN:
        is_allowed = _allowed_routes_check(
            user_route=user_route,
            allowed_routes=litellm_proxy_roles.admin_allowed_routes,
        )
        return is_allowed

    elif user_role == LitellmUserRoles.TEAM:
        if litellm_proxy_roles.team_allowed_routes is None:
            """
            By default allow a team to call openai + info routes
            """
            is_allowed = _allowed_routes_check(
                user_route=user_route, allowed_routes=["openai_routes", "info_routes"]
            )
            return is_allowed
        elif litellm_proxy_roles.team_allowed_routes is not None:
            is_allowed = _allowed_routes_check(
                user_route=user_route,
                allowed_routes=litellm_proxy_roles.team_allowed_routes,
            )
            return is_allowed
    return False


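# Sketch of how the route check composes with LiteLLMRoutes groups (illustrative,
# hypothetical values): a JWT config that only allows "info_routes" lets a team token
# call e.g. an info route such as "/key/info", but not a management route like
# "/key/generate".
#
#     allowed_routes_check(
#         user_role=LitellmUserRoles.TEAM,
#         user_route="/key/info",
#         litellm_proxy_roles=LiteLLM_JWTAuth(team_allowed_routes=["info_routes"]),
#     )  # -> True (assuming "/key/info" is part of LiteLLMRoutes.info_routes)

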
def allowed_route_check_inside_route(
    user_api_key_dict: UserAPIKeyAuth,
    requested_user_id: Optional[str],
) -> bool:
    ret_val = True
    if (
        user_api_key_dict.user_role != LitellmUserRoles.PROXY_ADMIN
        and user_api_key_dict.user_role != LitellmUserRoles.PROXY_ADMIN_VIEW_ONLY
    ):
        ret_val = False
    if requested_user_id is not None and user_api_key_dict.user_id is not None:
        if user_api_key_dict.user_id == requested_user_id:
            ret_val = True
    return ret_val


def get_actual_routes(allowed_routes: list) -> list:
    actual_routes: list = []
    for route_name in allowed_routes:
        try:
            route_value = LiteLLMRoutes[route_name].value
            actual_routes = actual_routes + route_value
        except KeyError:
            actual_routes.append(route_name)
    return actual_routes


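# Example of the expansion performed above (illustrative): route *group* names are
# replaced by their member routes, while unknown names are passed through unchanged.
#
#     get_actual_routes(["info_routes", "/some/custom/route"])
#     # -> [<all routes in LiteLLMRoutes.info_routes>, "/some/custom/route"]

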
@log_db_metrics
async def get_end_user_object(
    end_user_id: Optional[str],
    prisma_client: Optional[PrismaClient],
    user_api_key_cache: DualCache,
    parent_otel_span: Optional[Span] = None,
    proxy_logging_obj: Optional[ProxyLogging] = None,
) -> Optional[LiteLLM_EndUserTable]:
    """
    Returns the end-user object, if it exists in the db.

    Do an isolated check for the end user in the table, instead of a combined key + team + user + end-user check, as the key might come in frequently for different end-users. A larger call would slow down query time. This way we cache the constant part (key/team/user info) and only update based on the changing value (end-user).
    """
    if prisma_client is None:
        raise Exception("No db connected")

    if end_user_id is None:
        return None
    _key = "end_user_id:{}".format(end_user_id)

    def check_in_budget(end_user_obj: LiteLLM_EndUserTable):
        if end_user_obj.litellm_budget_table is None:
            return
        end_user_budget = end_user_obj.litellm_budget_table.max_budget
        if end_user_budget is not None and end_user_obj.spend > end_user_budget:
            raise litellm.BudgetExceededError(
                current_cost=end_user_obj.spend, max_budget=end_user_budget
            )

    # check if in cache
    cached_user_obj = await user_api_key_cache.async_get_cache(key=_key)
    if cached_user_obj is not None:
        if isinstance(cached_user_obj, dict):
            return_obj = LiteLLM_EndUserTable(**cached_user_obj)
            check_in_budget(end_user_obj=return_obj)
            return return_obj
        elif isinstance(cached_user_obj, LiteLLM_EndUserTable):
            return_obj = cached_user_obj
            check_in_budget(end_user_obj=return_obj)
            return return_obj
    # else, check db
    try:
        response = await prisma_client.db.litellm_endusertable.find_unique(
            where={"user_id": end_user_id},
            include={"litellm_budget_table": True},
        )

        if response is None:
            raise Exception

        # save the end-user object to cache
        await user_api_key_cache.async_set_cache(
            key="end_user_id:{}".format(end_user_id), value=response
        )

        _response = LiteLLM_EndUserTable(**response.dict())

        check_in_budget(end_user_obj=_response)

        return _response
    except Exception as e:  # if end-user not in db
        if isinstance(e, litellm.BudgetExceededError):
            raise e
        return None


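# Usage sketch (illustrative; `prisma_client` / `user_api_key_cache` are assumed to be
# the proxy server's shared instances):
#
#     end_user = await get_end_user_object(
#         end_user_id="customer-123",
#         prisma_client=prisma_client,
#         user_api_key_cache=user_api_key_cache,
#     )
#     # -> LiteLLM_EndUserTable or None; raises BudgetExceededError if over budget

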
def model_in_access_group(model: str, team_models: Optional[List[str]]) -> bool:
    from collections import defaultdict

    from litellm.proxy.proxy_server import llm_router

    if team_models is None:
        return True
    if model in team_models:
        return True

    access_groups = defaultdict(list)
    if llm_router:
        access_groups = llm_router.get_model_access_groups()

    models_in_current_access_groups = []
    if len(access_groups) > 0:  # check if token contains any model access groups
        for idx, m in enumerate(
            team_models
        ):  # loop token models, if any of them are an access group add the access group
            if m in access_groups:
                # if it is an access group we need to remove it from valid_token.models
                models_in_group = access_groups[m]
                models_in_current_access_groups.extend(models_in_group)

    # Filter out models that are access_groups
    filtered_models = [m for m in team_models if m not in access_groups]
    filtered_models += models_in_current_access_groups

    if model in filtered_models:
        return True
    return False


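# Access-group expansion, sketched with a hypothetical router config: if the router
# defines an access group "beta-models" -> ["gpt-4o", "claude-3-5-sonnet"], then a team
# whose models list is ["beta-models"] is treated as allowed to call either model.
#
#     model_in_access_group("gpt-4o", team_models=["beta-models"])     # -> True
#     model_in_access_group("gpt-4o", team_models=["gpt-3.5-turbo"])   # -> False

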
def _should_check_db(
    key: str, last_db_access_time: LimitedSizeOrderedDict, db_cache_expiry: int
) -> bool:
    """
    Prevent calling the db repeatedly for items that don't exist in the db.
    """
    current_time = time.time()
    # if key doesn't exist in last_db_access_time -> check db
    if key not in last_db_access_time:
        return True
    elif (
        last_db_access_time[key][0] is not None
    ):  # check db for non-null values (for refresh operations)
        return True
    elif last_db_access_time[key][0] is None:
        # entry is a (value, timestamp) tuple - only re-check the db once the
        # negative-cache entry is older than db_cache_expiry
        if current_time - last_db_access_time[key][1] >= db_cache_expiry:
            return True
    return False


def _update_last_db_access_time(
    key: str, value: Optional[Any], last_db_access_time: LimitedSizeOrderedDict
):
    last_db_access_time[key] = (value, time.time())


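# The two helpers above implement a small negative cache: entries are (value, timestamp)
# tuples keyed by e.g. "user_id:...". A sketch of the intended flow (illustrative):
#
#     if _should_check_db("user_id:u1", last_db_access_time, db_cache_expiry):
#         row = ...  # query the db
#         _update_last_db_access_time("user_id:u1", row, last_db_access_time)
#     # a None value suppresses further db lookups for ~db_cache_expiry seconds

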
@log_db_metrics
async def get_user_object(
    user_id: str,
    prisma_client: Optional[PrismaClient],
    user_api_key_cache: DualCache,
    user_id_upsert: bool,
    parent_otel_span: Optional[Span] = None,
    proxy_logging_obj: Optional[ProxyLogging] = None,
) -> Optional[LiteLLM_UserTable]:
    """
    - Check if user id in proxy User Table
    - if valid, return LiteLLM_UserTable object with defined limits
    - if not, then raise an error
    """

    if user_id is None:
        return None

    # check if in cache
    cached_user_obj = await user_api_key_cache.async_get_cache(key=user_id)
    if cached_user_obj is not None:
        if isinstance(cached_user_obj, dict):
            return LiteLLM_UserTable(**cached_user_obj)
        elif isinstance(cached_user_obj, LiteLLM_UserTable):
            return cached_user_obj
    # else, check db
    if prisma_client is None:
        raise Exception("No db connected")
    try:
        db_access_time_key = "user_id:{}".format(user_id)
        should_check_db = _should_check_db(
            key=db_access_time_key,
            last_db_access_time=last_db_access_time,
            db_cache_expiry=db_cache_expiry,
        )

        if should_check_db:
            response = await prisma_client.db.litellm_usertable.find_unique(
                where={"user_id": user_id}, include={"organization_memberships": True}
            )
        else:
            response = None

        if response is None:
            if user_id_upsert:
                response = await prisma_client.db.litellm_usertable.create(
                    data={"user_id": user_id},
                    include={"organization_memberships": True},
                )
            else:
                raise Exception

        if (
            response.organization_memberships is not None
            and len(response.organization_memberships) > 0
        ):
            # dump each organization membership to type LiteLLM_OrganizationMembershipTable
            _dumped_memberships = [
                membership.model_dump()
                for membership in response.organization_memberships
                if membership is not None
            ]
            response.organization_memberships = _dumped_memberships

        _response = LiteLLM_UserTable(**dict(response))
        response_dict = _response.model_dump()

        # save the user object to cache
        await user_api_key_cache.async_set_cache(key=user_id, value=response_dict)

        # save to db access time
        _update_last_db_access_time(
            key=db_access_time_key,
            value=response_dict,
            last_db_access_time=last_db_access_time,
        )

        return _response
    except Exception as e:  # if user not in db
        raise ValueError(
            f"User doesn't exist in db. 'user_id'={user_id}. Create user via `/user/new` call. Got error - {e}"
        )


async def _cache_management_object(
    key: str,
    value: BaseModel,
    user_api_key_cache: DualCache,
    proxy_logging_obj: Optional[ProxyLogging],
):
    await user_api_key_cache.async_set_cache(key=key, value=value)


async def _cache_team_object(
    team_id: str,
    team_table: LiteLLM_TeamTableCachedObj,
    user_api_key_cache: DualCache,
    proxy_logging_obj: Optional[ProxyLogging],
):
    key = "team_id:{}".format(team_id)

    ## CACHE REFRESH TIME!
    team_table.last_refreshed_at = time.time()

    await _cache_management_object(
        key=key,
        value=team_table,
        user_api_key_cache=user_api_key_cache,
        proxy_logging_obj=proxy_logging_obj,
    )


async def _cache_key_object(
    hashed_token: str,
    user_api_key_obj: UserAPIKeyAuth,
    user_api_key_cache: DualCache,
    proxy_logging_obj: Optional[ProxyLogging],
):
    key = hashed_token

    ## CACHE REFRESH TIME
    user_api_key_obj.last_refreshed_at = time.time()

    await _cache_management_object(
        key=key,
        value=user_api_key_obj,
        user_api_key_cache=user_api_key_cache,
        proxy_logging_obj=proxy_logging_obj,
    )


async def _delete_cache_key_object(
    hashed_token: str,
    user_api_key_cache: DualCache,
    proxy_logging_obj: Optional[ProxyLogging],
):
    key = hashed_token

    user_api_key_cache.delete_cache(key=key)

    ## UPDATE REDIS CACHE ##
    if proxy_logging_obj is not None:
        await proxy_logging_obj.internal_usage_cache.dual_cache.async_delete_cache(
            key=key
        )


@log_db_metrics
async def _get_team_db_check(team_id: str, prisma_client: PrismaClient):
    return await prisma_client.db.litellm_teamtable.find_unique(
        where={"team_id": team_id}
    )


async def get_team_object(
    team_id: str,
    prisma_client: Optional[PrismaClient],
    user_api_key_cache: DualCache,
    parent_otel_span: Optional[Span] = None,
    proxy_logging_obj: Optional[ProxyLogging] = None,
    check_cache_only: Optional[bool] = None,
) -> LiteLLM_TeamTableCachedObj:
    """
    - Check if team id in proxy Team Table
    - if valid, return LiteLLM_TeamTable object with defined limits
    - if not, then raise an error
    """
    if prisma_client is None:
        raise Exception(
            "No DB Connected. See - https://docs.litellm.ai/docs/proxy/virtual_keys"
        )

    # check if in cache
    key = "team_id:{}".format(team_id)
    cached_team_obj: Optional[LiteLLM_TeamTableCachedObj] = None

    ## CHECK REDIS CACHE ##
    if (
        proxy_logging_obj is not None
        and proxy_logging_obj.internal_usage_cache.dual_cache
    ):
        cached_team_obj = (
            await proxy_logging_obj.internal_usage_cache.dual_cache.async_get_cache(
                key=key, parent_otel_span=parent_otel_span
            )
        )

    if cached_team_obj is None:
        cached_team_obj = await user_api_key_cache.async_get_cache(key=key)

    if cached_team_obj is not None:
        if isinstance(cached_team_obj, dict):
            return LiteLLM_TeamTableCachedObj(**cached_team_obj)
        elif isinstance(cached_team_obj, LiteLLM_TeamTableCachedObj):
            return cached_team_obj

    if check_cache_only:
        raise Exception(
            f"Team doesn't exist in cache + check_cache_only=True. Team={team_id}."
        )

    # else, check db
    try:
        db_access_time_key = "team_id:{}".format(team_id)
        should_check_db = _should_check_db(
            key=db_access_time_key,
            last_db_access_time=last_db_access_time,
            db_cache_expiry=db_cache_expiry,
        )
        if should_check_db:
            response = await _get_team_db_check(
                team_id=team_id, prisma_client=prisma_client
            )
        else:
            response = None

        if response is None:
            raise Exception

        _response = LiteLLM_TeamTableCachedObj(**response.dict())
        # save the team object to cache
        await _cache_team_object(
            team_id=team_id,
            team_table=_response,
            user_api_key_cache=user_api_key_cache,
            proxy_logging_obj=proxy_logging_obj,
        )

        # save to db access time
        _update_last_db_access_time(
            key=db_access_time_key,
            value=_response,
            last_db_access_time=last_db_access_time,
        )

        return _response
    except Exception:
        raise Exception(
            f"Team doesn't exist in db. Team={team_id}. Create team via `/team/new` call."
        )


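# Usage sketch (illustrative; arguments are assumed to be the proxy's shared objects):
#
#     team = await get_team_object(
#         team_id="team-123",
#         prisma_client=prisma_client,
#         user_api_key_cache=user_api_key_cache,
#         check_cache_only=True,  # skip the db, serve only from cache
#     )

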
@log_db_metrics
async def get_key_object(
    hashed_token: str,
    prisma_client: Optional[PrismaClient],
    user_api_key_cache: DualCache,
    parent_otel_span: Optional[Span] = None,
    proxy_logging_obj: Optional[ProxyLogging] = None,
    check_cache_only: Optional[bool] = None,
) -> UserAPIKeyAuth:
    """
    - Check if the hashed token is in the proxy key (verification token) table
    - if valid, return UserAPIKeyAuth object with defined limits
    - if not, then raise an error
    """
    if prisma_client is None:
        raise Exception(
            "No DB Connected. See - https://docs.litellm.ai/docs/proxy/virtual_keys"
        )

    # check if in cache
    key = hashed_token

    cached_key_obj: Optional[UserAPIKeyAuth] = await user_api_key_cache.async_get_cache(
        key=key
    )

    if cached_key_obj is not None:
        if isinstance(cached_key_obj, dict):
            return UserAPIKeyAuth(**cached_key_obj)
        elif isinstance(cached_key_obj, UserAPIKeyAuth):
            return cached_key_obj

    if check_cache_only:
        raise Exception(
            f"Key doesn't exist in cache + check_cache_only=True. key={key}."
        )

    # else, check db
    try:
        _valid_token: Optional[BaseModel] = await prisma_client.get_data(
            token=hashed_token,
            table_name="combined_view",
            parent_otel_span=parent_otel_span,
            proxy_logging_obj=proxy_logging_obj,
        )

        if _valid_token is None:
            raise Exception

        _response = UserAPIKeyAuth(**_valid_token.model_dump(exclude_none=True))

        # save the key object to cache
        await _cache_key_object(
            hashed_token=hashed_token,
            user_api_key_obj=_response,
            user_api_key_cache=user_api_key_cache,
            proxy_logging_obj=proxy_logging_obj,
        )

        return _response
    except httpx.ConnectError as e:
        return await _handle_failed_db_connection_for_get_key_object(e=e)
    except Exception:
        raise Exception(
            f"Key doesn't exist in db. key={hashed_token}. Create key via `/key/generate` call."
        )


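# Usage sketch (illustrative): `hashed_token` is the hashed form of the virtual key,
# not the raw key string.
#
#     key_obj = await get_key_object(
#         hashed_token=hashed_token,
#         prisma_client=prisma_client,
#         user_api_key_cache=user_api_key_cache,
#     )

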
async def _handle_failed_db_connection_for_get_key_object(
    e: Exception,
) -> UserAPIKeyAuth:
    """
    Handles httpx.ConnectError when reading a Virtual Key from the LiteLLM DB

    Use this if you don't want failed DB queries to block LLM API requests

    Returns:
        - UserAPIKeyAuth: If general_settings.allow_requests_on_db_unavailable is True

    Raises:
        - Original Exception in all other cases
    """
    from litellm.proxy.proxy_server import (
        general_settings,
        litellm_proxy_admin_name,
        proxy_logging_obj,
    )

    # If this flag is on, requests failing to connect to the DB will be allowed
    if general_settings.get("allow_requests_on_db_unavailable", False) is True:
        # log this as a DB failure on prometheus
        proxy_logging_obj.service_logging_obj.service_failure_hook(
            service=ServiceTypes.DB,
            call_type="get_key_object",
            error=e,
            duration=0.0,
        )

        return UserAPIKeyAuth(
            key_name="failed-to-connect-to-db",
            token="failed-to-connect-to-db",
            user_id=litellm_proxy_admin_name,
        )
    else:
        # raise the original exception, the wrapper on `get_key_object` handles logging db failure to prometheus
        raise e


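# The fallback above is gated by proxy config, e.g. (illustrative yaml):
#
#     general_settings:
#       allow_requests_on_db_unavailable: true
#
# With the flag off (the default), the db connection error is re-raised and the
# request fails.

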
@log_db_metrics
async def get_org_object(
    org_id: str,
    prisma_client: Optional[PrismaClient],
    user_api_key_cache: DualCache,
    parent_otel_span: Optional[Span] = None,
    proxy_logging_obj: Optional[ProxyLogging] = None,
):
    """
    - Check if org id in proxy Org Table
    - if valid, return LiteLLM_OrganizationTable object
    - if not, then raise an error
    """
    if prisma_client is None:
        raise Exception(
            "No DB Connected. See - https://docs.litellm.ai/docs/proxy/virtual_keys"
        )

    # check if in cache
    cached_org_obj = await user_api_key_cache.async_get_cache(
        key="org_id:{}".format(org_id)
    )
    if cached_org_obj is not None:
        if isinstance(cached_org_obj, dict):
            return cached_org_obj
        elif isinstance(cached_org_obj, LiteLLM_OrganizationTable):
            return cached_org_obj
    # else, check db
    try:
        response = await prisma_client.db.litellm_organizationtable.find_unique(
            where={"organization_id": org_id}
        )

        if response is None:
            raise Exception

        return response
    except Exception:
        raise Exception(
            f"Organization doesn't exist in db. Organization={org_id}. Create organization via `/organization/new` call."
        )


async def can_key_call_model(
    model: str, llm_model_list: Optional[list], valid_token: UserAPIKeyAuth
) -> Literal[True]:
    """
    Checks if token can call a given model

    Returns:
        - True: if token allowed to call model

    Raises:
        - Exception: If token not allowed to call model
    """
    if model in litellm.model_alias_map:
        model = litellm.model_alias_map[model]

    ## check if model in allowed model names
    verbose_proxy_logger.debug(
        f"LLM Model List pre access group check: {llm_model_list}"
    )
    from collections import defaultdict

    from litellm.proxy.proxy_server import llm_router

    access_groups = defaultdict(list)
    if llm_router:
        access_groups = llm_router.get_model_access_groups()

    models_in_current_access_groups = []
    if len(access_groups) > 0:  # check if token contains any model access groups
        for idx, m in enumerate(
            valid_token.models
        ):  # loop token models, if any of them are an access group add the access group
            if m in access_groups:
                # if it is an access group we need to remove it from valid_token.models
                models_in_group = access_groups[m]
                models_in_current_access_groups.extend(models_in_group)

    # Filter out models that are access_groups
    filtered_models = [m for m in valid_token.models if m not in access_groups]

    filtered_models += models_in_current_access_groups
    verbose_proxy_logger.debug(f"model: {model}; allowed_models: {filtered_models}")

    all_model_access: bool = False

    if (
        len(filtered_models) == 0
        or "*" in filtered_models
        or "openai/*" in filtered_models
    ):
        all_model_access = True

    if model is not None and model not in filtered_models and all_model_access is False:
        raise ValueError(
            f"API Key not allowed to access model. This token can only access models={valid_token.models}. Tried to access {model}"
        )
    valid_token.models = filtered_models
    verbose_proxy_logger.debug(
        f"filtered allowed_models: {filtered_models}; valid_token.models: {valid_token.models}"
    )
    return True
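

# Usage sketch (illustrative): called with the key's UserAPIKeyAuth record after the
# token has been validated; an empty models list or a wildcard entry ("*", "openai/*")
# grants access to all models.
#
#     await can_key_call_model(
#         model="gpt-4o",
#         llm_model_list=llm_model_list,
#         valid_token=valid_token,
#     )  # -> True, or raises ValueError if the key can't access the model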