forked from phoenix/litellm-mirror

(fix) cache configs

parent d7f292c108 · commit 3c30705b76

1 changed file with 8 additions and 2 deletions
@@ -138,6 +138,7 @@ log_file = "api_log.json"
 worker_config = None
 master_key = None
 prisma_client = None
+config_cache = {}
 ### REDIS QUEUE ###
 async_result = None
 celery_app_conn = None
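The only change in this hunk is the new module-level config_cache dict. A minimal sketch of the key/value shape it presumably holds, inferred from the later hunks (the example key and value are hypothetical, not from the commit):

# Inferred cache shape: API key -> that token's config dict
config_cache = {}
config_cache["example-api-key"] = {
    "model_list": [{"model_name": "gpt-3.5-turbo"}],  # same shape config.get("model_list", []) reads below
}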
@@ -158,13 +159,18 @@ def usage_telemetry(
     ).start()

 async def user_api_key_auth(request: Request):
-    global master_key, prisma_client
+    global master_key, prisma_client, config_cache, llm_model_list
     if master_key is None:
         return
     try:
         api_key = await oauth2_scheme(request=request)
         if api_key == master_key:
             return
+        if api_key in config_cache:
+            model_list = config.get("model_list", [])
+            llm_model_list = model_list
+            return
+
         if prisma_client:
             valid_token = await prisma_client.litellm_verificationtoken.find_first(
                 where={
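Note that the added cache-hit branch reads config.get(...), but config is not bound at that point in the function: it is only assigned from valid_token.config further down, and it is not in the global declaration either, so a cache hit would raise a NameError at runtime. A hedged sketch of what the branch presumably intends, reading the cached entry instead:

# Sketch only; assumes the cached entry is the config dict stored in the last hunk.
if api_key in config_cache:
    config = config_cache[api_key]                 # read the cached config for this key
    llm_model_list = config.get("model_list", [])  # same lookup as the DB path below
    return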
@@ -176,9 +182,9 @@ async def user_api_key_auth(request: Request):
                 litellm.model_alias_map = valid_token.aliases
                 config = valid_token.config
                 if config != {}:
-                    global llm_model_list
                     model_list = config.get("model_list", [])
                     llm_model_list = model_list
+                    config_cache[api_key] = config
                     print("\n new llm router model list", llm_model_list)
                 if len(valid_token.models) == 0: # assume an empty model list means all models are allowed to be called
                     return
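Taken together, the two function hunks add a populate-on-first-verify, read-on-later-requests pattern: a verified token's config is stored in config_cache, and subsequent requests with the same key skip the Prisma query. A minimal, self-contained sketch of that lifecycle (the database lookup is replaced with a hypothetical stub; other names follow the diff):

import asyncio

config_cache = {}  # in-process cache keyed by API key, as in the commit

async def fetch_token_config(api_key: str) -> dict:
    # Hypothetical stand-in for the prisma_client.litellm_verificationtoken lookup.
    return {"model_list": [{"model_name": "gpt-3.5-turbo"}]}

async def get_config(api_key: str) -> dict:
    if api_key in config_cache:          # hit: skip the database round-trip
        return config_cache[api_key]
    config = await fetch_token_config(api_key)
    config_cache[api_key] = config       # populate on first verified request
    return config

print(asyncio.run(get_config("example-api-key")))  # a second call would hit the cache

One design consequence worth noting: the dict lives for the lifetime of the worker process and nothing in this commit evicts entries, so changes to a token's stored config take effect only after a restart.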