Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 19:24:27 +00:00)
fix(proxy_server.py): fix key caching logic
This commit is contained in:
parent 8f6af575e7
commit 3232feb123
5 changed files with 214 additions and 75 deletions
@@ -1596,7 +1596,6 @@ async def _cache_user_row(
    Check if a user_id exists in cache,
    if not retrieve it.
    """
    print_verbose(f"Prisma: _cache_user_row, user_id: {user_id}")
    cache_key = f"{user_id}_user_api_key_user_id"
    response = cache.get_cache(key=cache_key)
    if response is None:  # Cache miss
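The context lines above show the pattern this fix touches: build a per-user cache key, try the cache, and only fall back to the database on a miss. Below is a minimal, self-contained sketch of that cache-miss-then-fetch pattern; the dict-backed cache and fetch_user_row helper are hypothetical stand-ins, not litellm's actual Prisma client or cache API.

import asyncio

# Hypothetical in-process cache standing in for the proxy's cache object.
_cache: dict[str, dict] = {}

async def fetch_user_row(user_id: str) -> dict | None:
    # Stand-in for the database lookup (e.g. a Prisma query); assumed, not litellm's code.
    await asyncio.sleep(0)  # simulate async I/O
    return {"user_id": user_id, "spend": 0.0}

async def cache_user_row(user_id: str) -> dict | None:
    # Check if a user_id exists in cache; if not, retrieve it and cache the result.
    cache_key = f"{user_id}_user_api_key_user_id"
    row = _cache.get(cache_key)
    if row is None:  # cache miss: fetch once, then store for later lookups
        row = await fetch_user_row(user_id)
        if row is not None:
            _cache[cache_key] = row
    return row

if __name__ == "__main__":
    print(asyncio.run(cache_user_row("user-123")))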