Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-26 03:04:13 +00:00
fix - locking in memory leads to failing tests
parent fb98bd3a4a
commit 93c20b8d2d
4 changed files with 25 additions and 38 deletions
@@ -172,7 +172,6 @@ class DBSpendUpdateWriter:
                 f"track_cost_callback: {entity_type.value}_id is None. Not tracking spend for {entity_type.value}"
             )
             return False
-        async with prisma_client.in_memory_transaction_lock:
-            transaction_list[entity_id] = response_cost + transaction_list.get(
-                entity_id, 0
-            )
+        transaction_list[entity_id] = response_cost + transaction_list.get(
+            entity_id, 0
+        )
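Removing the lock here is safe because the read-modify-write on the in-memory dict contains no await point, so on a single asyncio event loop no other coroutine can interleave with it. A minimal sketch of that reasoning, reusing the names transaction_list, entity_id, and response_cost from the hunk above but otherwise unrelated to the litellm code:

import asyncio
from typing import Dict

# In-memory spend accumulator, as in the hunk above (sketch only).
transaction_list: Dict[str, float] = {}

async def track_spend(entity_id: str, response_cost: float) -> None:
    # No `await` between the read (.get) and the write, so the event loop
    # cannot switch to another coroutine mid-update; no lock is needed.
    transaction_list[entity_id] = response_cost + transaction_list.get(entity_id, 0)

async def main() -> None:
    # 1000 concurrent updates still accumulate to the expected total.
    await asyncio.gather(*(track_spend("team-1", 0.01) for _ in range(1000)))
    assert abs(transaction_list["team-1"] - 10.0) < 1e-9

asyncio.run(main())

This only holds while the critical section stays purely synchronous; once an await sits between the read and the write, the lock-free version is no longer safe.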
@@ -80,7 +80,6 @@ class RedisUpdateBuffer:
                 "redis_cache is None, skipping store_in_memory_spend_updates_in_redis"
             )
             return
-        async with prisma_client.in_memory_transaction_lock:
-            db_spend_update_transactions: DBSpendUpdateTransactions = (
-                DBSpendUpdateTransactions(
-                    user_list_transactions=prisma_client.user_list_transactions,
+        db_spend_update_transactions: DBSpendUpdateTransactions = (
+            DBSpendUpdateTransactions(
+                user_list_transactions=prisma_client.user_list_transactions,
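For context, the buffer above gathers the per-entity spend dicts held on the Prisma client into a DBSpendUpdateTransactions container before pushing them to Redis; only user_list_transactions is visible in this hunk. A lock-free sketch of that snapshot step under those assumptions (the second field and the stub client are illustrative, not litellm's actual shape):

from typing import Dict, TypedDict

class DBSpendUpdateTransactions(TypedDict):
    # Trimmed-down stand-in; the real container holds more transaction lists.
    user_list_transactions: Dict[str, float]
    team_list_transactions: Dict[str, float]

class PrismaClientStub:
    # Only the attributes this sketch reads from the client.
    def __init__(self) -> None:
        self.user_list_transactions: Dict[str, float] = {}
        self.team_list_transactions: Dict[str, float] = {}

def snapshot_spend_updates(prisma_client: PrismaClientStub) -> DBSpendUpdateTransactions:
    # Copying plain dicts is synchronous, so on a single event loop nothing
    # can mutate them mid-copy and no asyncio.Lock is required.
    return DBSpendUpdateTransactions(
        user_list_transactions=dict(prisma_client.user_list_transactions),
        team_list_transactions=dict(prisma_client.team_list_transactions),
    )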
@@ -94,9 +93,7 @@ class RedisUpdateBuffer:
 
         # only store in redis if there are any updates to commit
         if (
-            self._number_of_transactions_to_store_in_redis(
-                db_spend_update_transactions
-            )
+            self._number_of_transactions_to_store_in_redis(db_spend_update_transactions)
             == 0
         ):
             return
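The guard above skips the Redis write when nothing has accumulated since the last flush. The body of _number_of_transactions_to_store_in_redis is not shown in this hunk, so the helper below is an assumed, simplified equivalent rather than the real implementation:

from typing import Dict

# Assumed shape: each value is a per-entity map of id -> accumulated spend.
TransactionSnapshot = Dict[str, Dict[str, float]]

def number_of_transactions(snapshot: TransactionSnapshot) -> int:
    # Count every queued per-entity update across all transaction lists.
    return sum(len(per_entity) for per_entity in snapshot.values())

def should_skip_redis_flush(snapshot: TransactionSnapshot) -> bool:
    # Mirrors the early return above: no updates means no Redis round trip.
    return number_of_transactions(snapshot) == 0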
@@ -5,11 +5,3 @@ model_list:
       api_key: fake-key
       api_base: https://exampleopenaiendpoint-production.up.railway.app/
 
-general_settings:
-  use_redis_transaction_buffer: True
-
-litellm_settings:
-  cache: true
-  cache_params:
-    type: redis
-    supported_call_types: []
@@ -1121,7 +1121,6 @@ class PrismaClient:
         self.iam_token_db_auth: Optional[bool] = str_to_bool(
             os.getenv("IAM_TOKEN_DB_AUTH")
         )
-        self.in_memory_transaction_lock = asyncio.Lock()
         verbose_proxy_logger.debug("Creating Prisma Client..")
         try:
             from prisma import Prisma  # type: ignore
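The commit message only says that the in-memory lock led to failing tests, so the precise failure is not captured here. One plausible way a client-level asyncio.Lock hangs a test suite, shown purely as a hypothetical illustration: asyncio.Lock is not reentrant, so a flush path that re-enters a locked section from the same task blocks forever and the test times out.

import asyncio

async def main() -> None:
    lock = asyncio.Lock()

    async def record_spend() -> None:
        async with lock:  # second acquisition by the same task never succeeds
            pass

    async def flush() -> None:
        async with lock:
            await record_spend()  # re-enters the locked section

    try:
        await asyncio.wait_for(flush(), timeout=1.0)
    except asyncio.TimeoutError:
        print("deadlock: the same task tried to re-acquire the lock")

asyncio.run(main())

A related pitfall: on Python 3.9 and earlier, an asyncio.Lock created outside a running event loop binds to the loop returned by get_event_loop() at construction time, a known source of "attached to a different loop" errors in test suites that create a fresh loop per test.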