Litellm dev 02 10 2025 p2 (#8443)
* Fixed issue #8246 (#8250)
* Fixed issue #8246
* Added unit tests for discard() and for remove_callback_from_list_by_object()
* fix(openai.py): support dynamic passing of organization param to openai
  (handles the scenario where a client-side org id is passed to OpenAI)

Co-authored-by: Erez Hadad <erezh@il.ibm.com>
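As a usage note, the openai.py change above is about letting a client-side organization id travel with an individual request rather than only being set globally. A minimal sketch of that calling pattern, assuming litellm.completion forwards a per-request organization argument to the OpenAI client as the commit message describes (the org id below is a placeholder, not a real organization):

import litellm

# Hypothetical example: "org-example123" is a placeholder org id.
# The per-request organization value is forwarded to the OpenAI client
# for this call instead of relying on an environment-level setting.
response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    organization="org-example123",  # client-side org id passed with the request
)
print(response.choices[0].message.content)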
This commit is contained in: commit e26d7df91b (parent 47f46f92c8)
9 changed files with 112 additions and 5 deletions
@@ -918,6 +918,31 @@ def test_flush_cache(model_list):
     assert router.cache.get_cache("test") is None


+def test_discard(model_list):
+    """
+    Test that discard properly removes a Router from the callback lists
+    """
+    litellm.callbacks = []
+    litellm.success_callback = []
+    litellm._async_success_callback = []
+    litellm.failure_callback = []
+    litellm._async_failure_callback = []
+    litellm.input_callback = []
+    litellm.service_callback = []
+
+    router = Router(model_list=model_list)
+    router.discard()
+
+    # Verify all callback lists are empty
+    assert len(litellm.callbacks) == 0
+    assert len(litellm.success_callback) == 0
+    assert len(litellm.failure_callback) == 0
+    assert len(litellm._async_success_callback) == 0
+    assert len(litellm._async_failure_callback) == 0
+    assert len(litellm.input_callback) == 0
+    assert len(litellm.service_callback) == 0
+
+
 def test_initialize_assistants_endpoint(model_list):
     """Test if the 'initialize_assistants_endpoint' function is working correctly"""
     router = Router(model_list=model_list)
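For context on what the new test_discard test checks: Router.discard() is expected to deregister the router from litellm's module-level callback lists, and the commit message pairs it with a remove_callback_from_list_by_object() helper. The sketch below shows an illustrative remove-by-identity pattern under those assumptions; it is not the actual litellm implementation (which exposes discard as a Router method rather than the free function shown here, and may match callbacks differently).

from typing import Any, List

def remove_callback_from_list_by_object(callback_list: List[Any], obj: Any) -> None:
    # Drop every entry that is the given object (identity comparison);
    # the real helper may also match callbacks owned by the object.
    callback_list[:] = [cb for cb in callback_list if cb is not obj]

def discard(router_obj: Any) -> None:
    # Illustrative only: walk the same module-level lists that test_discard asserts on.
    import litellm

    for cb_list in (
        litellm.callbacks,
        litellm.success_callback,
        litellm._async_success_callback,
        litellm.failure_callback,
        litellm._async_failure_callback,
        litellm.input_callback,
        litellm.service_callback,
    ):
        remove_callback_from_list_by_object(cb_list, router_obj)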