forked from phoenix/litellm-mirror
test(test_least_busy_routing.py): fix test init
This commit is contained in:
parent 535a547b66
commit 2acd086596

1 changed file with 2 additions and 2 deletions
test_least_busy_routing.py

@@ -22,7 +22,7 @@ from litellm.caching import DualCache
 
 def test_model_added():
     test_cache = DualCache()
-    least_busy_logger = LeastBusyLoggingHandler(router_cache=test_cache)
+    least_busy_logger = LeastBusyLoggingHandler(router_cache=test_cache, model_list=[])
     kwargs = {
         "litellm_params": {
             "metadata": {
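The fix itself is simple: LeastBusyLoggingHandler now expects a model_list argument alongside router_cache, and the tests were still calling the old single-argument constructor. A minimal sketch of the fixed initialization is below; the import path for LeastBusyLoggingHandler is an assumption based on the litellm package layout, while the DualCache import comes straight from the hunk context.

# Minimal sketch of the fixed test setup.
# Assumption: LeastBusyLoggingHandler is importable from
# litellm.router_strategy.least_busy (not shown in this diff).
from litellm.caching import DualCache
from litellm.router_strategy.least_busy import LeastBusyLoggingHandler

test_cache = DualCache()

# The handler now requires model_list in addition to router_cache;
# the tests pass an empty list since no deployments are registered up front.
least_busy_logger = LeastBusyLoggingHandler(router_cache=test_cache, model_list=[])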
@@ -39,7 +39,7 @@ def test_model_added():
 
 def test_get_available_deployments():
     test_cache = DualCache()
-    least_busy_logger = LeastBusyLoggingHandler(router_cache=test_cache)
+    least_busy_logger = LeastBusyLoggingHandler(router_cache=test_cache, model_list=[])
     model_group = "gpt-3.5-turbo"
     deployment = "azure/chatgpt-v-2"
     kwargs = {
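For context, here is a hedged sketch of the request kwargs the second test appears to build from the model_group and deployment variables shown above. Only the outer structure (kwargs -> litellm_params -> metadata) appears in the diff; the specific metadata keys are assumptions about how the test wires those values through.

# Hedged sketch of the kwargs used in test_get_available_deployments.
# Assumption: metadata carries the model group and deployment name under
# "model_group" and "deployment" keys; only the nesting is visible in the diff.
model_group = "gpt-3.5-turbo"
deployment = "azure/chatgpt-v-2"

kwargs = {
    "litellm_params": {
        "metadata": {
            "model_group": model_group,
            "deployment": deployment,
        }
    },
}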