diff --git a/litellm/tests/example_config_yaml/cache_no_params.yaml b/litellm/tests/example_config_yaml/cache_no_params.yaml
new file mode 100644
index 000000000..20ed919dd
--- /dev/null
+++ b/litellm/tests/example_config_yaml/cache_no_params.yaml
@@ -0,0 +1,7 @@
+model_list:
+  - model_name: "openai-model"
+    litellm_params:
+      model: "gpt-3.5-turbo"
+
+litellm_settings:
+  cache: True
diff --git a/litellm/tests/example_config_yaml/cache_with_params.yaml b/litellm/tests/example_config_yaml/cache_with_params.yaml
new file mode 100644
index 000000000..372151d0c
--- /dev/null
+++ b/litellm/tests/example_config_yaml/cache_with_params.yaml
@@ -0,0 +1,10 @@
+model_list:
+  - model_name: "openai-model"
+    litellm_params:
+      model: "gpt-3.5-turbo"
+
+litellm_settings:
+  cache: True
+  cache_params:
+    supported_call_types: ["embedding", "aembedding"]
+    host: "localhost"
\ No newline at end of file
diff --git a/litellm/tests/test_proxy_server.py b/litellm/tests/test_proxy_server.py
index 9de25c298..5e9854f43 100644
--- a/litellm/tests/test_proxy_server.py
+++ b/litellm/tests/test_proxy_server.py
@@ -256,6 +256,29 @@ def test_load_router_config():
         print(result)
         assert len(result[1]) == 2
 
+        # tests for litellm.cache set from config
+        print("testing reading proxy config for cache")
+        litellm.cache = None
+        load_router_config(
+            router=None,
+            config_file_path=f"{filepath}/example_config_yaml/cache_no_params.yaml"
+        )
+        assert litellm.cache is not None
+        assert "redis_client" in vars(litellm.cache.cache)  # it should default to redis on proxy
+        assert litellm.cache.supported_call_types == ['completion', 'acompletion', 'embedding', 'aembedding']  # init with all call types by default
+
+        print("testing reading proxy config for cache with params")
+        load_router_config(
+            router=None,
+            config_file_path=f"{filepath}/example_config_yaml/cache_with_params.yaml"
+        )
+        assert litellm.cache is not None
+        print(litellm.cache)
+        print(litellm.cache.supported_call_types)
+        print(vars(litellm.cache.cache))
+        assert "redis_client" in vars(litellm.cache.cache)  # it should default to redis on proxy
+        assert litellm.cache.supported_call_types == ['embedding', 'aembedding']  # init with just the call types from the config
+
     except Exception as e:
         pytest.fail("Proxy: Got exception reading config", e)
 # test_load_router_config()
\ No newline at end of file
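
For reviewers, a minimal sketch of the cache initialization these tests exercise. It assumes `litellm.caching.Cache` accepts `type`, `host`, and `supported_call_types` keyword arguments, which is how the proxy's config loader appears to forward `cache_params`; the Redis port/password are assumed to come from env vars (`REDIS_PORT`, `REDIS_PASSWORD`) as usual for litellm's redis cache.

```python
# Sketch only, not the proxy's exact code: how `cache: True` plus
# `cache_params` in the proxy YAML maps onto litellm's cache object.
import litellm
from litellm.caching import Cache

# Values mirroring cache_with_params.yaml above.
cache_params = {
    "supported_call_types": ["embedding", "aembedding"],
    "host": "localhost",  # illustrative; port/password assumed via env vars
}

# The proxy defaults the backend to redis when `cache: True` is set.
litellm.cache = Cache(type="redis", **cache_params)

# These mirror the assertions in test_load_router_config:
assert "redis_client" in vars(litellm.cache.cache)
assert litellm.cache.supported_call_types == ["embedding", "aembedding"]
```

With `cache_no_params.yaml` (no `cache_params` block), the same path should yield all four defaults in `supported_call_types`: `['completion', 'acompletion', 'embedding', 'aembedding']`.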