(test) proxy - cache config

ishaan-jaff 2023-12-16 14:45:06 +05:30
parent 6b7d0eada4
commit ed0b5d29b0
3 changed files with 40 additions and 0 deletions

example_config_yaml/cache_no_params.yaml

@@ -0,0 +1,7 @@
model_list:
  - model_name: "openai-model"
    litellm_params:
      model: "gpt-3.5-turbo"

litellm_settings:
  cache: True

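Note: with `cache: True` and no `cache_params`, the test below expects the proxy to initialize `litellm.cache` with a Redis backend and all call types cached. A minimal Python sketch of that expected default, assuming `litellm.Cache` accepts `type`/`host`/`port` keyword arguments and that a local Redis is used (both assumptions based only on the asserts in test_load_router_config, not on the proxy source):

import litellm

# Sketch only: rough equivalent of `litellm_settings: cache: True` on the proxy.
# The Redis default and the four default call types come from the test's asserts;
# the explicit host/port here are assumed stand-ins for the proxy's own connection settings.
litellm.cache = litellm.Cache(
    type="redis",
    host="localhost",  # assumed local Redis for illustration
    port=6379,
)
print(litellm.cache.supported_call_types)
# expected: ['completion', 'acompletion', 'embedding', 'aembedding']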
example_config_yaml/cache_with_params.yaml

@@ -0,0 +1,10 @@
model_list:
  - model_name: "openai-model"
    litellm_params:
      model: "gpt-3.5-turbo"

litellm_settings:
  cache: True
  cache_params:
    supported_call_types: ["embedding", "aembedding"]
    host: "localhost"

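Note: compared to the first config, `cache_params` narrows caching to embedding calls and points at a specific Redis host. A minimal sketch of the equivalent explicit construction, assuming the `cache_params` keys map one-to-one onto `litellm.Cache` keyword arguments (an assumption consistent with the asserts in the test below):

import litellm

# Sketch only: assumed equivalent of cache_with_params.yaml.
# Only embedding/aembedding responses would be cached, and Redis is reached at localhost.
litellm.cache = litellm.Cache(
    type="redis",
    host="localhost",
    supported_call_types=["embedding", "aembedding"],
)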

@@ -256,6 +256,29 @@ def test_load_router_config():
        print(result)
        assert len(result[1]) == 2

        # tests for litellm.cache set from config
        print("testing reading proxy config for cache")
        litellm.cache = None
        load_router_config(
            router=None,
            config_file_path=f"{filepath}/example_config_yaml/cache_no_params.yaml",
        )
        assert litellm.cache is not None
        assert "redis_client" in vars(litellm.cache.cache)  # it should default to redis on proxy
        assert litellm.cache.supported_call_types == [
            "completion",
            "acompletion",
            "embedding",
            "aembedding",
        ]  # with no cache_params, init with all call types

        print("testing reading proxy config for cache with params")
        load_router_config(
            router=None,
            config_file_path=f"{filepath}/example_config_yaml/cache_with_params.yaml",
        )
        assert litellm.cache is not None
        print(litellm.cache)
        print(litellm.cache.supported_call_types)
        print(vars(litellm.cache.cache))
        assert "redis_client" in vars(litellm.cache.cache)  # it should default to redis on proxy
        assert litellm.cache.supported_call_types == ["embedding", "aembedding"]  # only the call types listed in cache_params
    except Exception as e:
        pytest.fail(f"Proxy: Got exception reading config: {e}")


# test_load_router_config()
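For context, the behaviour these asserts pin down could be implemented roughly as below. This is a sketch, not the actual load_router_config code: the helper name `_init_cache_from_settings` is hypothetical, and merging `cache_params` over a Redis default is an assumption that happens to match both test cases.

import litellm


def _init_cache_from_settings(litellm_settings: dict) -> None:
    """Hypothetical helper: turn the YAML `litellm_settings` block into litellm.cache."""
    if not litellm_settings.get("cache", False):
        return
    cache_params = {"type": "redis"}  # the test asserts the proxy defaults to Redis
    # user-supplied cache_params (host, supported_call_types, ...) override the defaults
    cache_params.update(litellm_settings.get("cache_params") or {})
    litellm.cache = litellm.Cache(**cache_params)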