litellm-mirror/litellm/tests/example_config_yaml/cache_with_params.yaml
2023-12-16 14:45:06 +05:30

10 lines
No EOL
218 B
YAML

# litellm proxy config: one routed model plus caching settings.
# NOTE(review): indentation reconstructed — the source was flattened to
# column 0, which breaks the nesting these keys clearly imply. Verify
# against the upstream litellm example config.
model_list:
  - model_name: "openai-model"
    litellm_params:
      model: "gpt-3.5-turbo"

litellm_settings:
  # canonical lowercase boolean (was `True`; same parsed value, portable form)
  cache: true
  cache_params:
    # cache only embedding calls (sync + async)
    supported_call_types: ["embedding", "aembedding"]
    host: "localhost"  # cache backend host — presumably Redis; confirm with deployment