Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-26 11:14:04 +00:00
(fix) caching - get_cache_key - dont use set
This commit is contained in:
parent ee6b936377
commit a8e12661c2
1 changed file with 4 additions and 2 deletions
@@ -232,8 +232,10 @@ class Cache:
 
         # sort kwargs by keys, since model: [gpt-4, temperature: 0.2, max_tokens: 200] == [temperature: 0.2, max_tokens: 200, model: gpt-4]
         completion_kwargs = ["model", "messages", "temperature", "top_p", "n", "stop", "max_tokens", "presence_penalty", "frequency_penalty", "logit_bias", "user", "response_format", "seed", "tools", "tool_choice"]
-        embedding_kwargs = ["model", "input", "user", "encoding_format"]
-        combined_kwargs = list(set(completion_kwargs + embedding_kwargs))
+        embedding_only_kwargs = ["input", "encoding_format"]  # embedding kwargs = model, input, user, encoding_format. Model, user are checked in completion_kwargs
+
+        # combined_kwargs - NEEDS to be ordered across get_cache_key(). Do not use a set()
+        combined_kwargs = completion_kwargs + embedding_only_kwargs
         for param in combined_kwargs:
             # ignore litellm params here
             if param in kwargs:
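Why the set() matters here: iterating a Python set of strings follows the strings' hash values, and string hashing is randomized per interpreter process (PYTHONHASHSEED), so list(set(...)) can put the parameters in a different order after every restart. A cache key assembled by walking that list then changes across processes for the identical call, turning shared-cache hits into misses. Below is a minimal standalone sketch of the failure and of the commit's fix; the shortened kwarg lists and the key_from helper are illustrative assumptions, not litellm's actual get_cache_key implementation.

# Minimal sketch -- not litellm's actual code, just the ordering issue.
completion_kwargs = ["model", "messages", "temperature", "user"]
embedding_kwargs = ["model", "input", "user", "encoding_format"]
embedding_only_kwargs = ["input", "encoding_format"]

def key_from(params, call_kwargs):
    # Walk params in order, keeping only those present in the call.
    return ",".join(f"{p}={call_kwargs[p]}" for p in params if p in call_kwargs)

call = {"model": "gpt-4", "messages": "hi", "temperature": 0.2}

# BAD: set() iteration order follows Python's randomized string hashes, so
# this key can differ between interpreter processes for the identical call.
unstable = key_from(list(set(completion_kwargs + embedding_kwargs)), call)

# GOOD (the commit's approach): plain list concatenation fixes the order,
# so the same call always yields the same key string.
stable = key_from(completion_kwargs + embedding_only_kwargs, call)

print(unstable)  # field order may vary between runs with different hash seeds
print(stable)    # always: model=gpt-4,messages=hi,temperature=0.2

Running the sketch under two different PYTHONHASHSEED values shows the first key reordering its fields while the second stays byte-identical, which is exactly the property a cache key shared across worker processes needs.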