Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 10:44:24 +00:00.
fix caching
This commit is contained in:
parent
1799f1bfe9
commit
83aa304c34
3 changed files with 2 additions and 2 deletions
26
litellm/gpt_cache.py
Normal file
26
litellm/gpt_cache.py
Normal file
|
@ -0,0 +1,26 @@
|
|||
###### LiteLLM Integration with GPT Cache #########
|
||||
import gptcache
|
||||
|
||||
# openai.ChatCompletion._llm_handler = litellm.completion
|
||||
from gptcache.adapter import openai
|
||||
import litellm
|
||||
|
||||
|
||||
class LiteLLMChatCompletion(gptcache.adapter.openai.ChatCompletion):
    """GPT Cache ChatCompletion adapter that routes LLM calls through litellm.

    Overrides the adapter's ``_llm_handler`` hook so the underlying
    completion request is served by ``litellm.completion`` instead of the
    OpenAI client directly (presumably invoked by GPT Cache on a cache
    miss — confirm against the gptcache adapter docs).
    """

    @classmethod
    def _llm_handler(cls, *args, **kwargs):
        # Forward every positional and keyword argument unchanged to litellm.
        return litellm.completion(*args, **kwargs)
|
||||
# Module-level alias so callers can invoke this integration as
# `completion(...)`, mirroring the `litellm.completion` call shape.
# NOTE(review): `create` is inherited from gptcache's ChatCompletion
# adapter — verify it dispatches through `_llm_handler` as expected.
completion = LiteLLMChatCompletion.create

###### End of LiteLLM Integration with GPT Cache #########
|
||||
|
||||
|
||||
# ####### Example usage ###############
|
||||
# from gptcache import cache
|
||||
# completion = LiteLLMChatCompletion.create
|
||||
# # set API keys in .env / os.environ
|
||||
# cache.init()
|
||||
# cache.set_openai_key()
|
||||
# result = completion(model="claude-2", messages=[{"role": "user", "content": "cto of litellm"}])
|
||||
# print(result)
|
Loading…
Add table
Add a link
Reference in a new issue