gpt_cache integration v0

parent 72e12895ff
commit 04281aaf2a

1 changed file with 31 additions and 0 deletions
litellm/cache.py (new file, +31)
@@ -0,0 +1,31 @@
###### LiteLLM Integration with GPT Cache #########
import gptcache

# openai.ChatCompletion._llm_handler = litellm.completion
from gptcache.adapter import openai
import litellm


class LiteLLMChatCompletion(gptcache.adapter.openai.ChatCompletion):
    @classmethod
    def _llm_handler(cls, *llm_args, **llm_kwargs):
        return litellm.completion(*llm_args, **llm_kwargs)


completion = LiteLLMChatCompletion.create
###### End of LiteLLM Integration with GPT Cache #########


# ####### Example usage ###############
# from gptcache import cache
# completion = LiteLLMChatCompletion.create
# # set API keys in .env / os.environ
# cache.init()
# cache.set_openai_key()
# result = completion(model="claude-2", messages=[{"role": "user", "content": "cto of litellm"}])
# print(result)
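For context on how this wrapper is meant to be used: GPTCache's ChatCompletion.create handles cache lookup and storage, and on a cache miss it invokes the _llm_handler classmethod, which the subclass above routes to litellm.completion. The sketch below is adapted from the commented example in the diff; the note about exact-match caching describes GPTCache's default cache.init() behavior and is an assumption about those defaults, not something pinned down by this commit.

# Minimal usage sketch, adapted from the commented example above.
# Assumes gptcache and litellm are installed and that the API key for the
# chosen provider (here Anthropic, for claude-2) is set in os.environ.
from gptcache import cache

from litellm.cache import completion  # the wrapper added in this commit

cache.init()            # default GPTCache setup (exact-match caching assumed)
cache.set_openai_key()  # reads OPENAI_API_KEY from the environment

# The first call goes to the provider via litellm.completion; an identical
# follow-up call should be answered from the cache without a network request.
result = completion(
    model="claude-2",
    messages=[{"role": "user", "content": "cto of litellm"}],
)
print(result)

Overriding _llm_handler rather than wrapping create keeps all of GPTCache's lookup, similarity, and storage logic intact while swapping only the backing LLM call, so litellm handles provider routing transparently.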