mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-27 19:54:13 +00:00
formatting improvements
This commit is contained in:
parent
3e0a16acf4
commit
a69b7ffcfa
17 changed files with 464 additions and 323 deletions
|
@ -1,20 +1,21 @@
|
|||
|
||||
###### LiteLLM Integration with GPT Cache #########
|
||||
import gptcache
|
||||
|
||||
# openai.ChatCompletion._llm_handler = litellm.completion
|
||||
from gptcache.adapter import openai
|
||||
import litellm
|
||||
|
||||
|
||||
class LiteLLMChatCompletion(gptcache.adapter.openai.ChatCompletion):
    """GPT Cache ``ChatCompletion`` adapter backed by litellm.

    GPT Cache invokes the ``_llm_handler`` hook whenever a request is not
    served from cache; overriding it here routes those cache misses through
    ``litellm.completion``, so any litellm-supported provider can sit behind
    the cache instead of only OpenAI.
    """

    @classmethod
    def _llm_handler(cls, *args, **kwargs):
        # Pass every positional and keyword argument through unchanged so the
        # caller's model/messages/options reach litellm exactly as given.
        return litellm.completion(*args, **kwargs)
|
||||
|
||||
|
||||
|
||||
# Module-level alias so callers can invoke `completion(...)` with the same
# shape as the openai/gptcache API while getting litellm-backed handling.
completion = LiteLLMChatCompletion.create
|
||||
###### End of LiteLLM Integration with GPT Cache #########
|
||||
|
||||
|
||||
|
||||
# ####### Example usage ###############
|
||||
# from gptcache import cache
|
||||
# completion = LiteLLMChatCompletion.create
|
||||
|
@ -23,9 +24,3 @@ completion = LiteLLMChatCompletion.create
|
|||
# cache.set_openai_key()
|
||||
# result = completion(model="claude-2", messages=[{"role": "user", "content": "cto of litellm"}])
|
||||
# print(result)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue