diff --git a/dist/litellm-1.0.0.dev1-py3-none-any.whl b/dist/litellm-1.0.0.dev1-py3-none-any.whl
new file mode 100644
index 0000000000..8763ee52ab
Binary files /dev/null and b/dist/litellm-1.0.0.dev1-py3-none-any.whl differ
diff --git a/dist/litellm-1.0.0.dev1.tar.gz b/dist/litellm-1.0.0.dev1.tar.gz
new file mode 100644
index 0000000000..850fa7e5de
Binary files /dev/null and b/dist/litellm-1.0.0.dev1.tar.gz differ
diff --git a/litellm/caching.py b/litellm/caching.py
index 9632a6b03b..ba27820da6 100644
--- a/litellm/caching.py
+++ b/litellm/caching.py
@@ -11,7 +11,6 @@ import litellm
 import time, logging
 import json, traceback
-
 def get_prompt(*args, **kwargs):
     # make this safe checks, it should not throw any exceptions
     if len(args) > 1:
@@ -222,6 +221,8 @@ class Cache:
             else:
                 cache_key = self.get_cache_key(*args, **kwargs)
             if cache_key is not None:
+                if isinstance(result, litellm.ModelResponse):
+                    result = result.model_dump_json()
                 self.cache.set_cache(cache_key, result, **kwargs)
         except:
             pass
diff --git a/litellm/tests/test_text_completion.py b/litellm/tests/test_text_completion.py
index b31e01682f..1521d9677a 100644
--- a/litellm/tests/test_text_completion.py
+++ b/litellm/tests/test_text_completion.py
@@ -31,7 +31,7 @@ def test_completion_openai_prompt():
         #print(response.choices[0].text)
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
-# test_completion_openai_prompt()
+test_completion_openai_prompt()
 
 
 def test_completion_chatgpt_prompt():
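
Note (not part of the patch): the caching.py change serializes a litellm.ModelResponse to a JSON string via model_dump_json() before writing it to the cache, so the stored value is a plain string any backend can hold. The snippet below is a minimal, standalone sketch of that serialize-before-caching pattern; InMemoryCache, add_cache, and get_cached are hypothetical stand-ins used only for illustration and do not reflect litellm's actual cache API beyond what the diff shows.

```python
import json


class InMemoryCache:
    """Hypothetical dict-backed cache used only to illustrate the pattern."""

    def __init__(self):
        self.store = {}

    def set_cache(self, key, value, **kwargs):
        self.store[key] = value

    def get_cache(self, key, **kwargs):
        return self.store.get(key)


def add_cache(cache, key, result):
    # Mirrors the patched logic: model objects are serialized to JSON first,
    # plain strings and other values are stored unchanged.
    if hasattr(result, "model_dump_json"):  # e.g. a litellm.ModelResponse
        result = result.model_dump_json()
    cache.set_cache(key, result)


def get_cached(cache, key):
    # On a hit, rebuild a dict from the stored JSON string.
    cached = cache.get_cache(key)
    if cached is not None:
        return json.loads(cached)
    return None
```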