diff --git a/litellm/__pycache__/main.cpython-311.pyc b/litellm/__pycache__/main.cpython-311.pyc
index 3f89800a7..932f8294d 100644
Binary files a/litellm/__pycache__/main.cpython-311.pyc and b/litellm/__pycache__/main.cpython-311.pyc differ
diff --git a/litellm/__pycache__/utils.cpython-311.pyc b/litellm/__pycache__/utils.cpython-311.pyc
index 4e7d6c2d1..643e64add 100644
Binary files a/litellm/__pycache__/utils.cpython-311.pyc and b/litellm/__pycache__/utils.cpython-311.pyc differ
diff --git a/litellm/main.py b/litellm/main.py
index 83889f849..a17dfc8a5 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -755,6 +755,10 @@ def completion(
             model=model, custom_llm_provider=custom_llm_provider, original_exception=e
         )
 
+def completion_with_retries(*args, **kwargs):
+    import tenacity
+    retryer = tenacity.Retrying(stop=tenacity.stop_after_attempt(3), reraise=True)
+    return retryer(completion, *args, **kwargs)
 
 def batch_completion(*args, **kwargs):
     batch_messages = args[1] if len(args) > 1 else kwargs.get("messages")
diff --git a/litellm/tests/test_completion_with_retries.py b/litellm/tests/test_completion_with_retries.py
new file mode 100644
index 000000000..4d3d55399
--- /dev/null
+++ b/litellm/tests/test_completion_with_retries.py
@@ -0,0 +1,86 @@
+# import sys, os
+# import traceback
+# from dotenv import load_dotenv
+
+# load_dotenv()
+# import os
+
+# sys.path.insert(
+#     0, os.path.abspath("../..")
+# )  # Adds the parent directory to the system path
+# import pytest
+# import litellm
+# from litellm import completion_with_retries
+# from litellm import (
+#     AuthenticationError,
+#     InvalidRequestError,
+#     RateLimitError,
+#     ServiceUnavailableError,
+#     OpenAIError,
+# )
+
+# user_message = "Hello, whats the weather in San Francisco??"
+# messages = [{"content": user_message, "role": "user"}]
+
+
+# def logger_fn(user_model_dict):
+#     # print(f"user_model_dict: {user_model_dict}")
+#     pass
+
+# # normal call
+# def test_completion_custom_provider_model_name():
+#     try:
+#         response = completion_with_retries(
+#             model="together_ai/togethercomputer/llama-2-70b-chat",
+#             messages=messages,
+#             logger_fn=logger_fn,
+#         )
+#         # Add any assertions here to check the response
+#         print(response)
+#     except Exception as e:
+#         pytest.fail(f"Error occurred: {e}")
+
+
+# # bad call
+# # def test_completion_custom_provider_model_name():
+# #     try:
+# #         response = completion_with_retries(
+# #             model="bad-model",
+# #             messages=messages,
+# #             logger_fn=logger_fn,
+# #         )
+# #         # Add any assertions here to check the response
+# #         print(response)
+# #     except Exception as e:
+# #         pytest.fail(f"Error occurred: {e}")
+
+# # impact on exception mapping
+# def test_context_window():
+#     sample_text = "how does a court case get to the Supreme Court?" * 5000
+#     messages = [{"content": sample_text, "role": "user"}]
+#     try:
+#         model = "chatgpt-test"
+#         response = completion_with_retries(
+#             model=model,
+#             messages=messages,
+#             custom_llm_provider="azure",
+#             logger_fn=logger_fn,
+#         )
+#         print(f"response: {response}")
+#     except InvalidRequestError as e:
+#         print(f"InvalidRequestError: {e.llm_provider}")
+#         return
+#     except OpenAIError as e:
+#         print(f"OpenAIError: {e.llm_provider}")
+#         return
+#     except Exception as e:
+#         print("Uncaught Error in test_context_window")
+#         print(f"Error Type: {type(e).__name__}")
+#         print(f"Uncaught Exception - {e}")
+#         pytest.fail(f"Error occurred: {e}")
+#         return
+
+
+# test_context_window()
+
+# test_completion_custom_provider_model_name()
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
index be3dc2a82..fce863ff5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.460"
+version = "0.1.461"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"
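Below is a rough usage sketch, not part of the patch above, showing how the new completion_with_retries helper could be called once this change lands. The model name and prompt are placeholder assumptions; the import path mirrors the one used in the new test file.

# Hypothetical usage sketch for the retry wrapper added in litellm/main.py.
# completion_with_retries forwards its arguments to litellm.completion through
# a tenacity.Retrying object that stops after 3 attempts and re-raises the
# final exception if all of them fail.
from litellm import completion_with_retries

messages = [{"role": "user", "content": "What's the weather in San Francisco?"}]

# Placeholder model name -- substitute any provider/model litellm supports.
response = completion_with_retries(model="gpt-3.5-turbo", messages=messages)
print(response)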