diff --git a/litellm/main.py b/litellm/main.py
index dc4cf0001..9a06250a9 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -2535,6 +2535,7 @@ def batch_completion(
         list: A list of completion results.
     """
     args = locals()
+    batch_messages = messages
     completions = []
     model = model
 
@@ -2588,7 +2589,15 @@
                 completions.append(future)
 
     # Retrieve the results from the futures
-    results = [future.result() for future in completions]
+    # results = [future.result() for future in completions]
+    # return exceptions if any
+    results = []
+    for future in completions:
+        try:
+            results.append(future.result())
+        except Exception as exc:
+            results.append(exc)
+
     return results
 
 
diff --git a/litellm/tests/test_batch_completion_return_exceptions.py b/litellm/tests/test_batch_completion_return_exceptions.py
new file mode 100644
index 000000000..4e285771f
--- /dev/null
+++ b/litellm/tests/test_batch_completion_return_exceptions.py
@@ -0,0 +1,17 @@
+"""https://github.com/BerriAI/litellm/pull/3397/commits/a7ec1772b1457594d3af48cdcb0a382279b841c7#diff-44852387ceb00aade916d6b314dfd5d180499e54f35209ae9c07179febe08b4b."""
+"""Test batch_completion's return_exceptions."""
+import litellm
+
+msg1 = [{"role": "user", "content": "hi 1"}]
+msg2 = [{"role": "user", "content": "hi 2"}]
+
+
+def test_batch_completion_return_exceptions_true():
+    """Test batch_completion's return_exceptions."""
+    res = litellm.batch_completion(
+        model="gpt-3.5-turbo",
+        messages=[msg1, msg2],
+        api_key="sk_xxx",  # deliberately set invalid key
+    )
+
+    assert isinstance(res[0], litellm.exceptions.AuthenticationError)
\ No newline at end of file
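
For reference, a minimal sketch of how a caller might consume batch_completion after this change: each entry in the returned list is either a completed response or the exception raised by that request, so the caller can branch on isinstance. The model name and prompts below are illustrative, not part of the patch.

import litellm

# Submit two prompts; with this patch, a failing request no longer raises out
# of batch_completion but shows up as an Exception instance in the result list.
responses = litellm.batch_completion(
    model="gpt-3.5-turbo",
    messages=[
        [{"role": "user", "content": "hi 1"}],
        [{"role": "user", "content": "hi 2"}],
    ],
)

for item in responses:
    if isinstance(item, Exception):
        # The underlying completion call failed; the exception is returned, not raised.
        print(f"request failed: {item}")
    else:
        # Successful responses follow the OpenAI-style response shape.
        print(item.choices[0].message.content)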