diff --git a/litellm/tests/test_batch_completions.py b/litellm/tests/test_batch_completions.py
index 560a4a3d9..03c5c2910 100644
--- a/litellm/tests/test_batch_completions.py
+++ b/litellm/tests/test_batch_completions.py
@@ -1,15 +1,15 @@
-# #### What this tests ####
-# # This tests calling batch_completions by running 100 messages together
+#### What this tests ####
+# This tests calling batch_completions by running 100 messages together
 
-# import sys, os
-# import traceback
-# import pytest
-# sys.path.insert(
-#     0, os.path.abspath("../..")
-# ) # Adds the parent directory to the system path
-# from openai.error import Timeout
-# import litellm
-# from litellm import batch_completion
+import sys, os
+import traceback
+import pytest
+sys.path.insert(
+    0, os.path.abspath("../..")
+) # Adds the parent directory to the system path
+from openai.error import Timeout
+import litellm
+from litellm import batch_completion, batch_completion_models, completion
 # litellm.set_verbose=True
 
 # def test_batch_completions():
@@ -22,4 +22,27 @@
 #     except Timeout as e:
 #         pass
 #     except Exception as e:
-#         pytest.fail(f"An error occurred: {e}")
\ No newline at end of file
+#         pytest.fail(f"An error occurred: {e}")
+
+def test_batch_completions_models():
+    try:
+        result = batch_completion_models(
+            models=["gpt-3.5-turbo", "claude-instant-1.2", "command-nightly"],
+            messages=[{"role": "user", "content": "Hey, how's it going"}]
+        )
+        print(result)
+    except Exception as e:
+        pytest.fail(f"An error occurred: {e}")
+# test_batch_completions_models()
+
+
+# def test_batch_completions():
+#     try:
+#         result = completion(
+#             model=["gpt-3.5-turbo", "claude-instant-1.2", "command-nightly"],
+#             messages=[{"role": "user", "content": "Hey, how's it going"}]
+#         )
+#         print(result)
+#     except Exception as e:
+#         pytest.fail(f"An error occurred: {e}")
+# test_batch_completions()