diff --git a/litellm/__pycache__/main.cpython-311.pyc b/litellm/__pycache__/main.cpython-311.pyc
index 4959766b6..1490fcfea 100644
Binary files a/litellm/__pycache__/main.cpython-311.pyc and b/litellm/__pycache__/main.cpython-311.pyc differ
diff --git a/litellm/__pycache__/utils.cpython-311.pyc b/litellm/__pycache__/utils.cpython-311.pyc
index de7dd5e92..71c54c183 100644
Binary files a/litellm/__pycache__/utils.cpython-311.pyc and b/litellm/__pycache__/utils.cpython-311.pyc differ
diff --git a/litellm/tests/__pycache__/test_bad_params.cpython-311-pytest-7.4.0.pyc b/litellm/tests/__pycache__/test_bad_params.cpython-311-pytest-7.4.0.pyc
new file mode 100644
index 000000000..1e3c4bc7d
Binary files /dev/null and b/litellm/tests/__pycache__/test_bad_params.cpython-311-pytest-7.4.0.pyc differ
diff --git a/litellm/tests/__pycache__/test_client.cpython-311-pytest-7.4.0.pyc b/litellm/tests/__pycache__/test_client.cpython-311-pytest-7.4.0.pyc
new file mode 100644
index 000000000..3bc5a08bb
Binary files /dev/null and b/litellm/tests/__pycache__/test_client.cpython-311-pytest-7.4.0.pyc differ
diff --git a/litellm/tests/__pycache__/test_completion.cpython-311-pytest-7.4.0.pyc b/litellm/tests/__pycache__/test_completion.cpython-311-pytest-7.4.0.pyc
new file mode 100644
index 000000000..2baa7bc5f
Binary files /dev/null and b/litellm/tests/__pycache__/test_completion.cpython-311-pytest-7.4.0.pyc differ
diff --git a/litellm/tests/__pycache__/test_exceptions.cpython-311-pytest-7.4.0.pyc b/litellm/tests/__pycache__/test_exceptions.cpython-311-pytest-7.4.0.pyc
new file mode 100644
index 000000000..0e69bc88d
Binary files /dev/null and b/litellm/tests/__pycache__/test_exceptions.cpython-311-pytest-7.4.0.pyc differ
diff --git a/litellm/tests/__pycache__/test_logging.cpython-311-pytest-7.4.0.pyc b/litellm/tests/__pycache__/test_logging.cpython-311-pytest-7.4.0.pyc
new file mode 100644
index 000000000..9f71ef3a1
Binary files /dev/null and b/litellm/tests/__pycache__/test_logging.cpython-311-pytest-7.4.0.pyc differ
diff --git a/litellm/tests/__pycache__/test_model_fallback.cpython-311-pytest-7.4.0.pyc b/litellm/tests/__pycache__/test_model_fallback.cpython-311-pytest-7.4.0.pyc
new file mode 100644
index 000000000..864247d09
Binary files /dev/null and b/litellm/tests/__pycache__/test_model_fallback.cpython-311-pytest-7.4.0.pyc differ
diff --git a/litellm/tests/test_bad_params.py b/litellm/tests/test_bad_params.py
index dd1e8d509..53872e5fa 100644
--- a/litellm/tests/test_bad_params.py
+++ b/litellm/tests/test_bad_params.py
@@ -26,7 +26,7 @@ litellm.failure_callback = ["slack", "sentry", "posthog"]
 
 user_message = "Hello, how are you?"
 messages = [{ "content": user_message,"role": "user"}]
-model_val = "krrish is a model"
+model_val = None
 
 
 def test_completion_with_empty_model():
@@ -35,4 +35,4 @@ def test_completion_with_empty_model():
         response = completion(model=model_val, messages=messages)
     except Exception as e:
         print(f"error occurred: {e}")
-        pass
+        pass
\ No newline at end of file
diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py
index adb55a45e..b9bbbebe4 100644
--- a/litellm/tests/test_completion.py
+++ b/litellm/tests/test_completion.py
@@ -1,5 +1,8 @@
 import sys, os
 import traceback
+from dotenv import load_dotenv
+load_dotenv()
+import os
 sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
 import pytest
 import litellm
diff --git a/litellm/tests/test_logging.py b/litellm/tests/test_logging.py
index 21e4a879c..dbacf8b47 100644
--- a/litellm/tests/test_logging.py
+++ b/litellm/tests/test_logging.py
@@ -7,7 +7,9 @@ sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the
 import litellm
 from litellm import embedding, completion
 
-litellm.set_verbose = True
+litellm.set_verbose = False
+
+score = 0
 
 def logger_fn(model_call_object: dict):
     print(f"model call details: {model_call_object}")
@@ -18,6 +20,7 @@ messages = [{ "content": user_message,"role": "user"}]
 # test on openai completion call
 try:
     response = completion(model="gpt-3.5-turbo", messages=messages)
+    score +=1
 except:
     print(f"error occurred: {traceback.format_exc()}")
     pass
@@ -25,6 +28,7 @@ except:
 # test on non-openai completion call
 try:
     response = completion(model="claude-instant-1", messages=messages, logger_fn=logger_fn)
+    score +=1
 except:
     print(f"error occurred: {traceback.format_exc()}")
     pass
@@ -32,20 +36,23 @@ except:
 # test on openai embedding call
 try:
     response = embedding(model='text-embedding-ada-002', input=[user_message], logger_fn=logger_fn)
-    print(f"response: {str(response)[:50]}")
+    score +=1
 except:
     traceback.print_exc()
 
 # test on bad azure openai embedding call -> missing azure flag and this isn't an embedding model
 try:
     response = embedding(model='chatgpt-test', input=[user_message], logger_fn=logger_fn)
-    print(f"response: {str(response)[:50]}")
 except:
+    score +=1 # expect this to fail
     traceback.print_exc()
 
 # test on good azure openai embedding call
 try:
     response = embedding(model='azure-embedding-model', input=[user_message], azure=True, logger_fn=logger_fn)
-    print(f"response: {str(response)[:50]}")
+    score +=1
 except:
     traceback.print_exc()
+
+
+print(f"Score: {score}, Overall score: {score/5}")
\ No newline at end of file
diff --git a/litellm/tests/test_model_fallback.py b/litellm/tests/test_model_fallback.py
index b389e9f6a..69dc1f68d 100644
--- a/litellm/tests/test_model_fallback.py
+++ b/litellm/tests/test_model_fallback.py
@@ -12,7 +12,7 @@ litellm.failure_callback = ["slack", "sentry", "posthog"]
 
 litellm.set_verbose = True
 
-model_fallback_list = ["replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1", "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1", "chatgpt-test"]
+model_fallback_list = ["claude-instant-1", "gpt-3.5-turbo", "chatgpt-test"]
 
 user_message = "Hello, how are you?"
 messages = [{ "content": user_message,"role": "user"}]
@@ -21,6 +21,5 @@ for model in model_fallback_list:
     try:
         response = embedding(model="text-embedding-ada-002", input=[user_message])
         response = completion(model=model, messages=messages)
-        print(response)
     except Exception as e:
         print(f"error occurred: {traceback.format_exc()}")