diff --git a/litellm/main.py b/litellm/main.py
index a0b233861..4886744d0 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -17,6 +17,7 @@ from litellm.utils import (
     CustomStreamWrapper,
     read_config_args,
     completion_with_fallbacks,
+    verify_access_key,
     get_llm_provider
 )
 from .llms import anthropic
diff --git a/litellm/tests/test_verify_openai_key.py b/litellm/tests/test_verify_openai_key.py
new file mode 100644
index 000000000..e671641ac
--- /dev/null
+++ b/litellm/tests/test_verify_openai_key.py
@@ -0,0 +1,20 @@
+from litellm import verify_access_key
+import os
+
+def test_bad_key():
+    key = "bad-key"
+    response = verify_access_key(key)
+    if response == False:
+        pass
+    else:
+        raise Exception("Bad key was not detected")
+test_bad_key()
+
+def test_good_key():
+    key = os.environ['OPENAI_API_KEY']
+    response = verify_access_key(key)
+    if response == True:
+        pass
+    else:
+        raise Exception("Good key did not pass")
+test_good_key()
\ No newline at end of file
diff --git a/litellm/utils.py b/litellm/utils.py
index 4932d9d69..83c0e05f9 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -2604,6 +2604,22 @@ def trim_messages(
         return messages
 
+
+# Verify that the user has passed in a valid and active api key
+def verify_access_key(access_key:str):
+    openai.api_key = access_key
+    try:
+        test_response = openai.ChatCompletion.create(
+            model="gpt-3.5-turbo",
+            messages=[
+                {"role": "user", "content": "test"},
+            ],
+            max_tokens = 10
+        )
+        return True
+    except:
+        return False
+
 # this helper reads the .env and returns a list of supported llms for user
 def get_valid_models():
     try:
@@ -2632,4 +2648,4 @@ def get_valid_models():
                 valid_models.extend(models_for_provider)
         return valid_models
     except:
-        return [] # NON-Blocking
\ No newline at end of file
+        return [] # NON-Blocking
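
For reviewers, a minimal sketch of how the new helper would be exercised once this patch is installed. It assumes the package-root export added in the `litellm/main.py` hunk, and the positive case assumes a live `OPENAI_API_KEY` in the environment; note the probe issues a real, 10-token `gpt-3.5-turbo` completion, so it needs network access and spends a trivial amount of quota.

```python
# Usage sketch for verify_access_key (assumes this patch is installed
# and that the helper is exported from the package root as above).
import os

from litellm import verify_access_key

# An obviously malformed key makes the probe completion raise inside
# the helper, which swallows the exception and reports False.
assert verify_access_key("bad-key") is False

# A valid, active key lets the test completion succeed and returns True.
# Guarded so the sketch still runs without credentials configured.
if "OPENAI_API_KEY" in os.environ:
    assert verify_access_key(os.environ["OPENAI_API_KEY"]) is True
```

One caveat worth flagging in review: because the helper catches every exception, a transient network failure or rate limit is indistinguishable from an invalid key, so callers should read `False` as "could not verify" rather than "definitely revoked".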