diff --git a/litellm/main.py b/litellm/main.py
index c0df0f4cc..51d35b4d8 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -17,6 +17,7 @@ from litellm.utils import (
     CustomStreamWrapper,
     read_config_args,
     completion_with_fallbacks,
+    verify_access_key,
 )
 from .llms import anthropic
 from .llms import together_ai
diff --git a/litellm/tests/test_verify_openai_key.py b/litellm/tests/test_verify_openai_key.py
new file mode 100644
index 000000000..e5cfdb1dd
--- /dev/null
+++ b/litellm/tests/test_verify_openai_key.py
@@ -0,0 +1,7 @@
+from litellm import verify_access_key
+
+def test_bad_key():
+    # an invalid key should fail verification
+    response = verify_access_key("bad-key")
+    assert response == False, "expected False for an invalid key"
+test_bad_key()
\ No newline at end of file
diff --git a/litellm/utils.py b/litellm/utils.py
index 1a36fd918..db6cd1e4f 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -2554,3 +2554,18 @@ def trim_messages(
 
     except: # [NON-Blocking, if error occurs just return final_messages
         return messages
+
+# Verify that the user has passed in a valid and active api key
+def verify_access_key(access_key: str):
+    openai.api_key = access_key
+    try:
+        test_response = openai.ChatCompletion.create(
+            model="gpt-3.5-turbo",
+            messages=[
+                {"role": "user", "content": "test"},
+            ],
+            max_tokens=10,
+        )
+        return True  # request succeeded, so the key is valid and active
+    except:
+        return False  # request failed, so the key is invalid or inactive
\ No newline at end of file
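
For context, a minimal sketch of how a caller might use the new helper, assuming `litellm` is installed with the pre-1.0 `openai` SDK that `openai.ChatCompletion.create` requires. The key string is a placeholder, and note that `verify_access_key` sets `openai.api_key` globally as a side effect:

```python
from litellm import verify_access_key

# "sk-your-key-here" is a placeholder, not a real key.
# verify_access_key mutates the global openai.api_key.
if verify_access_key("sk-your-key-here"):
    print("key is valid and active")
else:
    print("key failed verification")
```

One design consequence worth noting: verification is performed by issuing a real `gpt-3.5-turbo` completion request (capped at 10 tokens), so each call to `verify_access_key` incurs a small, nonzero API cost.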