forked from phoenix/litellm-mirror
util: verify_access_key
Verify that the user provided a valid OpenAI token by issuing a test request to the OpenAI endpoint.
parent c3ed7e2b29
commit 261db15bcf
3 changed files with 23 additions and 0 deletions
@@ -17,6 +17,7 @@ from litellm.utils import (
     CustomStreamWrapper,
     read_config_args,
     completion_with_fallbacks,
+    verify_access_key,
 )
 from .llms import anthropic
 from .llms import together_ai
litellm/tests/test_verify_openai_key.py (new file, +7)
@@ -0,0 +1,7 @@
+from litellm import verify_access_key
+
+def bad_key():
+    key = "bad-key"
+    response = verify_access_key(key)
+    print(f"response: {response}")
+bad_key()
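
The committed test only prints the helper's return value. As a minimal sketch (hypothetical, not part of the commit; it assumes the return convention shown in the next hunk, where True means the key is valid), an assertion-style version could look like:

from litellm import verify_access_key

def test_bad_key_is_rejected():
    # An obviously malformed key should fail the live test request to OpenAI.
    assert verify_access_key("bad-key") is False

test_bad_key_is_rejected()

Note that this still issues a real network call to the OpenAI endpoint, so it needs connectivity to run.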
@@ -2554,3 +2554,18 @@ def trim_messages(
     except:  # [NON-Blocking] if an error occurs, just return final_messages
         return messages
 
+
+# Verify that the user has passed in a valid and active API key
+def verify_access_key(access_key: str):
+    openai.api_key = access_key
+    try:
+        test_response = openai.ChatCompletion.create(
+            model="gpt-3.5-turbo",
+            messages=[
+                {"role": "user", "content": "test"},
+            ],
+            max_tokens=10,
+        )
+        return True  # the test request succeeded, so the key is valid and active
+    except:
+        return False  # the request was rejected, so the key is invalid
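
For reference, a minimal usage sketch of the new helper (hypothetical, not part of the commit; the key string below is a placeholder):

from litellm import verify_access_key

candidate_key = "sk-..."  # placeholder; a real OpenAI key would go here

if verify_access_key(candidate_key):
    print("key accepted by the OpenAI endpoint")
else:
    print("key rejected (or the test request failed)")

One caveat worth noting: the helper makes a live ChatCompletion request, so a successful check consumes a small number of tokens (max_tokens=10) against the supplied key.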