(fix) failing prompt layer test

This commit is contained in:
ishaan-jaff 2024-02-22 13:36:14 -08:00
parent b6c7c3b385
commit 07fc45e01b
2 changed files with 11 additions and 10 deletions

View file

@@ -5104,8 +5104,8 @@ async def google_login(request: Request):
             redirect_params = {}
             state = os.getenv("GENERIC_CLIENT_STATE", None)
             if state:
-                redirect_params['state'] = state
-            return await generic_sso.get_login_redirect(**redirect_params)
+                redirect_params["state"] = state
+            return await generic_sso.get_login_redirect(**redirect_params)  # type: ignore
         elif ui_username is not None:
             # No Google, Microsoft SSO
             # Use UI Credentials set in .env
@@ -5368,15 +5368,15 @@ async def auth_callback(request: Request):
         # generic client id
         if generic_client_id is not None:
-            user_id = result.id
-            user_email = result.email
+            user_id = getattr(result, "id", None)
+            user_email = getattr(result, "email", None)
             user_role = getattr(result, generic_user_role_attribute_name, None)
             if user_id is None:
                 user_id = getattr(result, "first_name", "") + getattr(result, "last_name", "")
     user_info = None
-    user_id_models = []
+    user_id_models: List = []
     # User might not be already created on first generation of key
     # But if it is, we want its models preferences

View file

@@ -9,8 +9,6 @@ import litellm
 import pytest

-litellm.success_callback = ["promptlayer"]
-litellm.set_verbose = True
 import time

 # def test_promptlayer_logging():
@@ -45,6 +43,8 @@ def test_promptlayer_logging_with_metadata():
         # Redirect stdout
         old_stdout = sys.stdout
         sys.stdout = new_stdout = io.StringIO()
+        litellm.set_verbose = True
+        litellm.success_callback = ["promptlayer"]
         response = completion(
             model="gpt-3.5-turbo",
@@ -69,6 +69,9 @@ def test_promptlayer_logging_with_metadata():
 def test_promptlayer_logging_with_metadata_tags():
     try:
         # Redirect stdout
+        litellm.set_verbose = True
+        litellm.success_callback = ["promptlayer"]
         old_stdout = sys.stdout
         sys.stdout = new_stdout = io.StringIO()
@ -78,7 +81,7 @@ def test_promptlayer_logging_with_metadata_tags():
temperature=0.2, temperature=0.2,
max_tokens=20, max_tokens=20,
metadata={"model": "ai21", "pl_tags": ["env:dev"]}, metadata={"model": "ai21", "pl_tags": ["env:dev"]},
mock_response="this is a mock response" mock_response="this is a mock response",
) )
# Restore stdout # Restore stdout
@@ -92,8 +95,6 @@ def test_promptlayer_logging_with_metadata_tags():
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")

-test_promptlayer_logging_with_metadata()
-test_promptlayer_logging_with_metadata_tags()

 # def test_chat_openai():
 #     try: