Litellm ruff linting enforcement (#5992)

* ci(config.yml): add a 'check_code_quality' step

Addresses https://github.com/BerriAI/litellm/issues/5991

* ci(config.yml): check why CircleCI doesn't pick up this test

* ci(config.yml): fix to run 'check_code_quality' tests

* fix(__init__.py): fix unprotected import

* fix(__init__.py): don't remove unused imports

* build(ruff.toml): update ruff.toml to ignore unused imports

* fix: ruff + pyright - fix linting + type-checking errors

* fix: fix linting errors

* fix(lago.py): fix module init error

* fix: fix linting errors

* ci(config.yml): cd into correct dir for checks

* fix(proxy_server.py): fix linting error

* fix(utils.py): fix bare except

bare excepts cause ruff linting errors

* fix: ruff - fix remaining linting errors

* fix(clickhouse.py): use standard logging object

* fix(__init__.py): fix unprotected import

* fix: ruff - fix linting errors

* fix: fix linting errors

* ci(config.yml): cleanup code qa step (formatting handled in local_testing)

* fix(_health_endpoints.py): fix ruff linting errors

* ci(config.yml): just use ruff in check_code_quality pipeline for now

* build(custom_guardrail.py): include missing file

* style(embedding_handler.py): fix ruff check
Krish Dholakia, 2024-10-01 16:44:20 -07:00, committed by GitHub
parent 3fc4ae0d65
commit d57be47b0f
No known key found for this signature in database; GPG key ID: B5690EEEBB952194
263 changed files with 1687 additions and 3320 deletions
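
Per the commit messages above, the new check_code_quality CircleCI step is essentially a ruff gate. A minimal sketch of reproducing that gate locally, assuming ruff is installed and a litellm/ target directory (the exact paths and flags in config.yml are not shown here):

    # Hypothetical local equivalent of the CI 'check_code_quality' step:
    # run ruff over the package and fail on any reported violation.
    import subprocess
    import sys

    result = subprocess.run(
        ["ruff", "check", "litellm/"],  # target path is an assumption
        capture_output=True,
        text=True,
    )
    print(result.stdout)
    sys.exit(result.returncode)  # non-zero exit code fails the pipeline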


@@ -62,7 +62,7 @@ def get_error_message(error_obj) -> Optional[str]:
         # If all else fails, return None
         return None
-    except Exception as e:
+    except Exception:
         return None
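
The `as e` binding in this handler was never read, which ruff reports as F841 (local variable assigned but never used); dropping the binding is behavior-preserving. A minimal sketch of the pattern, with `fn` as a hypothetical stand-in for the guarded call:

    def get_value_or_none(fn):
        try:
            return fn()
        except Exception:  # `except Exception as e:` would trip F841 since `e` is unused
            return None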
@@ -910,7 +910,7 @@ def exception_type(  # type: ignore
             ):
                 exception_mapping_worked = True
                 raise BadRequestError(
-                    message=f"SagemakerException - the value of 'n' must be > 0 and <= 2 for sagemaker endpoints",
+                    message="SagemakerException - the value of 'n' must be > 0 and <= 2 for sagemaker endpoints",
                     model=model,
                     llm_provider="sagemaker",
                     response=original_exception.response,
@@ -1122,7 +1122,7 @@ def exception_type(  # type: ignore
                 # 503 Getting metadata from plugin failed with error: Reauthentication is needed. Please run `gcloud auth application-default login` to reauthenticate.
                 exception_mapping_worked = True
                 raise BadRequestError(
-                    message=f"GeminiException - Invalid api key",
+                    message="GeminiException - Invalid api key",
                     model=model,
                     llm_provider="palm",
                     response=original_exception.response,
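
The two `message=f"..."` fixes above are ruff's F541 (f-string without any placeholders): nothing is interpolated, so the `f` prefix is noise and the plain literal is identical:

    msg_fstring = f"GeminiException - Invalid api key"  # F541: no placeholders
    msg_plain = "GeminiException - Invalid api key"     # same value, lint-clean
    assert msg_fstring == msg_plain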
@@ -2067,12 +2067,34 @@ def exception_logging(
             logger_fn(
                 model_call_details
             )  # Expectation: any logger function passed in by the user should accept a dict object
-        except Exception as e:
+        except Exception:
             verbose_logger.debug(
                 f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
             )
-    except Exception as e:
+    except Exception:
         verbose_logger.debug(
             f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
         )
         pass
+
+
+def _add_key_name_and_team_to_alert(request_info: str, metadata: dict) -> str:
+    """
+    Internal helper function for litellm proxy
+    Add the Key Name + Team Name to the error
+    Only gets added if the metadata contains the user_api_key_alias and user_api_key_team_alias
+    [Non-Blocking helper function]
+    """
+    try:
+        _api_key_name = metadata.get("user_api_key_alias", None)
+        _user_api_key_team_alias = metadata.get("user_api_key_team_alias", None)
+        if _api_key_name is not None:
+            request_info = (
+                f"\n\nKey Name: `{_api_key_name}`\nTeam: `{_user_api_key_team_alias}`"
+                + request_info
+            )
+
+        return request_info
+    except Exception:
+        return request_info
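
The newly added _add_key_name_and_team_to_alert helper prepends key/team context to proxy alert text when the metadata carries the aliases. A hypothetical call, assuming the helper above is in scope (the metadata values are invented for illustration):

    request_info = "Request failed: upstream timeout"
    metadata = {
        "user_api_key_alias": "prod-key",         # illustrative values
        "user_api_key_team_alias": "ml-platform",
    }
    # Prepends "\n\nKey Name: `prod-key`\nTeam: `ml-platform`" to the text;
    # if user_api_key_alias is missing, the text comes back unchanged.
    print(_add_key_name_and_team_to_alert(request_info, metadata))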


@@ -476,7 +476,7 @@ def get_llm_provider(
         elif model == "*":
             custom_llm_provider = "openai"
         if custom_llm_provider is None or custom_llm_provider == "":
-            if litellm.suppress_debug_info == False:
+            if litellm.suppress_debug_info is False:
                 print()  # noqa
                 print(  # noqa
                     "\033[1;31mProvider List: https://docs.litellm.ai/docs/providers\033[0m"  # noqa


@@ -52,18 +52,8 @@ from litellm.types.utils import (
 )
 from litellm.utils import (
     _get_base_model_from_metadata,
     add_breadcrumb,
-    capture_exception,
     customLogger,
-    liteDebuggerClient,
-    logfireLogger,
-    lunaryLogger,
     print_verbose,
-    prometheusLogger,
-    prompt_token_calculator,
-    promptLayerLogger,
-    supabaseClient,
-    weightsBiasesLogger,
 )
 from ..integrations.aispend import AISpendLogger
@@ -71,7 +61,6 @@ from ..integrations.athina import AthinaLogger
 from ..integrations.berrispend import BerriSpendLogger
 from ..integrations.braintrust_logging import BraintrustLogger
 from ..integrations.clickhouse import ClickhouseLogger
-from ..integrations.custom_logger import CustomLogger
 from ..integrations.datadog.datadog import DataDogLogger
 from ..integrations.dynamodb import DyanmoDBLogger
 from ..integrations.galileo import GalileoObserve
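
This import pruning is what the "ruff.toml ... ignore unused imports" and "don't remove unused imports" commits above negotiate: ruff's F401 flags names that are imported but never used, yet __init__.py re-exports are intentional, so they are either ignored in config or marked per line. The "fix unprotected import" commits refer to the related pattern of guarding optional dependencies. A minimal sketch of both, with module names chosen for illustration:

    # Intentional re-export: silence F401 per line instead of deleting it.
    from litellm.utils import get_model_info  # noqa: F401

    # "Protected" import: an optional dependency must not break module import.
    try:
        import prometheus_client  # illustrative optional dependency
    except ImportError:
        prometheus_client = None  # callers check for None before use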
@@ -423,7 +412,7 @@ class Logging:
             elif callback == "sentry" and add_breadcrumb:
                 try:
                     details_to_log = copy.deepcopy(self.model_call_details)
-                except:
+                except Exception:
                     details_to_log = self.model_call_details
                 if litellm.turn_off_message_logging:
                     # make a copy of the _model_Call_details and log it
@@ -528,7 +517,7 @@
             verbose_logger.debug("reaches sentry breadcrumbing")
             try:
                 details_to_log = copy.deepcopy(self.model_call_details)
-            except:
+            except Exception:
                 details_to_log = self.model_call_details
             if litellm.turn_off_message_logging:
                 # make a copy of the _model_Call_details and log it
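
The two sentry-breadcrumb fixes above are ruff's E722 (no bare `except`). Beyond style, a bare `except:` catches BaseException, including KeyboardInterrupt and SystemExit, so `except Exception:` is the safer broad fallback:

    import copy

    def snapshot(details: dict) -> dict:
        # `details` stands in for self.model_call_details
        try:
            return copy.deepcopy(details)
        except Exception:  # a bare `except:` here would also swallow KeyboardInterrupt
            return details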
@@ -1326,7 +1315,7 @@
                     and customLogger is not None
                 ):  # custom logger functions
                     print_verbose(
-                        f"success callbacks: Running Custom Callback Function"
+                        "success callbacks: Running Custom Callback Function"
                     )
                     customLogger.log_event(
                         kwargs=self.model_call_details,
@@ -1400,7 +1389,7 @@
                     self.model_call_details["response_cost"] = 0.0
                 else:
                     # check if base_model set on azure
-                    base_model = _get_base_model_from_metadata(
+                    _get_base_model_from_metadata(
                         model_call_details=self.model_call_details
                     )
                     # base_model defaults to None if not set on model_info
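
Same F841 family as the exception handlers: the result of _get_base_model_from_metadata was bound but never read, so the commit keeps the call and drops the dead name. A reduced sketch, with the helper stubbed out for illustration:

    def _get_base_model_from_metadata(model_call_details=None):
        # reduced stand-in for the real helper
        return (model_call_details or {}).get("base_model")

    details = {"base_model": "azure/gpt-4"}
    base_model = _get_base_model_from_metadata(model_call_details=details)  # F841 when unused
    _get_base_model_from_metadata(model_call_details=details)               # lint-clean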
@@ -1483,7 +1472,7 @@
         for callback in callbacks:
             # check if callback can run for this request
             litellm_params = self.model_call_details.get("litellm_params", {})
-            if litellm_params.get("no-log", False) == True:
+            if litellm_params.get("no-log", False) is True:
                 # proxy cost tracking cal backs should run
                 if not (
                     isinstance(callback, CustomLogger)
@@ -1492,7 +1481,7 @@
                     print_verbose("no-log request, skipping logging")
                     continue
             try:
-                if kwargs.get("no-log", False) == True:
+                if kwargs.get("no-log", False) is True:
                     print_verbose("no-log request, skipping logging")
                     continue
                 if (
@@ -1641,7 +1630,7 @@
                         end_time=end_time,
                         print_verbose=print_verbose,
                     )
-            except Exception as e:
+            except Exception:
                 verbose_logger.error(
                     f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while success logging {traceback.format_exc()}"
                 )
@@ -2433,7 +2422,7 @@
         call_type = kwargs.get("call_type")
         cache_hit = kwargs.get("cache_hit", False)
         usage = response_obj.get("usage", None) or {}
-        if type(usage) == litellm.Usage:
+        if type(usage) is litellm.Usage:
             usage = dict(usage)
         id = response_obj.get("id", kwargs.get("litellm_call_id"))
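
`type(usage) == litellm.Usage` trips ruff's E721 (use `is` or isinstance() for type comparisons). Classes are single objects, so `is` compares identity directly and avoids any `__eq__` overloads; isinstance() is the broader alternative when subclasses should match:

    class Usage:  # stand-in for litellm.Usage
        pass

    usage = Usage()
    print(type(usage) == Usage)      # works, but flagged as E721
    print(type(usage) is Usage)      # identity check on the class object, lint-clean
    print(isinstance(usage, Usage))  # also accepts subclasses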
@@ -2656,3 +2645,11 @@ def scrub_sensitive_keys_in_metadata(litellm_params: Optional[dict]):
     litellm_params["metadata"] = metadata
     return litellm_params
+
+
+# integration helper function
+def modify_integration(integration_name, integration_params):
+    global supabaseClient
+    if integration_name == "supabase":
+        if "table_name" in integration_params:
+            Supabase.supabase_table_name = integration_params["table_name"]
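
The restored modify_integration helper is a thin config hook: only the "supabase" integration and its "table_name" key are handled. A hypothetical call (the table name is invented):

    modify_integration("supabase", {"table_name": "litellm_request_logs"})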


@@ -45,7 +45,7 @@ def pick_cheapest_chat_model_from_llm_provider(custom_llm_provider: str):
         try:
             model_info = litellm.get_model_info(
                 model=model, custom_llm_provider=custom_llm_provider
             )
-        except:
+        except Exception:
             continue
         if model_info.get("mode") != "chat":
             continue