From 610974b4fce4ccd1546d99b43037f09219727489 Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Fri, 18 Oct 2024 15:36:49 +0530
Subject: [PATCH] (code quality) add ruff check PLR0915 for `too-many-statements` (#6309)

* ruff add PLR0915

* add noqa for PLR0915

* fix noqa

* add # noqa: PLR0915

* # noqa: PLR0915

* # noqa: PLR0915

* # noqa: PLR0915

* add # noqa: PLR0915

* # noqa: PLR0915

* # noqa: PLR0915

* # noqa: PLR0915

* # noqa: PLR0915
---
 db_scripts/create_views.py | 2 +-
 litellm/caching/caching.py | 2 +-
 litellm/caching/qdrant_semantic_cache.py | 2 +-
 litellm/cost_calculator.py | 4 +--
 .../SlackAlerting/slack_alerting.py | 4 +--
 litellm/integrations/braintrust_logging.py | 8 +++--
 litellm/integrations/langfuse.py | 4 +--
 litellm/integrations/langsmith.py | 4 ++-
 litellm/integrations/opentelemetry.py | 2 +-
 litellm/integrations/opik/opik.py | 2 +-
 litellm/integrations/prometheus.py | 4 ++-
 .../exception_mapping_utils.py | 2 +-
 .../get_llm_provider_logic.py | 2 +-
 litellm/litellm_core_utils/litellm_logging.py | 14 ++++----
 litellm/llms/AzureOpenAI/azure.py | 2 +-
 litellm/llms/OpenAI/openai.py | 2 +-
 .../transformation.py | 2 +-
 litellm/llms/azure_text.py | 2 +-
 litellm/llms/base_aws_llm.py | 2 +-
 litellm/llms/bedrock/chat/converse_handler.py | 2 +-
 litellm/llms/bedrock/chat/invoke_handler.py | 4 +--
 litellm/llms/huggingface_restapi.py | 4 +--
 litellm/llms/ollama_chat.py | 2 +-
 litellm/llms/predibase.py | 2 +-
 litellm/llms/prompt_templates/factory.py | 10 +++---
 litellm/llms/sagemaker/sagemaker.py | 2 +-
 .../vertex_and_google_ai_studio_gemini.py | 2 +-
 .../vertex_ai_non_gemini.py | 8 ++---
 litellm/main.py | 12 +++----
 litellm/proxy/auth/auth_checks.py | 2 +-
 litellm/proxy/auth/user_api_key_auth.py | 2 +-
 litellm/proxy/common_utils/callback_utils.py | 2 +-
 litellm/proxy/db/create_views.py | 2 +-
 .../guardrails/guardrail_hooks/lakera_ai.py | 2 +-
 litellm/proxy/guardrails/init_guardrails.py | 2 +-
 .../health_endpoints/_health_endpoints.py | 2 +-
 .../proxy/hooks/parallel_request_limiter.py | 6 ++--
 litellm/proxy/litellm_pre_call_utils.py | 2 +-
 .../internal_user_endpoints.py | 2 +-
 .../key_management_endpoints.py | 4 +--
 .../management_endpoints/team_endpoints.py | 2 +-
 litellm/proxy/management_endpoints/ui_sso.py | 4 +--
 .../pass_through_endpoints.py | 4 +--
 litellm/proxy/proxy_cli.py | 2 +-
 litellm/proxy/proxy_server.py | 28 ++++++++--------
 .../spend_management_endpoints.py | 2 +-
 litellm/proxy/utils.py | 8 ++---
 litellm/router.py | 8 ++---
 litellm/router_strategy/lowest_cost.py | 2 +-
 litellm/router_strategy/lowest_latency.py | 10 ++++--
 litellm/router_strategy/lowest_tpm_rpm.py | 2 +-
 litellm/router_strategy/lowest_tpm_rpm_v2.py | 2 +-
 .../client_initalization_utils.py | 2 +-
 litellm/secret_managers/main.py | 2 +-
 litellm/utils.py | 32 ++++++++++---------
 ruff.toml | 4 +--
 56 files changed, 137 insertions(+), 119 deletions(-)

diff --git a/db_scripts/create_views.py b/db_scripts/create_views.py
index c42f67124..2b6a28ccb 100644
--- a/db_scripts/create_views.py
+++ b/db_scripts/create_views.py
@@ -17,7 +17,7 @@ db = Prisma(
 )
 
 
-async def check_view_exists():
+async def check_view_exists(): # noqa: PLR0915
     """
     Checks if the LiteLLM_VerificationTokenView and MonthlyGlobalSpend exists in the user's db.
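
For readers unfamiliar with the rule: PLR0915 is Ruff's port of Pylint's `too-many-statements` check, which flags any function body whose statement count exceeds a configurable budget (50 by default). Every hunk that follows repeats the pattern shown above: the rule is switched on project-wide, and each pre-existing oversized function is opted out with a line-level `# noqa: PLR0915`, so the lint run stays green while newly written functions are held to the limit. A minimal sketch of that suppression pattern, using a hypothetical function rather than one from the repo:

```python
# Hypothetical example (not from the LiteLLM codebase) showing the
# line-level suppression style this patch applies to existing long functions.


def legacy_request_handler(payload: dict) -> dict:  # noqa: PLR0915
    # Imagine dozens of statements here. Rather than refactor the function in
    # this patch, Ruff's PLR0915 check is silenced for this definition only;
    # every other function in the module is still checked.
    result = dict(payload)
    result["handled"] = True
    return result
```
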
diff --git a/litellm/caching/caching.py b/litellm/caching/caching.py index 457a7f7a4..cbec98f72 100644 --- a/litellm/caching/caching.py +++ b/litellm/caching/caching.py @@ -220,7 +220,7 @@ class Cache: if self.namespace is not None and isinstance(self.cache, RedisCache): self.cache.namespace = self.namespace - def get_cache_key(self, *args, **kwargs) -> str: + def get_cache_key(self, *args, **kwargs) -> str: # noqa: PLR0915 """ Get the cache key for the given arguments. diff --git a/litellm/caching/qdrant_semantic_cache.py b/litellm/caching/qdrant_semantic_cache.py index e34b28e18..be67001f6 100644 --- a/litellm/caching/qdrant_semantic_cache.py +++ b/litellm/caching/qdrant_semantic_cache.py @@ -20,7 +20,7 @@ from .base_cache import BaseCache class QdrantSemanticCache(BaseCache): - def __init__( + def __init__( # noqa: PLR0915 self, qdrant_api_base=None, qdrant_api_key=None, diff --git a/litellm/cost_calculator.py b/litellm/cost_calculator.py index d86706f5b..a10f664ac 100644 --- a/litellm/cost_calculator.py +++ b/litellm/cost_calculator.py @@ -79,7 +79,7 @@ def _cost_per_token_custom_pricing_helper( return None -def cost_per_token( +def cost_per_token( # noqa: PLR0915 model: str = "", prompt_tokens: int = 0, completion_tokens: int = 0, @@ -474,7 +474,7 @@ def _select_model_name_for_cost_calc( return return_model -def completion_cost( +def completion_cost( # noqa: PLR0915 completion_response=None, model: Optional[str] = None, prompt="", diff --git a/litellm/integrations/SlackAlerting/slack_alerting.py b/litellm/integrations/SlackAlerting/slack_alerting.py index b39d8d2ee..92223aff3 100644 --- a/litellm/integrations/SlackAlerting/slack_alerting.py +++ b/litellm/integrations/SlackAlerting/slack_alerting.py @@ -287,7 +287,7 @@ class SlackAlerting(CustomBatchLogger): except Exception: return 0 - async def send_daily_reports(self, router) -> bool: + async def send_daily_reports(self, router) -> bool: # noqa: PLR0915 """ Send a daily report on: - Top 5 deployments with most failed requests @@ -573,7 +573,7 @@ class SlackAlerting(CustomBatchLogger): ttl=self.alerting_args.budget_alert_ttl, ) - async def budget_alerts( + async def budget_alerts( # noqa: PLR0915 self, type: Literal[ "token_budget", diff --git a/litellm/integrations/braintrust_logging.py b/litellm/integrations/braintrust_logging.py index d811f2291..a0c76a258 100644 --- a/litellm/integrations/braintrust_logging.py +++ b/litellm/integrations/braintrust_logging.py @@ -125,7 +125,9 @@ class BraintrustLogger(CustomLogger): self.default_project_id = project_dict["id"] - def log_success_event(self, kwargs, response_obj, start_time, end_time): + def log_success_event( # noqa: PLR0915 + self, kwargs, response_obj, start_time, end_time + ): verbose_logger.debug("REACHES BRAINTRUST SUCCESS") try: litellm_call_id = kwargs.get("litellm_call_id") @@ -237,7 +239,9 @@ class BraintrustLogger(CustomLogger): except Exception as e: raise e # don't use verbose_logger.exception, if exception is raised - async def async_log_success_event(self, kwargs, response_obj, start_time, end_time): + async def async_log_success_event( # noqa: PLR0915 + self, kwargs, response_obj, start_time, end_time + ): verbose_logger.debug("REACHES BRAINTRUST SUCCESS") try: litellm_call_id = kwargs.get("litellm_call_id") diff --git a/litellm/integrations/langfuse.py b/litellm/integrations/langfuse.py index c79b43422..38f52525a 100644 --- a/litellm/integrations/langfuse.py +++ b/litellm/integrations/langfuse.py @@ -143,7 +143,7 @@ class LangFuseLogger: # level ="ERROR" # can be any 
of DEBUG, DEFAULT, WARNING or ERROR # status_message='error' # can be any string (e.g. stringified stack trace or error body) # ) - def log_event( + def log_event( # noqa: PLR0915 self, kwargs, response_obj, @@ -349,7 +349,7 @@ class LangFuseLogger: ) ) - def _log_langfuse_v2( + def _log_langfuse_v2( # noqa: PLR0915 self, user_id, metadata, diff --git a/litellm/integrations/langsmith.py b/litellm/integrations/langsmith.py index 1584cd081..951393445 100644 --- a/litellm/integrations/langsmith.py +++ b/litellm/integrations/langsmith.py @@ -132,7 +132,9 @@ class LangsmithLogger(CustomBatchLogger): LANGSMITH_PROJECT=_credentials_project, ) - def _prepare_log_data(self, kwargs, response_obj, start_time, end_time): + def _prepare_log_data( # noqa: PLR0915 + self, kwargs, response_obj, start_time, end_time + ): import json from datetime import datetime as dt diff --git a/litellm/integrations/opentelemetry.py b/litellm/integrations/opentelemetry.py index 4981f8f4e..171ec21e7 100644 --- a/litellm/integrations/opentelemetry.py +++ b/litellm/integrations/opentelemetry.py @@ -393,7 +393,7 @@ class OpenTelemetry(CustomLogger): except Exception: return "" - def set_attributes(self, span: Span, kwargs, response_obj): + def set_attributes(self, span: Span, kwargs, response_obj): # noqa: PLR0915 try: if self.callback_name == "arize": from litellm.integrations.arize_ai import set_arize_ai_attributes diff --git a/litellm/integrations/opik/opik.py b/litellm/integrations/opik/opik.py index 5cf0a1eca..c78c4de4e 100644 --- a/litellm/integrations/opik/opik.py +++ b/litellm/integrations/opik/opik.py @@ -182,7 +182,7 @@ class OpikLogger(CustomBatchLogger): url=self.span_url, headers=self.headers, batch={"spans": spans} ) - def _create_opik_payload( + def _create_opik_payload( # noqa: PLR0915 self, kwargs, response_obj, start_time, end_time ) -> List[Dict]: diff --git a/litellm/integrations/prometheus.py b/litellm/integrations/prometheus.py index f67caa405..370ab1575 100644 --- a/litellm/integrations/prometheus.py +++ b/litellm/integrations/prometheus.py @@ -321,7 +321,9 @@ class PrometheusLogger(CustomLogger): print_verbose(f"Got exception on init prometheus client {str(e)}") raise e - async def async_log_success_event(self, kwargs, response_obj, start_time, end_time): + async def async_log_success_event( # noqa: PLR0915 + self, kwargs, response_obj, start_time, end_time + ): # Define prometheus client from litellm.proxy.common_utils.callback_utils import ( get_model_group_from_litellm_kwargs, diff --git a/litellm/litellm_core_utils/exception_mapping_utils.py b/litellm/litellm_core_utils/exception_mapping_utils.py index 2572e695f..94eb5c623 100644 --- a/litellm/litellm_core_utils/exception_mapping_utils.py +++ b/litellm/litellm_core_utils/exception_mapping_utils.py @@ -89,7 +89,7 @@ def _get_response_headers(original_exception: Exception) -> Optional[httpx.Heade return _response_headers -def exception_type( # type: ignore +def exception_type( # type: ignore # noqa: PLR0915 model, original_exception, custom_llm_provider, diff --git a/litellm/litellm_core_utils/get_llm_provider_logic.py b/litellm/litellm_core_utils/get_llm_provider_logic.py index d778ce723..fad826680 100644 --- a/litellm/litellm_core_utils/get_llm_provider_logic.py +++ b/litellm/litellm_core_utils/get_llm_provider_logic.py @@ -67,7 +67,7 @@ def handle_cohere_chat_model_custom_llm_provider( return model, custom_llm_provider -def get_llm_provider( +def get_llm_provider( # noqa: PLR0915 model: str, custom_llm_provider: Optional[str] = None, api_base: 
Optional[str] = None, diff --git a/litellm/litellm_core_utils/litellm_logging.py b/litellm/litellm_core_utils/litellm_logging.py index f6a7b2a37..d17b2c790 100644 --- a/litellm/litellm_core_utils/litellm_logging.py +++ b/litellm/litellm_core_utils/litellm_logging.py @@ -428,7 +428,7 @@ class Logging: ): # if model name was changes pre-call, overwrite the initial model call name with the new one self.model_call_details["model"] = model - def pre_call(self, input, api_key, model=None, additional_args={}): + def pre_call(self, input, api_key, model=None, additional_args={}): # noqa: PLR0915 # Log the exact input to the LLM API litellm.error_logs["PRE_CALL"] = locals() try: @@ -868,7 +868,7 @@ class Logging: except Exception as e: raise Exception(f"[Non-Blocking] LiteLLM.Success_Call Error: {str(e)}") - def success_handler( + def success_handler( # noqa: PLR0915 self, result=None, start_time=None, end_time=None, cache_hit=None, **kwargs ): print_verbose(f"Logging Details LiteLLM-Success Call: Cache_hit={cache_hit}") @@ -1494,7 +1494,7 @@ class Logging: ), ) - async def async_success_handler( + async def async_success_handler( # noqa: PLR0915 self, result=None, start_time=None, end_time=None, cache_hit=None, **kwargs ): """ @@ -1833,7 +1833,7 @@ class Logging: kwargs=self.model_call_details, ) # type: ignore - def failure_handler( + def failure_handler( # noqa: PLR0915 self, exception, traceback_exception, start_time=None, end_time=None ): verbose_logger.debug( @@ -2191,7 +2191,7 @@ class Logging: return None -def set_callbacks(callback_list, function_id=None): +def set_callbacks(callback_list, function_id=None): # noqa: PLR0915 """ Globally sets the callback client """ @@ -2301,7 +2301,7 @@ def set_callbacks(callback_list, function_id=None): raise e -def _init_custom_logger_compatible_class( +def _init_custom_logger_compatible_class( # noqa: PLR0915 logging_integration: litellm._custom_logger_compatible_callbacks_literal, internal_usage_cache: Optional[DualCache], llm_router: Optional[ @@ -2629,7 +2629,7 @@ def is_valid_sha256_hash(value: str) -> bool: return bool(re.fullmatch(r"[a-fA-F0-9]{64}", value)) -def get_standard_logging_object_payload( +def get_standard_logging_object_payload( # noqa: PLR0915 kwargs: Optional[dict], init_response_obj: Union[Any, BaseModel, dict], start_time: dt_object, diff --git a/litellm/llms/AzureOpenAI/azure.py b/litellm/llms/AzureOpenAI/azure.py index 3f9908289..c5efe2d12 100644 --- a/litellm/llms/AzureOpenAI/azure.py +++ b/litellm/llms/AzureOpenAI/azure.py @@ -344,7 +344,7 @@ class AzureChatCompletion(BaseLLM): except Exception as e: raise e - def completion( + def completion( # noqa: PLR0915 self, model: str, messages: list, diff --git a/litellm/llms/OpenAI/openai.py b/litellm/llms/OpenAI/openai.py index 704dcf304..cb118adca 100644 --- a/litellm/llms/OpenAI/openai.py +++ b/litellm/llms/OpenAI/openai.py @@ -612,7 +612,7 @@ class OpenAIChatCompletion(BaseLLM): else: raise e - def completion( # type: ignore + def completion( # type: ignore # noqa: PLR0915 self, model_response: ModelResponse, timeout: Union[float, httpx.Timeout], diff --git a/litellm/llms/anthropic/experimental_pass_through/transformation.py b/litellm/llms/anthropic/experimental_pass_through/transformation.py index 2a82594ba..0f9a31f83 100644 --- a/litellm/llms/anthropic/experimental_pass_through/transformation.py +++ b/litellm/llms/anthropic/experimental_pass_through/transformation.py @@ -73,7 +73,7 @@ class AnthropicExperimentalPassThroughConfig: """ return ["messages", "metadata", "system", 
"tool_choice", "tools"] - def translate_anthropic_messages_to_openai( + def translate_anthropic_messages_to_openai( # noqa: PLR0915 self, messages: List[ Union[ diff --git a/litellm/llms/azure_text.py b/litellm/llms/azure_text.py index 493d9586f..c75965a8f 100644 --- a/litellm/llms/azure_text.py +++ b/litellm/llms/azure_text.py @@ -141,7 +141,7 @@ class AzureTextCompletion(BaseLLM): headers["Authorization"] = f"Bearer {azure_ad_token}" return headers - def completion( + def completion( # noqa: PLR0915 self, model: str, messages: list, diff --git a/litellm/llms/base_aws_llm.py b/litellm/llms/base_aws_llm.py index ba1368a10..70e3defc7 100644 --- a/litellm/llms/base_aws_llm.py +++ b/litellm/llms/base_aws_llm.py @@ -38,7 +38,7 @@ class BaseAWSLLM(BaseLLM): credential_str = json.dumps(credential_args, sort_keys=True) return hashlib.sha256(credential_str.encode()).hexdigest() - def get_credentials( + def get_credentials( # noqa: PLR0915 self, aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, diff --git a/litellm/llms/bedrock/chat/converse_handler.py b/litellm/llms/bedrock/chat/converse_handler.py index 6fbe81770..305bd86ce 100644 --- a/litellm/llms/bedrock/chat/converse_handler.py +++ b/litellm/llms/bedrock/chat/converse_handler.py @@ -200,7 +200,7 @@ class BedrockConverseLLM(BaseAWSLLM): encoding=encoding, ) - def completion( + def completion( # noqa: PLR0915 self, model: str, messages: list, diff --git a/litellm/llms/bedrock/chat/invoke_handler.py b/litellm/llms/bedrock/chat/invoke_handler.py index 90267da3a..7805f74dc 100644 --- a/litellm/llms/bedrock/chat/invoke_handler.py +++ b/litellm/llms/bedrock/chat/invoke_handler.py @@ -308,7 +308,7 @@ class BedrockLLM(BaseAWSLLM): prompt += f"{message['content']}" return prompt, chat_history # type: ignore - def process_response( + def process_response( # noqa: PLR0915 self, model: str, response: Union[requests.Response, httpx.Response], @@ -574,7 +574,7 @@ class BedrockLLM(BaseAWSLLM): """ return urllib.parse.quote(model_id, safe="") - def completion( + def completion( # noqa: PLR0915 self, model: str, messages: list, diff --git a/litellm/llms/huggingface_restapi.py b/litellm/llms/huggingface_restapi.py index 973cded0b..67db83ba2 100644 --- a/litellm/llms/huggingface_restapi.py +++ b/litellm/llms/huggingface_restapi.py @@ -333,7 +333,7 @@ class Huggingface(BaseLLM): headers = default_headers return headers - def convert_to_model_response_object( + def convert_to_model_response_object( # noqa: PLR0915 self, completion_response, model_response: litellm.ModelResponse, @@ -447,7 +447,7 @@ class Huggingface(BaseLLM): model_response._hidden_params["original_response"] = completion_response return model_response - def completion( + def completion( # noqa: PLR0915 self, model: str, messages: list, diff --git a/litellm/llms/ollama_chat.py b/litellm/llms/ollama_chat.py index 7b23a5326..536f766e0 100644 --- a/litellm/llms/ollama_chat.py +++ b/litellm/llms/ollama_chat.py @@ -213,7 +213,7 @@ class OllamaChatConfig: # ollama implementation -def get_ollama_response( +def get_ollama_response( # noqa: PLR0915 model_response: litellm.ModelResponse, messages: list, optional_params: dict, diff --git a/litellm/llms/predibase.py b/litellm/llms/predibase.py index 602700294..96796f9dc 100644 --- a/litellm/llms/predibase.py +++ b/litellm/llms/predibase.py @@ -240,7 +240,7 @@ class PredibaseChatCompletion(BaseLLM): generated_text = generated_text[::-1].replace(token[::-1], "", 1)[::-1] return generated_text - def process_response( + def 
process_response( # noqa: PLR0915 self, model: str, response: Union[requests.Response, httpx.Response], diff --git a/litellm/llms/prompt_templates/factory.py b/litellm/llms/prompt_templates/factory.py index 8dd0f67b6..15ee85fae 100644 --- a/litellm/llms/prompt_templates/factory.py +++ b/litellm/llms/prompt_templates/factory.py @@ -385,7 +385,9 @@ known_tokenizer_config = { } -def hf_chat_template(model: str, messages: list, chat_template: Optional[Any] = None): +def hf_chat_template( # noqa: PLR0915 + model: str, messages: list, chat_template: Optional[Any] = None +): # Define Jinja2 environment env = ImmutableSandboxedEnvironment() @@ -1339,7 +1341,7 @@ def add_cache_control_to_content( return anthropic_content_element -def anthropic_messages_pt( +def anthropic_messages_pt( # noqa: PLR0915 messages: List[AllMessageValues], model: str, llm_provider: str, @@ -1797,7 +1799,7 @@ def convert_to_cohere_tool_invoke(tool_calls: list) -> List[ToolCallObject]: return cohere_tool_invoke -def cohere_messages_pt_v2( +def cohere_messages_pt_v2( # noqa: PLR0915 messages: List, model: str, llm_provider: str, @@ -2409,7 +2411,7 @@ def _insert_assistant_continue_message( return messages -def _bedrock_converse_messages_pt( +def _bedrock_converse_messages_pt( # noqa: PLR0915 messages: List, model: str, llm_provider: str, diff --git a/litellm/llms/sagemaker/sagemaker.py b/litellm/llms/sagemaker/sagemaker.py index 27023ab3f..ecfa40b8c 100644 --- a/litellm/llms/sagemaker/sagemaker.py +++ b/litellm/llms/sagemaker/sagemaker.py @@ -248,7 +248,7 @@ class SagemakerLLM(BaseAWSLLM): return prompt - def completion( + def completion( # noqa: PLR0915 self, model: str, messages: list, diff --git a/litellm/llms/vertex_ai_and_google_ai_studio/gemini/vertex_and_google_ai_studio_gemini.py b/litellm/llms/vertex_ai_and_google_ai_studio/gemini/vertex_and_google_ai_studio_gemini.py index bee96e0a1..ae3dddbdd 100644 --- a/litellm/llms/vertex_ai_and_google_ai_studio/gemini/vertex_and_google_ai_studio_gemini.py +++ b/litellm/llms/vertex_ai_and_google_ai_studio/gemini/vertex_and_google_ai_studio_gemini.py @@ -742,7 +742,7 @@ class VertexLLM(VertexBase): def __init__(self) -> None: super().__init__() - def _process_response( + def _process_response( # noqa: PLR0915 self, model: str, response: httpx.Response, diff --git a/litellm/llms/vertex_ai_and_google_ai_studio/vertex_ai_non_gemini.py b/litellm/llms/vertex_ai_and_google_ai_studio/vertex_ai_non_gemini.py index a2ed48eca..5b50868a8 100644 --- a/litellm/llms/vertex_ai_and_google_ai_studio/vertex_ai_non_gemini.py +++ b/litellm/llms/vertex_ai_and_google_ai_studio/vertex_ai_non_gemini.py @@ -131,7 +131,7 @@ def _process_gemini_image(image_url: str) -> PartType: raise e -def _gemini_convert_messages_with_history( +def _gemini_convert_messages_with_history( # noqa: PLR0915 messages: List[AllMessageValues], ) -> List[ContentType]: """ @@ -279,7 +279,7 @@ def _set_client_in_cache(client_cache_key: str, vertex_llm_model: Any): litellm.in_memory_llm_clients_cache[client_cache_key] = vertex_llm_model -def completion( +def completion( # noqa: PLR0915 model: str, messages: list, model_response: ModelResponse, @@ -770,7 +770,7 @@ def completion( ) -async def async_completion( +async def async_completion( # noqa: PLR0915 llm_model, mode: str, prompt: str, @@ -1010,7 +1010,7 @@ async def async_completion( raise VertexAIError(status_code=500, message=str(e)) -async def async_streaming( +async def async_streaming( # noqa: PLR0915 llm_model, mode: str, prompt: str, diff --git a/litellm/main.py 
b/litellm/main.py index 96ec304e5..f93db2a8f 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -657,7 +657,7 @@ def mock_completion( @client -def completion( # type: ignore +def completion( # type: ignore # noqa: PLR0915 model: str, # Optional OpenAI params: see https://platform.openai.com/docs/api-reference/chat/create messages: List = [], @@ -3136,7 +3136,7 @@ async def aembedding(*args, **kwargs) -> EmbeddingResponse: @client -def embedding( +def embedding( # noqa: PLR0915 model, input=[], # Optional params @@ -3911,7 +3911,7 @@ async def atext_completion( @client -def text_completion( +def text_completion( # noqa: PLR0915 prompt: Union[ str, List[Union[str, List[Union[str, List[int]]]]] ], # Required: The prompt(s) to generate completions for. @@ -4378,7 +4378,7 @@ async def aimage_generation(*args, **kwargs) -> ImageResponse: @client -def image_generation( +def image_generation( # noqa: PLR0915 prompt: str, model: Optional[str] = None, n: Optional[int] = None, @@ -5111,7 +5111,7 @@ def speech( ##### Health Endpoints ####################### -async def ahealth_check( +async def ahealth_check( # noqa: PLR0915 model_params: dict, mode: Optional[ Literal[ @@ -5374,7 +5374,7 @@ def stream_chunk_builder_text_completion( return TextCompletionResponse(**response) -def stream_chunk_builder( +def stream_chunk_builder( # noqa: PLR0915 chunks: list, messages: Optional[list] = None, start_time=None, end_time=None ) -> Optional[Union[ModelResponse, TextCompletionResponse]]: try: diff --git a/litellm/proxy/auth/auth_checks.py b/litellm/proxy/auth/auth_checks.py index 92276aca8..940c74b92 100644 --- a/litellm/proxy/auth/auth_checks.py +++ b/litellm/proxy/auth/auth_checks.py @@ -44,7 +44,7 @@ else: all_routes = LiteLLMRoutes.openai_routes.value + LiteLLMRoutes.management_routes.value -def common_checks( +def common_checks( # noqa: PLR0915 request_body: dict, team_object: Optional[LiteLLM_TeamTable], user_object: Optional[LiteLLM_UserTable], diff --git a/litellm/proxy/auth/user_api_key_auth.py b/litellm/proxy/auth/user_api_key_auth.py index fdd932cd6..8322a78df 100644 --- a/litellm/proxy/auth/user_api_key_auth.py +++ b/litellm/proxy/auth/user_api_key_auth.py @@ -105,7 +105,7 @@ def _get_bearer_token( return api_key -async def user_api_key_auth( +async def user_api_key_auth( # noqa: PLR0915 request: Request, api_key: str = fastapi.Security(api_key_header), azure_api_key_header: str = fastapi.Security(azure_api_key_header), diff --git a/litellm/proxy/common_utils/callback_utils.py b/litellm/proxy/common_utils/callback_utils.py index 538a1eee1..c3c19f551 100644 --- a/litellm/proxy/common_utils/callback_utils.py +++ b/litellm/proxy/common_utils/callback_utils.py @@ -11,7 +11,7 @@ blue_color_code = "\033[94m" reset_color_code = "\033[0m" -def initialize_callbacks_on_proxy( +def initialize_callbacks_on_proxy( # noqa: PLR0915 value: Any, premium_user: bool, config_file_path: str, diff --git a/litellm/proxy/db/create_views.py b/litellm/proxy/db/create_views.py index da10d0c78..2fff3d085 100644 --- a/litellm/proxy/db/create_views.py +++ b/litellm/proxy/db/create_views.py @@ -5,7 +5,7 @@ from litellm import verbose_logger _db = Any -async def create_missing_views(db: _db): +async def create_missing_views(db: _db): # noqa: PLR0915 """ -------------------------------------------------- NOTE: Copy of `litellm/db_scripts/create_views.py`. 
diff --git a/litellm/proxy/guardrails/guardrail_hooks/lakera_ai.py b/litellm/proxy/guardrails/guardrail_hooks/lakera_ai.py index d15a4a7d5..7eab3588a 100644 --- a/litellm/proxy/guardrails/guardrail_hooks/lakera_ai.py +++ b/litellm/proxy/guardrails/guardrail_hooks/lakera_ai.py @@ -119,7 +119,7 @@ class lakeraAI_Moderation(CustomGuardrail): return None - async def _check( + async def _check( # noqa: PLR0915 self, data: dict, user_api_key_dict: UserAPIKeyAuth, diff --git a/litellm/proxy/guardrails/init_guardrails.py b/litellm/proxy/guardrails/init_guardrails.py index 3611490d5..baec7a640 100644 --- a/litellm/proxy/guardrails/init_guardrails.py +++ b/litellm/proxy/guardrails/init_guardrails.py @@ -86,7 +86,7 @@ Map guardrail_name: , , during_call """ -def init_guardrails_v2( +def init_guardrails_v2( # noqa: PLR0915 all_guardrails: List[Dict], config_file_path: Optional[str] = None, ): diff --git a/litellm/proxy/health_endpoints/_health_endpoints.py b/litellm/proxy/health_endpoints/_health_endpoints.py index f9e3f5320..78b2a3d20 100644 --- a/litellm/proxy/health_endpoints/_health_endpoints.py +++ b/litellm/proxy/health_endpoints/_health_endpoints.py @@ -53,7 +53,7 @@ async def test_endpoint(request: Request): dependencies=[Depends(user_api_key_auth)], include_in_schema=False, ) -async def health_services_endpoint( +async def health_services_endpoint( # noqa: PLR0915 user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth), service: Union[ Literal[ diff --git a/litellm/proxy/hooks/parallel_request_limiter.py b/litellm/proxy/hooks/parallel_request_limiter.py index f34a9bbac..75fbb68e2 100644 --- a/litellm/proxy/hooks/parallel_request_limiter.py +++ b/litellm/proxy/hooks/parallel_request_limiter.py @@ -117,7 +117,7 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger): headers={"retry-after": str(self.time_to_next_minute())}, ) - async def async_pre_call_hook( + async def async_pre_call_hook( # noqa: PLR0915 self, user_api_key_dict: UserAPIKeyAuth, cache: DualCache, @@ -411,7 +411,9 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger): return - async def async_log_success_event(self, kwargs, response_obj, start_time, end_time): + async def async_log_success_event( # noqa: PLR0915 + self, kwargs, response_obj, start_time, end_time + ): from litellm.proxy.common_utils.callback_utils import ( get_model_group_from_litellm_kwargs, ) diff --git a/litellm/proxy/litellm_pre_call_utils.py b/litellm/proxy/litellm_pre_call_utils.py index 5630160f9..62f4ce440 100644 --- a/litellm/proxy/litellm_pre_call_utils.py +++ b/litellm/proxy/litellm_pre_call_utils.py @@ -211,7 +211,7 @@ def add_litellm_data_for_backend_llm_call( return data -async def add_litellm_data_to_request( +async def add_litellm_data_to_request( # noqa: PLR0915 data: dict, request: Request, user_api_key_dict: UserAPIKeyAuth, diff --git a/litellm/proxy/management_endpoints/internal_user_endpoints.py b/litellm/proxy/management_endpoints/internal_user_endpoints.py index bea27ece1..fa96fe08c 100644 --- a/litellm/proxy/management_endpoints/internal_user_endpoints.py +++ b/litellm/proxy/management_endpoints/internal_user_endpoints.py @@ -308,7 +308,7 @@ def get_team_from_list( # response_model=UserInfoResponse, ) @management_endpoint_wrapper -async def user_info( +async def user_info( # noqa: PLR0915 user_id: Optional[str] = fastapi.Query( default=None, description="User ID in the request parameters" ), diff --git a/litellm/proxy/management_endpoints/key_management_endpoints.py 
b/litellm/proxy/management_endpoints/key_management_endpoints.py index 152de4edf..01baa232f 100644 --- a/litellm/proxy/management_endpoints/key_management_endpoints.py +++ b/litellm/proxy/management_endpoints/key_management_endpoints.py @@ -45,7 +45,7 @@ router = APIRouter() response_model=GenerateKeyResponse, ) @management_endpoint_wrapper -async def generate_key_fn( +async def generate_key_fn( # noqa: PLR0915 data: GenerateKeyRequest, user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth), litellm_changed_by: Optional[str] = Header( @@ -768,7 +768,7 @@ async def info_key_fn( ) -async def generate_key_helper_fn( +async def generate_key_helper_fn( # noqa: PLR0915 request_type: Literal[ "user", "key" ], # identifies if this request is from /user/new or /key/generate diff --git a/litellm/proxy/management_endpoints/team_endpoints.py b/litellm/proxy/management_endpoints/team_endpoints.py index 35036b61c..965f07da7 100644 --- a/litellm/proxy/management_endpoints/team_endpoints.py +++ b/litellm/proxy/management_endpoints/team_endpoints.py @@ -92,7 +92,7 @@ async def get_all_team_memberships( response_model=LiteLLM_TeamTable, ) @management_endpoint_wrapper -async def new_team( +async def new_team( # noqa: PLR0915 data: NewTeamRequest, http_request: Request, user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth), diff --git a/litellm/proxy/management_endpoints/ui_sso.py b/litellm/proxy/management_endpoints/ui_sso.py index d515baa96..9a49646e6 100644 --- a/litellm/proxy/management_endpoints/ui_sso.py +++ b/litellm/proxy/management_endpoints/ui_sso.py @@ -46,7 +46,7 @@ router = APIRouter() @router.get("/sso/key/generate", tags=["experimental"], include_in_schema=False) -async def google_login(request: Request): +async def google_login(request: Request): # noqa: PLR0915 """ Create Proxy API Keys using Google Workspace SSO. Requires setting PROXY_BASE_URL in .env PROXY_BASE_URL should be the your deployed proxy endpoint, e.g. 
PROXY_BASE_URL="https://litellm-production-7002.up.railway.app/" @@ -221,7 +221,7 @@ async def google_login(request: Request): @router.get("/sso/callback", tags=["experimental"], include_in_schema=False) -async def auth_callback(request: Request): +async def auth_callback(request: Request): # noqa: PLR0915 """Verify login""" from litellm.proxy.management_endpoints.key_management_endpoints import ( generate_key_helper_fn, diff --git a/litellm/proxy/pass_through_endpoints/pass_through_endpoints.py b/litellm/proxy/pass_through_endpoints/pass_through_endpoints.py index 0e2def3cb..8577181ce 100644 --- a/litellm/proxy/pass_through_endpoints/pass_through_endpoints.py +++ b/litellm/proxy/pass_through_endpoints/pass_through_endpoints.py @@ -105,7 +105,7 @@ async def set_env_variables_in_header(custom_headers: Optional[dict]) -> Optiona return headers -async def chat_completion_pass_through_endpoint( +async def chat_completion_pass_through_endpoint( # noqa: PLR0915 fastapi_response: Response, request: Request, adapter_id: str, @@ -306,7 +306,7 @@ def get_endpoint_type(url: str) -> EndpointType: return EndpointType.GENERIC -async def pass_through_request( +async def pass_through_request( # noqa: PLR0915 request: Request, target: str, custom_headers: dict, diff --git a/litellm/proxy/proxy_cli.py b/litellm/proxy/proxy_cli.py index 5db4f2d0a..187429bae 100644 --- a/litellm/proxy/proxy_cli.py +++ b/litellm/proxy/proxy_cli.py @@ -216,7 +216,7 @@ def is_port_in_use(port): envvar="SSL_CERTFILE_PATH", ) @click.option("--local", is_flag=True, default=False, help="for local debugging") -def run_server( +def run_server( # noqa: PLR0915 host, port, api_base, diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 18d51fe41..b3739e491 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -881,7 +881,7 @@ def _set_spend_logs_payload( return prisma_client -async def update_database( +async def update_database( # noqa: PLR0915 token, response_cost, user_id=None, @@ -1063,7 +1063,7 @@ async def update_database( ) -async def update_cache( +async def update_cache( # noqa: PLR0915 token: Optional[str], user_id: Optional[str], end_user_id: Optional[str], @@ -1492,7 +1492,7 @@ class ProxyConfig: ## INIT PROXY REDIS USAGE CLIENT ## redis_usage_cache = litellm.cache.cache - async def load_config( + async def load_config( # noqa: PLR0915 self, router: Optional[litellm.Router], config_file_path: str ): """ @@ -2144,7 +2144,7 @@ class ProxyConfig: added_models += 1 return added_models - async def _update_llm_router( + async def _update_llm_router( # noqa: PLR0915 self, new_models: list, proxy_logging_obj: ProxyLogging, @@ -2387,7 +2387,7 @@ def save_worker_config(**data): os.environ["WORKER_CONFIG"] = json.dumps(data) -async def initialize( +async def initialize( # noqa: PLR0915 model=None, alias=None, api_base=None, @@ -2728,7 +2728,7 @@ def giveup(e): @router.on_event("startup") -async def startup_event(): +async def startup_event(): # noqa: PLR0915 global prisma_client, master_key, use_background_health_checks, llm_router, llm_model_list, general_settings, proxy_budget_rescheduler_min_time, proxy_budget_rescheduler_max_time, litellm_proxy_admin_name, db_writer_client, store_model_in_db, premium_user, _license_check import json @@ -3078,7 +3078,7 @@ async def model_list( giveup=giveup, logger=verbose_proxy_logger, ) -async def chat_completion( +async def chat_completion( # noqa: PLR0915 request: Request, fastapi_response: Response, model: Optional[str] = None, @@ -3357,7 
+3357,7 @@ async def chat_completion( dependencies=[Depends(user_api_key_auth)], tags=["completions"], ) -async def completion( +async def completion( # noqa: PLR0915 request: Request, fastapi_response: Response, model: Optional[str] = None, @@ -3580,7 +3580,7 @@ async def completion( response_class=ORJSONResponse, tags=["embeddings"], ) # azure compatible endpoint -async def embeddings( +async def embeddings( # noqa: PLR0915 request: Request, fastapi_response: Response, model: Optional[str] = None, @@ -5469,7 +5469,7 @@ async def moderations( dependencies=[Depends(user_api_key_auth)], response_model=AnthropicResponse, ) -async def anthropic_response( +async def anthropic_response( # noqa: PLR0915 anthropic_data: AnthropicMessagesRequest, fastapi_response: Response, request: Request, @@ -7278,7 +7278,7 @@ async def model_metrics_exceptions( tags=["model management"], dependencies=[Depends(user_api_key_auth)], ) -async def model_info_v1( +async def model_info_v1( # noqa: PLR0915 user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth), litellm_model_id: Optional[str] = None, ): @@ -7985,7 +7985,7 @@ async def fallback_login(request: Request): @router.post( "/login", include_in_schema=False ) # hidden since this is a helper for UI sso login -async def login(request: Request): +async def login(request: Request): # noqa: PLR0915 global premium_user, general_settings try: import multipart @@ -8653,7 +8653,7 @@ async def invitation_delete( dependencies=[Depends(user_api_key_auth)], include_in_schema=False, ) -async def update_config(config_info: ConfigYAML): +async def update_config(config_info: ConfigYAML): # noqa: PLR0915 """ For Admin UI - allows admin to update config via UI @@ -9156,7 +9156,7 @@ async def delete_config_general_settings( include_in_schema=False, dependencies=[Depends(user_api_key_auth)], ) -async def get_config(): +async def get_config(): # noqa: PLR0915 """ For Admin UI - allows admin to view config via UI # return the callbacks and the env variables for the callback diff --git a/litellm/proxy/spend_tracking/spend_management_endpoints.py b/litellm/proxy/spend_tracking/spend_management_endpoints.py index 85592ff02..f6d36daaf 100644 --- a/litellm/proxy/spend_tracking/spend_management_endpoints.py +++ b/litellm/proxy/spend_tracking/spend_management_endpoints.py @@ -1597,7 +1597,7 @@ async def calculate_spend(request: SpendCalculateRequest): 200: {"model": List[LiteLLM_SpendLogs]}, }, ) -async def view_spend_logs( +async def view_spend_logs( # noqa: PLR0915 api_key: Optional[str] = fastapi.Query( default=None, description="Get spend logs based on api key", diff --git a/litellm/proxy/utils.py b/litellm/proxy/utils.py index 351cba24f..d41cf2dfb 100644 --- a/litellm/proxy/utils.py +++ b/litellm/proxy/utils.py @@ -1426,7 +1426,7 @@ class PrismaClient: on_backoff=on_backoff, # specifying the function to call on backoff ) @log_to_opentelemetry - async def get_data( + async def get_data( # noqa: PLR0915 self, token: Optional[Union[str, list]] = None, user_id: Optional[str] = None, @@ -1780,7 +1780,7 @@ class PrismaClient: max_time=10, # maximum total time to retry for on_backoff=on_backoff, # specifying the function to call on backoff ) - async def insert_data( + async def insert_data( # noqa: PLR0915 self, data: dict, table_name: Literal[ @@ -1928,7 +1928,7 @@ class PrismaClient: max_time=10, # maximum total time to retry for on_backoff=on_backoff, # specifying the function to call on backoff ) - async def update_data( + async def update_data( # noqa: PLR0915 self, token: 
Optional[str] = None, data: dict = {}, @@ -2617,7 +2617,7 @@ async def reset_budget(prisma_client: PrismaClient): ) -async def update_spend( +async def update_spend( # noqa: PLR0915 prisma_client: PrismaClient, db_writer_client: Optional[HTTPHandler], proxy_logging_obj: ProxyLogging, diff --git a/litellm/router.py b/litellm/router.py index 233ef4feb..142a781bb 100644 --- a/litellm/router.py +++ b/litellm/router.py @@ -156,7 +156,7 @@ class Router: leastbusy_logger: Optional[LeastBusyLoggingHandler] = None lowesttpm_logger: Optional[LowestTPMLoggingHandler] = None - def __init__( + def __init__( # noqa: PLR0915 self, model_list: Optional[ Union[List[DeploymentTypedDict], List[Dict[str, Any]]] @@ -2565,7 +2565,7 @@ class Router: #### [END] ASSISTANTS API #### - async def async_function_with_fallbacks(self, *args, **kwargs): + async def async_function_with_fallbacks(self, *args, **kwargs): # noqa: PLR0915 """ Try calling the function_with_retries If it fails after num_retries, fall back to another model group @@ -4170,7 +4170,7 @@ class Router: model_name = model_info["model_name"] return self.get_model_list(model_name=model_name) - def _set_model_group_info( + def _set_model_group_info( # noqa: PLR0915 self, model_group: str, user_facing_model_group_name: str ) -> Optional[ModelGroupInfo]: """ @@ -4696,7 +4696,7 @@ class Router: client = self.cache.get_cache(key=cache_key) return client - def _pre_call_checks( + def _pre_call_checks( # noqa: PLR0915 self, model: str, healthy_deployments: List, diff --git a/litellm/router_strategy/lowest_cost.py b/litellm/router_strategy/lowest_cost.py index a3bee348b..a9da47d0e 100644 --- a/litellm/router_strategy/lowest_cost.py +++ b/litellm/router_strategy/lowest_cost.py @@ -209,7 +209,7 @@ class LowestCostLoggingHandler(CustomLogger): ) pass - async def async_get_available_deployments( + async def async_get_available_deployments( # noqa: PLR0915 self, model_group: str, healthy_deployments: list, diff --git a/litellm/router_strategy/lowest_latency.py b/litellm/router_strategy/lowest_latency.py index 4eb9c967f..fc47d64c7 100644 --- a/litellm/router_strategy/lowest_latency.py +++ b/litellm/router_strategy/lowest_latency.py @@ -44,7 +44,9 @@ class LowestLatencyLoggingHandler(CustomLogger): self.model_list = model_list self.routing_args = RoutingArgs(**routing_args) - def log_success_event(self, kwargs, response_obj, start_time, end_time): + def log_success_event( # noqa: PLR0915 + self, kwargs, response_obj, start_time, end_time + ): try: """ Update latency usage on success @@ -244,7 +246,9 @@ class LowestLatencyLoggingHandler(CustomLogger): ) pass - async def async_log_success_event(self, kwargs, response_obj, start_time, end_time): + async def async_log_success_event( # noqa: PLR0915 + self, kwargs, response_obj, start_time, end_time + ): try: """ Update latency usage on success @@ -375,7 +379,7 @@ class LowestLatencyLoggingHandler(CustomLogger): ) pass - def get_available_deployments( + def get_available_deployments( # noqa: PLR0915 self, model_group: str, healthy_deployments: list, diff --git a/litellm/router_strategy/lowest_tpm_rpm.py b/litellm/router_strategy/lowest_tpm_rpm.py index 96f655b01..45f32fbf0 100644 --- a/litellm/router_strategy/lowest_tpm_rpm.py +++ b/litellm/router_strategy/lowest_tpm_rpm.py @@ -166,7 +166,7 @@ class LowestTPMLoggingHandler(CustomLogger): verbose_router_logger.debug(traceback.format_exc()) pass - def get_available_deployments( + def get_available_deployments( # noqa: PLR0915 self, model_group: str, healthy_deployments: 
list, diff --git a/litellm/router_strategy/lowest_tpm_rpm_v2.py b/litellm/router_strategy/lowest_tpm_rpm_v2.py index e09608422..2c62b6c7b 100644 --- a/litellm/router_strategy/lowest_tpm_rpm_v2.py +++ b/litellm/router_strategy/lowest_tpm_rpm_v2.py @@ -318,7 +318,7 @@ class LowestTPMLoggingHandler_v2(CustomLogger): ) pass - def _common_checks_available_deployment( + def _common_checks_available_deployment( # noqa: PLR0915 self, model_group: str, healthy_deployments: list, diff --git a/litellm/router_utils/client_initalization_utils.py b/litellm/router_utils/client_initalization_utils.py index a1d4ee5bd..6c845296a 100644 --- a/litellm/router_utils/client_initalization_utils.py +++ b/litellm/router_utils/client_initalization_utils.py @@ -48,7 +48,7 @@ def should_initialize_sync_client( return True -def set_client(litellm_router_instance: LitellmRouter, model: dict): +def set_client(litellm_router_instance: LitellmRouter, model: dict): # noqa: PLR0915 """ - Initializes Azure/OpenAI clients. Stores them in cache, b/c of this - https://github.com/BerriAI/litellm/issues/1278 - Initializes Semaphore for client w/ rpm. Stores them in cache. b/c of this - https://github.com/BerriAI/litellm/issues/2994 diff --git a/litellm/secret_managers/main.py b/litellm/secret_managers/main.py index 4c7cb469b..522f2bc39 100644 --- a/litellm/secret_managers/main.py +++ b/litellm/secret_managers/main.py @@ -67,7 +67,7 @@ def get_secret_str( return value -def get_secret( +def get_secret( # noqa: PLR0915 secret_name: str, default_value: Optional[Union[str, bool]] = None, ): diff --git a/litellm/utils.py b/litellm/utils.py index 50da9f49a..49f7fe642 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -328,7 +328,7 @@ def custom_llm_setup(): litellm._custom_providers.append(custom_llm["provider"]) -def function_setup( +def function_setup( # noqa: PLR0915 original_function: str, rules_obj, start_time, *args, **kwargs ): # just run once to check if user wants to send their data anywhere - PostHog/Sentry/Slack/etc. ### NOTICES ### @@ -605,7 +605,7 @@ def function_setup( raise e -def client(original_function): +def client(original_function): # noqa: PLR0915 global liteDebuggerClient rules_obj = Rules() @@ -721,7 +721,7 @@ def client(original_function): raise e @wraps(original_function) - def wrapper(*args, **kwargs): + def wrapper(*args, **kwargs): # noqa: PLR0915 # DO NOT MOVE THIS. It always needs to run first # Check if this is an async function. If so only execute the async function if ( @@ -1007,7 +1007,7 @@ def client(original_function): raise e @wraps(original_function) - async def wrapper_async(*args, **kwargs): + async def wrapper_async(*args, **kwargs): # noqa: PLR0915 print_args_passed_to_litellm(original_function, args, kwargs) start_time = datetime.datetime.now() result = None @@ -1289,7 +1289,7 @@ def decode(model="", tokens: List[int] = [], custom_tokenizer: Optional[dict] = return dec -def openai_token_counter( +def openai_token_counter( # noqa: PLR0915 messages: Optional[list] = None, model="gpt-3.5-turbo-0613", text: Optional[str] = None, @@ -1930,7 +1930,7 @@ def _update_dictionary(existing_dict: Dict, new_dict: dict) -> dict: return existing_dict -def register_model(model_cost: Union[str, dict]): +def register_model(model_cost: Union[str, dict]): # noqa: PLR0915 """ Register new / Override existing models (and their pricing) to specific providers. 
Provide EITHER a model cost dictionary or a url to a hosted json blob @@ -2253,7 +2253,7 @@ def get_optional_params_image_gen( return optional_params -def get_optional_params_embeddings( +def get_optional_params_embeddings( # noqa: PLR0915 # 2 optional params model: str, user: Optional[str] = None, @@ -2469,7 +2469,7 @@ def _remove_strict_from_schema(schema): return schema -def get_optional_params( +def get_optional_params( # noqa: PLR0915 # use the openai defaults # https://platform.openai.com/docs/api-reference/chat/create model: str, @@ -4077,7 +4077,7 @@ def get_first_chars_messages(kwargs: dict) -> str: return "" -def get_supported_openai_params( +def get_supported_openai_params( # noqa: PLR0915 model: str, custom_llm_provider: Optional[str] = None, request_type: Literal["chat_completion", "embeddings"] = "chat_completion", @@ -4573,7 +4573,9 @@ def _get_model_info_from_model_cost(key: str) -> dict: return litellm.model_cost[key] -def get_model_info(model: str, custom_llm_provider: Optional[str] = None) -> ModelInfo: +def get_model_info( # noqa: PLR0915 + model: str, custom_llm_provider: Optional[str] = None +) -> ModelInfo: """ Get a dict for the maximum tokens (context window), input_cost_per_token, output_cost_per_token for a given model. @@ -5116,7 +5118,7 @@ def create_proxy_transport_and_mounts(): return sync_proxy_mounts, async_proxy_mounts -def validate_environment( +def validate_environment( # noqa: PLR0915 model: Optional[str] = None, api_key: Optional[str] = None ) -> dict: """ @@ -5605,7 +5607,7 @@ def _handle_invalid_parallel_tool_calls( return tool_calls -def convert_to_model_response_object( +def convert_to_model_response_object( # noqa: PLR0915 response_object: Optional[dict] = None, model_response_object: Optional[ Union[ @@ -7170,7 +7172,7 @@ class CustomStreamWrapper: is_empty = False return is_empty - def chunk_creator(self, chunk): # type: ignore + def chunk_creator(self, chunk): # type: ignore # noqa: PLR0915 model_response = self.model_response_creator() response_obj = {} try: @@ -7881,7 +7883,7 @@ class CustomStreamWrapper: model_response.choices[0].finish_reason = "tool_calls" return model_response - def __next__(self): + def __next__(self): # noqa: PLR0915 cache_hit = False if ( self.custom_llm_provider is not None @@ -8016,7 +8018,7 @@ class CustomStreamWrapper: return self.completion_stream - async def __anext__(self): + async def __anext__(self): # noqa: PLR0915 cache_hit = False if ( self.custom_llm_provider is not None diff --git a/ruff.toml b/ruff.toml index 623dbc67c..09fccd657 100644 --- a/ruff.toml +++ b/ruff.toml @@ -1,4 +1,4 @@ ignore = ["F405", "E402", "F401", "E501", "F403"] -extend-select = ["E501"] +extend-select = ["E501", "PLR0915"] line-length = 120 -exclude = ["litellm/types/*", "litellm/__init__.py", "litellm/proxy/example_config_yaml/*"] +exclude = ["litellm/types/*", "litellm/__init__.py", "litellm/proxy/example_config_yaml/*"] \ No newline at end of file
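
For reference, the `ruff.toml` hunk above leaves the configuration in the following state (reproduced with comments added for this write-up; the file still ends without a trailing newline):

```toml
ignore = ["F405", "E402", "F401", "E501", "F403"]
extend-select = ["E501", "PLR0915"]  # PLR0915 = Pylint's "too-many-statements" rule
line-length = 120
exclude = ["litellm/types/*", "litellm/__init__.py", "litellm/proxy/example_config_yaml/*"]
```

With every currently oversized function carrying a `# noqa: PLR0915`, a local `ruff check .` (or `ruff check --select PLR0915 litellm/` to look at this rule in isolation) should come back clean; Ruff also exposes a `max-statements` option under its Pylint settings if the default threshold of 50 ever needs tuning. Both of those are general Ruff behaviours rather than anything configured by this patch.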