build(pyproject.toml): add new dev dependencies - for type checking (#9631)

* build(pyproject.toml): add new dev dependencies - for type checking

* build: reformat files to fit black

* ci: reformat to fit black

* ci(test-litellm.yml): make test runs clearer

* build(pyproject.toml): add ruff

* fix: fix ruff checks

* build(mypy/): fix mypy linting errors

* fix(hashicorp_secret_manager.py): fix passing cert for tls auth

* build(mypy/): resolve all mypy errors

* test: update test

* fix: fix black formatting

* build(pre-commit-config.yaml): use poetry run black

* fix(proxy_server.py): fix linting error

* fix: fix ruff safe representation error
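
Most of the mechanical churn in the diff below comes from the Black/ruff reformatting steps listed above: long assignments are re-wrapped with no change in behavior. As a rough illustration of the pattern (the variable names here are invented, not from the PR, and which layout Black emits depends on the pinned Black version):

response: dict = {"choices": [{"message": {}}]}
combined_content = "combined text"

# One layout: the right-hand side is wrapped in parentheses.
response["choices"][0]["message"]["content"] = (
    combined_content
)

# The equivalent layout the reformat switches to: the subscripted target
# is split across lines instead.
response["choices"][0]["message"][
    "content"
] = combined_content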
Commit d7b294dd0a (parent 72198737f8)
Author: Krish Dholakia, 2025-03-29 11:02:13 -07:00 (committed by GitHub)
214 changed files with 1553 additions and 1433 deletions


@@ -946,14 +946,16 @@ def completion( # type: ignore # noqa: PLR0915
    ## PROMPT MANAGEMENT HOOKS ##
    if isinstance(litellm_logging_obj, LiteLLMLoggingObj) and prompt_id is not None:
-        model, messages, optional_params = (
-            litellm_logging_obj.get_chat_completion_prompt(
-                model=model,
-                messages=messages,
-                non_default_params=non_default_params,
-                prompt_id=prompt_id,
-                prompt_variables=prompt_variables,
-            )
-        )
+        (
+            model,
+            messages,
+            optional_params,
+        ) = litellm_logging_obj.get_chat_completion_prompt(
+            model=model,
+            messages=messages,
+            non_default_params=non_default_params,
+            prompt_id=prompt_id,
+            prompt_variables=prompt_variables,
+        )
    try:
@@ -1246,7 +1248,6 @@ def completion( # type: ignore # noqa: PLR0915
            optional_params["max_retries"] = max_retries
        if litellm.AzureOpenAIO1Config().is_o_series_model(model=model):
            ## LOAD CONFIG - if set
            config = litellm.AzureOpenAIO1Config.get_config()
            for k, v in config.items():
@@ -2654,9 +2655,9 @@ def completion( # type: ignore # noqa: PLR0915
                "aws_region_name" not in optional_params
                or optional_params["aws_region_name"] is None
            ):
-                optional_params["aws_region_name"] = (
-                    aws_bedrock_client.meta.region_name
-                )
+                optional_params[
+                    "aws_region_name"
+                ] = aws_bedrock_client.meta.region_name
            bedrock_route = BedrockModelInfo.get_bedrock_route(model)
            if bedrock_route == "converse":
@@ -4362,9 +4363,9 @@ def adapter_completion(
    new_kwargs = translation_obj.translate_completion_input_params(kwargs=kwargs)
    response: Union[ModelResponse, CustomStreamWrapper] = completion(**new_kwargs) # type: ignore
-    translated_response: Optional[Union[BaseModel, AdapterCompletionStreamWrapper]] = (
-        None
-    )
+    translated_response: Optional[
+        Union[BaseModel, AdapterCompletionStreamWrapper]
+    ] = None
    if isinstance(response, ModelResponse):
        translated_response = translation_obj.translate_completion_output_params(
            response=response
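
The translated_response assignment re-wrapped above also shows a pattern that commonly comes up when getting code past mypy: a variable that is only assigned inside certain branches is declared up front with its full Optional[...] type, so every path type-checks. A minimal self-contained sketch of that pattern, with invented names rather than litellm's own types:

from typing import Optional, Union


class TextResult:
    pass


class StreamResult:
    pass


def translate(response: object) -> Optional[Union[TextResult, StreamResult]]:
    # Declare the variable with its full type before any branch assigns it,
    # so both the assigned paths and the "no match" path satisfy mypy.
    translated: Optional[Union[TextResult, StreamResult]] = None
    if isinstance(response, str):
        translated = TextResult()
    elif isinstance(response, bytes):
        translated = StreamResult()
    return translated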
@@ -4436,13 +4437,16 @@ async def amoderation(
    optional_params = GenericLiteLLMParams(**kwargs)
    try:
-        model, _custom_llm_provider, _dynamic_api_key, _dynamic_api_base = (
-            litellm.get_llm_provider(
-                model=model or "",
-                custom_llm_provider=custom_llm_provider,
-                api_base=optional_params.api_base,
-                api_key=optional_params.api_key,
-            )
-        )
+        (
+            model,
+            _custom_llm_provider,
+            _dynamic_api_key,
+            _dynamic_api_base,
+        ) = litellm.get_llm_provider(
+            model=model or "",
+            custom_llm_provider=custom_llm_provider,
+            api_base=optional_params.api_base,
+            api_key=optional_params.api_key,
+        )
    except litellm.BadRequestError:
        # `model` is optional field for moderation - get_llm_provider will throw BadRequestError if model is not set / not recognized
@@ -5405,7 +5409,6 @@ def speech( # noqa: PLR0915
            litellm_params=litellm_params_dict,
        )
    elif custom_llm_provider == "vertex_ai" or custom_llm_provider == "vertex_ai_beta":
        generic_optional_params = GenericLiteLLMParams(**kwargs)
        api_base = generic_optional_params.api_base or ""
@@ -5460,7 +5463,6 @@ def speech( # noqa: PLR0915
async def ahealth_check_wildcard_models(
    model: str, custom_llm_provider: str, model_params: dict
) -> dict:
    # this is a wildcard model, we need to pick a random model from the provider
    cheapest_models = pick_cheapest_chat_models_from_llm_provider(
        custom_llm_provider=custom_llm_provider, n=3
@@ -5783,9 +5785,9 @@ def stream_chunk_builder( # noqa: PLR0915
        ]
        if len(content_chunks) > 0:
-            response["choices"][0]["message"]["content"] = (
-                processor.get_combined_content(content_chunks)
-            )
+            response["choices"][0]["message"][
+                "content"
+            ] = processor.get_combined_content(content_chunks)
        audio_chunks = [
            chunk