Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 18:54:30 +00:00)
fix(main.py): fix key leak error when unknown provider given (#8556)

* fix(main.py): fix key leak error when unknown provider given — don't return passed-in args if unknown route on embedding
* fix(main.py): remove instances of {args} being passed in exceptions, to prevent potential key leaks
* test(code_coverage/prevent_key_leaks_in_codebase.py): ban usage of {args} in codebase
* fix: fix linting errors
* fix: remove unused variable
This commit is contained in: parent c373dc3784 → commit 07dab2f91a
8 changed files with 193 additions and 30 deletions
```diff
@@ -50,6 +50,7 @@ from litellm import (  # type: ignore
     get_litellm_params,
     get_optional_params,
 )
+from litellm.exceptions import LiteLLMUnknownProvider
 from litellm.integrations.custom_logger import CustomLogger
 from litellm.litellm_core_utils.audio_utils.utils import get_audio_file_for_health_check
 from litellm.litellm_core_utils.health_check_utils import (
```
```diff
@@ -3036,8 +3037,8 @@ def completion(  # type: ignore  # noqa: PLR0915
                 custom_handler = item["custom_handler"]

             if custom_handler is None:
-                raise ValueError(
-                    f"Unable to map your input to a model. Check your input - {args}"
+                raise LiteLLMUnknownProvider(
+                    model=model, custom_llm_provider=custom_llm_provider
                 )

             ## ROUTE LLM CALL ##
```
```diff
@@ -3075,8 +3076,8 @@ def completion(  # type: ignore  # noqa: PLR0915
             )

         else:
-            raise ValueError(
-                f"Unable to map your input to a model. Check your input - {args}"
+            raise LiteLLMUnknownProvider(
+                model=model, custom_llm_provider=custom_llm_provider
             )
         return response
     except Exception as e:
```
```diff
@@ -3263,17 +3264,10 @@ def embedding(  # noqa: PLR0915
```
(hunk body below; the original `+`/`-` gutter markers were lost in extraction — the header shows 17 lines reduced to 10, i.e. seven of these kwargs-extraction lines were deleted, but which seven cannot be recovered from this page)
```python
    """
    azure = kwargs.get("azure", None)
    client = kwargs.pop("client", None)
    rpm = kwargs.pop("rpm", None)
    tpm = kwargs.pop("tpm", None)
    max_retries = kwargs.get("max_retries", None)
    litellm_logging_obj: LiteLLMLoggingObj = kwargs.get("litellm_logging_obj")  # type: ignore
    cooldown_time = kwargs.get("cooldown_time", None)
    mock_response: Optional[List[float]] = kwargs.get("mock_response", None)  # type: ignore
    max_parallel_requests = kwargs.pop("max_parallel_requests", None)
    azure_ad_token_provider = kwargs.pop("azure_ad_token_provider", None)
    model_info = kwargs.get("model_info", None)
    metadata = kwargs.get("metadata", None)
    proxy_server_request = kwargs.get("proxy_server_request", None)
    aembedding = kwargs.get("aembedding", None)
    extra_headers = kwargs.get("extra_headers", None)
    headers = kwargs.get("headers", None)
```
```diff
@@ -3366,7 +3360,6 @@ def embedding(  # noqa: PLR0915
```
(hunk body below; `+`/`-` markers lost in extraction — header shows one line of this span was deleted)
```python
    if azure is True or custom_llm_provider == "azure":
        # azure configs
        api_type = get_secret_str("AZURE_API_TYPE") or "azure"

        api_base = api_base or litellm.api_base or get_secret_str("AZURE_API_BASE")
```
```diff
@@ -3439,7 +3432,6 @@ def embedding(  # noqa: PLR0915
```
(hunk body below; `+`/`-` markers lost in extraction — header shows one line of this span was deleted)
```python
    if extra_headers is not None:
        optional_params["extra_headers"] = extra_headers

    api_type = "openai"
    api_version = None

    ## EMBEDDING CALL
```
```diff
@@ -3850,14 +3842,16 @@ def embedding(  # noqa: PLR0915
                 aembedding=aembedding,
             )
         else:
-            args = locals()
-            raise ValueError(f"No valid embedding model args passed in - {args}")
+            raise LiteLLMUnknownProvider(
+                model=model, custom_llm_provider=custom_llm_provider
+            )
         if response is not None and hasattr(response, "_hidden_params"):
             response._hidden_params["custom_llm_provider"] = custom_llm_provider

         if response is None:
-            args = locals()
-            raise ValueError(f"No valid embedding model args passed in - {args}")
+            raise LiteLLMUnknownProvider(
+                model=model, custom_llm_provider=custom_llm_provider
+            )
         return response
     except Exception as e:
         ## LOGGING
```
```diff
@@ -4667,8 +4661,8 @@ def image_generation(  # noqa: PLR0915
                 custom_handler = item["custom_handler"]

             if custom_handler is None:
-                raise ValueError(
-                    f"Unable to map your input to a model. Check your input - {args}"
+                raise LiteLLMUnknownProvider(
+                    model=model, custom_llm_provider=custom_llm_provider
                 )

             ## ROUTE LLM CALL ##
```
|
Loading…
Add table
Add a link
Reference in a new issue