Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-24 10:14:26 +00:00)
fix: duplicate exception_type for gemini
parent 73ccbc0f14
commit 694488d2ce
1 changed file with 13 additions and 6 deletions
@@ -1018,7 +1018,6 @@ def exception_type( # type: ignore # noqa: PLR0915
        elif (
            custom_llm_provider == "vertex_ai"
            or custom_llm_provider == "vertex_ai_beta"
            or custom_llm_provider == "gemini"
        ):
            if (
                "Vertex AI API has not been used in project" in error_str
@@ -1216,7 +1215,7 @@ def exception_type( # type: ignore # noqa: PLR0915
                raise BadRequestError(
                    message="GeminiException - Invalid api key",
                    model=model,
-                    llm_provider="palm",
+                    llm_provider=custom_llm_provider,
                    response=getattr(original_exception, "response", None),
                )
            if (
@@ -1227,14 +1226,14 @@ def exception_type( # type: ignore # noqa: PLR0915
                raise Timeout(
                    message=f"GeminiException - {original_exception.message}",
                    model=model,
-                    llm_provider="palm",
+                    llm_provider=custom_llm_provider,
                )
            if "400 Request payload size exceeds" in error_str:
                exception_mapping_worked = True
                raise ContextWindowExceededError(
                    message=f"GeminiException - {error_str}",
                    model=model,
-                    llm_provider="palm",
+                    llm_provider=custom_llm_provider,
                    response=getattr(original_exception, "response", None),
                )
            if (
@@ -1245,7 +1244,7 @@ def exception_type( # type: ignore # noqa: PLR0915
                raise APIError(
                    status_code=getattr(original_exception, "status_code", 500),
                    message=f"GeminiException - {original_exception.message}",
-                    llm_provider="palm",
+                    llm_provider=custom_llm_provider,
                    model=model,
                    request=httpx.Response(
                        status_code=429,
@@ -1261,7 +1260,15 @@ def exception_type( # type: ignore # noqa: PLR0915
                raise BadRequestError(
                    message=f"GeminiException - {error_str}",
                    model=model,
-                    llm_provider="palm",
+                    llm_provider=custom_llm_provider,
                    response=getattr(original_exception, "response", None),
                )
+            if original_exception.status_code == 503:
+                exception_mapping_worked = True
+                raise BadRequestError(
+                    message=f"GeminiException - {error_str}",
+                    model=model,
+                    llm_provider=custom_llm_provider,
+                    response=getattr(original_exception, "response", None),
+                )
                # Dailed: Error occurred: 400 Request payload size exceeds the limit: 20000 bytes
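For context, a minimal sketch of how the remapped llm_provider surfaces to a caller. The model name and API key below are assumed values used only for illustration, and it is an assumption that an invalid key surfaces through the BadRequestError branch shown in the hunk above; the point is simply that exceptions mapped in the gemini branch now report the actual provider string instead of the hard-coded "palm".

# Hedged sketch, not part of the commit: illustrates the effect of replacing
# llm_provider="palm" with llm_provider=custom_llm_provider in the gemini branch.
import litellm

try:
    # "gemini/gemini-pro" and the deliberately bad key are assumed example
    # values used only to trigger the provider's exception mapping.
    litellm.completion(
        model="gemini/gemini-pro",
        messages=[{"role": "user", "content": "hello"}],
        api_key="invalid-key",
    )
except litellm.BadRequestError as e:
    # After this fix, the mapped exception carries the real provider string
    # (e.g. "gemini") rather than "palm".
    print(e.llm_provider, "-", e.message)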