forked from phoenix/litellm-mirror
test: handle gemini overloaded model error
This commit is contained in:
parent
ffbdaf868f
commit
b92700cc19
2 changed files with 8 additions and 5 deletions
|
@@ -1124,10 +1124,13 @@ def exception_type(  # type: ignore  # noqa: PLR0915
                 ),
                 ),
                 )
-            elif "500 Internal Server Error" in error_str:
+            elif (
+                "500 Internal Server Error" in error_str
+                or "The model is overloaded." in error_str
+            ):
                 exception_mapping_worked = True
-                raise ServiceUnavailableError(
-                    message=f"litellm.ServiceUnavailableError: VertexAIException - {error_str}",
+                raise litellm.InternalServerError(
+                    message=f"litellm.InternalServerError: VertexAIException - {error_str}",
                     model=model,
                     llm_provider="vertex_ai",
                     litellm_debug_info=extra_information,
|
@@ -205,8 +205,8 @@ def test_stream_chunk_builder_litellm_usage_chunks():
             complete_response=True,
             stream_options={"include_usage": True},
         )
-    except litellm.ServiceUnavailableError as e:
-        pytest.skip(f"ServiceUnavailableError - {str(e)}")
+    except litellm.InternalServerError as e:
+        pytest.skip(f"Skipping test due to internal server error - {str(e)}")

     usage: litellm.Usage = response.usage
|
|
Loading…
Add table
Add a link
Reference in a new issue