forked from phoenix/litellm-mirror
test: handle gemini overloaded model error
This commit is contained in:
parent
ffbdaf868f
commit
b92700cc19
2 changed files with 8 additions and 5 deletions
|
@@ -1124,10 +1124,13 @@ def exception_type( # type: ignore # noqa: PLR0915
|
|||
),
|
||||
),
|
||||
)
|
||||
elif "500 Internal Server Error" in error_str:
|
||||
elif (
|
||||
"500 Internal Server Error" in error_str
|
||||
or "The model is overloaded." in error_str
|
||||
):
|
||||
exception_mapping_worked = True
|
||||
raise ServiceUnavailableError(
|
||||
message=f"litellm.ServiceUnavailableError: VertexAIException - {error_str}",
|
||||
raise litellm.InternalServerError(
|
||||
message=f"litellm.InternalServerError: VertexAIException - {error_str}",
|
||||
model=model,
|
||||
llm_provider="vertex_ai",
|
||||
litellm_debug_info=extra_information,
|
||||
|
|
|
@@ -205,8 +205,8 @@ def test_stream_chunk_builder_litellm_usage_chunks():
|
|||
complete_response=True,
|
||||
stream_options={"include_usage": True},
|
||||
)
|
||||
except litellm.ServiceUnavailableError as e:
|
||||
pytest.skip(f"ServiceUnavailableError - {str(e)}")
|
||||
except litellm.InternalServerError as e:
|
||||
pytest.skip(f"Skipping test due to internal server error - {str(e)}")
|
||||
|
||||
usage: litellm.Usage = response.usage
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue