fix vertex ai exceptions

Ishaan Jaff 2024-06-07 17:12:30 -07:00
parent a4589e33dd
commit d5e97861ee


@@ -9180,16 +9180,12 @@ def exception_type(
                     exception_mapping_worked = True
                     raise litellm.InternalServerError(
                         message=f"litellm.InternalServerError: VertexAIException - {error_str}",
-                        status_code=500,
                         model=model,
                         llm_provider="vertex_ai",
-                        request=(
-                            original_exception.request
-                            if hasattr(original_exception, "request")
-                            else httpx.Request(
-                                method="POST",
-                                url=" https://cloud.google.com/vertex-ai/",
-                            )
+                        response=httpx.Response(
+                            status_code=500,
+                            content=str(original_exception),
+                            request=httpx.Request(method="completion", url="https://github.com/BerriAI/litellm"),  # type: ignore
                         ),
                         litellm_debug_info=extra_information,
                     )
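
All three hunks make the same swap: the bare status_code and request keyword arguments are dropped and a synthetic httpx.Response is attached instead. A plausible reading (the commit message does not say) is that litellm's exception classes subclass the openai SDK's APIStatusError, whose constructor expects a response object and derives the status code and error body from it. A minimal, self-contained sketch of the response the patch builds; the RuntimeError here is only a stand-in for whatever Vertex AI actually raised:

import httpx

# Stand-in for the real Vertex AI error (assumption for this sketch).
original_exception = RuntimeError("504 Deadline Exceeded")

# Same construction as the patch: a synthetic response carrying the status
# code and the stringified original error as the body. httpx accepts the
# non-standard "completion" method string, hence the `# type: ignore`
# in the patched code.
response = httpx.Response(
    status_code=500,
    content=str(original_exception),
    request=httpx.Request(method="completion", url="https://github.com/BerriAI/litellm"),
)

print(response.status_code)  # 500
print(response.text)         # 504 Deadline Exceeded
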
@@ -9264,17 +9260,13 @@ def exception_type(
                     exception_mapping_worked = True
                     raise litellm.InternalServerError(
                         message=f"VertexAIException InternalServerError - {error_str}",
-                        status_code=500,
                         model=model,
                         llm_provider="vertex_ai",
                         litellm_debug_info=extra_information,
-                        request=getattr(
-                            original_exception,
-                            "request",
-                            httpx.Request(
-                                method="POST",
-                                url=" https://cloud.google.com/vertex-ai/",
-                            ),
+                        response=httpx.Response(
+                            status_code=500,
+                            content=str(original_exception),
+                            request=httpx.Request(method="completion", url="https://github.com/BerriAI/litellm"),  # type: ignore
                         ),
                     )
         elif custom_llm_provider == "palm" or custom_llm_provider == "gemini":
@@ -9878,7 +9870,11 @@ def exception_type(
                         llm_provider="azure",
                         model=model,
                         litellm_debug_info=extra_information,
-                        request=httpx.Request(method="POST", url="https://openai.com/"),
+                        response=httpx.Response(
+                            status_code=400,
+                            content=str(original_exception),
+                            request=httpx.Request(method="completion", url="https://github.com/BerriAI/litellm"),  # type: ignore
+                        ),
                     )
             elif "This model's maximum context length is" in error_str:
                 exception_mapping_worked = True
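
On the caller side, the mapped error now carries a response object a handler can inspect uniformly across providers. A hypothetical usage sketch, assuming the failure is mapped to litellm.InternalServerError as in the hunks above (the model name and prompt are illustrative, not from the commit):

import litellm

try:
    litellm.completion(
        model="vertex_ai/gemini-pro",  # illustrative model name, not from the commit
        messages=[{"role": "user", "content": "hello"}],
    )
except litellm.InternalServerError as e:
    # status_code and the body come from the synthetic httpx.Response
    # constructed in the patched exception mapping.
    print(e.status_code)
    print(e.message)
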