Merge pull request #3087 from BerriAI/litellm_improve_vertex_exceptions
Fix - show `model`, `deployment` and `model_group` in vertex exceptions
commit e20577d80c
2 changed files with 62 additions and 0 deletions
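For context, the mapping code in the second hunk appends `model`, and, when router metadata is present, `model_group` and `deployment`, as `key: value` lines on the original error string. A hypothetical resulting Vertex AI exception message (values illustrative only) would therefore end with:

model: vertex_ai/gemini-pro
model_group: vertex-gemini-pro
deployment: vertex_ai/gemini-pro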
@@ -536,6 +536,55 @@ def test_completion_openai_api_key_exception():

# test_async_acompletion()


def test_router_completion_vertex_exception():
    # Routed request: the mapped exception should name the model, the
    # router's model_group, and the underlying deployment.
    try:
        import litellm

        litellm.set_verbose = True
        router = litellm.Router(
            model_list=[
                {
                    "model_name": "vertex-gemini-pro",
                    "litellm_params": {
                        "model": "vertex_ai/gemini-pro",
                        "api_key": "good-morning",
                    },
                },
            ]
        )
        response = router.completion(
            model="vertex-gemini-pro",
            messages=[{"role": "user", "content": "hello"}],
            vertex_project="bad-project",
        )
        pytest.fail("Request should have failed - bad api key")
    except Exception as e:
        print("exception: ", e)
        assert "model: vertex_ai/gemini-pro" in str(e)
        assert "model_group: vertex-gemini-pro" in str(e)
        assert "deployment: vertex_ai/gemini-pro" in str(e)


def test_litellm_completion_vertex_exception():
    # Direct completion call: no router metadata is attached, so only
    # `model` should appear in the exception message.
    try:
        import litellm

        litellm.set_verbose = True
        response = completion(
            model="vertex_ai/gemini-pro",
            api_key="good-morning",
            messages=[{"role": "user", "content": "hello"}],
            vertex_project="bad-project",
        )
        pytest.fail("Request should have failed - bad api key")
    except Exception as e:
        print("exception: ", e)
        assert "model: vertex_ai/gemini-pro" in str(e)
        assert "model_group" not in str(e)
        assert "deployment" not in str(e)


# # test_invalid_request_error(model="command-nightly")
# # Test 3: Rate Limit Errors
# def test_model_call(model):
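The router test's assertions rely on routing details being carried in the request metadata. A minimal sketch of the shape the exception-mapping hunk below reads (key names are taken from that hunk's `.get()` calls; the concrete values here are assumptions matching the test above):

# hypothetical completion_kwargs as consumed by exception_type below
completion_kwargs = {
    "model": "vertex_ai/gemini-pro",
    "kwargs": {
        "metadata": {
            "model_group": "vertex-gemini-pro",  # router-level alias
            "deployment": "vertex_ai/gemini-pro",  # chosen deployment
        }
    },
}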
@@ -7812,6 +7812,19 @@ def exception_type(
                response=original_exception.response,
            )
        elif custom_llm_provider == "vertex_ai":
            if completion_kwargs is not None:
                # add model, deployment and model_group to the exception message
                _model = completion_kwargs.get("model")
                _kwargs = completion_kwargs.get("kwargs", {}) or {}
                _metadata = _kwargs.get("metadata", {}) or {}
                _model_group = _metadata.get("model_group")
                _deployment = _metadata.get("deployment")
                error_str += f"\nmodel: {_model}\n"
                if _model_group is not None:
                    error_str += f"model_group: {_model_group}\n"
                if _deployment is not None:
                    error_str += f"deployment: {_deployment}\n"

            if (
                "Vertex AI API has not been used in project" in error_str
                or "Unable to find your project" in error_str
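Because the fields are appended as `key: value` lines, downstream code can recover them from the raised exception's string. A minimal, hypothetical helper (not part of this PR) illustrating that:

import re

def parse_vertex_exception_fields(exc: Exception) -> dict:
    # Pull model / model_group / deployment back out of the enriched message.
    fields = {}
    for key in ("model", "model_group", "deployment"):
        match = re.search(rf"^{key}: (.+)$", str(exc), re.MULTILINE)
        if match:
            fields[key] = match.group(1)
    return fields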