fix(utils.py): raise a bad request error if the litellm client raises a model/provider not found error

Krrish Dholakia 2024-01-04 15:50:14 +05:30
parent 8f9009817b
commit 773a0a147a


@@ -4081,11 +4081,11 @@ def get_llm_provider(
 print() # noqa
 error_str = f"LLM Provider NOT provided. Pass in the LLM provider you are trying to call. You passed model={model}\n Pass model as E.g. For 'Huggingface' inference endpoints pass in `completion(model='huggingface/starcoder',..)` Learn more: https://docs.litellm.ai/docs/providers"
 # maps to openai.NotFoundError, this is raised when openai does not recognize the llm
-raise litellm.exceptions.NotFoundError( # type: ignore
+raise litellm.exceptions.BadRequestError( # type: ignore
     message=error_str,
     model=model,
     response=httpx.Response(
-        status_code=404,
+        status_code=400,
         content=error_str,
         request=httpx.request(method="completion", url="https://github.com/BerriAI/litellm"), # type: ignore
     ),
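For context, here is a minimal sketch (not part of the commit) of how a caller would observe the new behavior. It assumes `get_llm_provider` is importable from the top-level `litellm` package (it is defined in utils.py) and uses a hypothetical model string, "my-unknown-model", that litellm cannot map to any provider; after this change the failed lookup surfaces as a `BadRequestError` wrapping an HTTP 400 response instead of a `NotFoundError` with a 404.

# Sketch only: demonstrates the changed exception type for an unresolvable model.
# "my-unknown-model" is a hypothetical name no provider recognizes.
import litellm
from litellm import get_llm_provider

try:
    # Without a provider prefix (e.g. "huggingface/...") and with a model name
    # litellm does not know, provider inference fails inside get_llm_provider.
    get_llm_provider(model="my-unknown-model")
except litellm.exceptions.BadRequestError as e:
    # After this commit the failure maps to a 400 Bad Request rather than a 404.
    print("provider lookup failed:", e)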