From f828be7ed7965539d4efcfc4639e44bf17b42a9d Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Fri, 15 Dec 2023 13:33:03 +0530
Subject: [PATCH] (feat) add openai.NotFoundError exception mapping

---
 litellm/utils.py | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/litellm/utils.py b/litellm/utils.py
index c5ef1799ba..d21d6fa32f 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -2854,7 +2854,18 @@ def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None, api_
             print()  # noqa
             print("\033[1;31mProvider List: https://docs.litellm.ai/docs/providers\033[0m")  # noqa
             print()  # noqa
-            raise ValueError(f"LLM Provider NOT provided. Pass in the LLM provider you are trying to call. E.g. For 'Huggingface' inference endpoints pass in `completion(model='huggingface/{model}',..)` Learn more: https://docs.litellm.ai/docs/providers")
+            error_str = f"LLM Provider NOT provided. Pass in the LLM provider you are trying to call. You passed model={model}\n Pass model as E.g. For 'Huggingface' inference endpoints pass in `completion(model='huggingface/starcoder',..)` Learn more: https://docs.litellm.ai/docs/providers"
+            # maps to openai.NotFoundError, this is raised when openai does not recognize the llm
+            raise litellm.exceptions.NotFoundError(  # type: ignore
+                message=error_str,
+                model=model,
+                response=httpx.Response(
+                    status_code=404,
+                    content=error_str,
+                    request=httpx.Request(method="GET", url="https://litellm.ai"),
+                ),
+                llm_provider="",
+            )
             return model, custom_llm_provider, dynamic_api_key, api_base
     except Exception as e:
         raise e
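
Note (editor's usage sketch, not part of the patch; the model name below is made up): after this change, get_llm_provider raises litellm.exceptions.NotFoundError, which maps to openai.NotFoundError per the commit title, instead of a bare ValueError when it cannot infer a provider, so callers that already handle OpenAI 404s can catch the unknown-provider case the same way.

    import litellm
    from litellm.utils import get_llm_provider

    try:
        # "not-a-real-model" is a hypothetical name with no known provider prefix
        get_llm_provider(model="not-a-real-model")
    except litellm.exceptions.NotFoundError as err:
        # handlers written for openai.NotFoundError (HTTP 404) catch this too
        print(f"unknown provider: {err}")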