From 56bb39e52c9961122942f3ddbe6352541fd6fa3c Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Mon, 27 Nov 2023 11:34:42 -0800
Subject: [PATCH] fix(acompletion): fix acompletion raise exception issue when
 custom llm provider is none

---
 litellm/main.py               | 2 ++
 litellm/proxy/proxy_server.py | 4 +++-
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/litellm/main.py b/litellm/main.py
index 125a2daccf..85091fec99 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -154,6 +154,7 @@ async def acompletion(*args, **kwargs):
     model = args[0] if len(args) > 0 else kwargs["model"]
     ### PASS ARGS TO COMPLETION ###
     kwargs["acompletion"] = True
+    custom_llm_provider = None
     try:
         # Use a partial function to pass your keyword arguments
         func = partial(completion, *args, **kwargs)
@@ -190,6 +191,7 @@ async def acompletion(*args, **kwargs):
         else:
             return response
     except Exception as e:
+        custom_llm_provider = custom_llm_provider or "openai"
         raise exception_type(
                 model=model, custom_llm_provider=custom_llm_provider, original_exception=e, completion_kwargs=args,
             )
diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index dc0ce4a751..6b363df3b9 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -697,7 +697,7 @@ async def completion(request: Request, model: Optional[str] = None, user_api_key
 @router.post("/chat/completions", dependencies=[Depends(user_api_key_auth)])
 @router.post("/openai/deployments/{model:path}/chat/completions", dependencies=[Depends(user_api_key_auth)]) # azure compatible endpoint
 async def chat_completion(request: Request, model: Optional[str] = None, user_api_key_dict: dict = Depends(user_api_key_auth)):
-    global general_settings
+    global general_settings, user_debug
     try:
         body = await request.body()
         body_str = body.decode()
@@ -734,6 +734,8 @@ async def chat_completion(request: Request, model: Optional[str] = None, user_ap
         return response
     except Exception as e:
         print(f"\033[1;31mAn error occurred: {e}\n\n Debug this by setting `--debug`, e.g. `litellm --model gpt-3.5-turbo --debug`")
+        if user_debug:
+            traceback.print_exc()
         error_traceback = traceback.format_exc()
         error_msg = f"{str(e)}\n\n{error_traceback}"
         try:
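
Note: a minimal sketch of the fallback pattern the main.py hunks introduce. `custom_llm_provider` and `exception_type` are names taken from the diff above; the simplified helper and bodies below are illustrative assumptions, not litellm's actual implementation.

    # Sketch only: simplified stand-ins for litellm internals.
    def get_llm_provider_stub(model: str) -> str:
        # Hypothetical helper: provider detection fails for unknown models.
        if model.startswith("gpt-"):
            return "openai"
        raise ValueError(f"could not infer provider for {model!r}")

    def acompletion_like(model: str):
        # Initialize before the try block so the except handler can always
        # read the name, mirroring the added `custom_llm_provider = None`.
        custom_llm_provider = None
        try:
            custom_llm_provider = get_llm_provider_stub(model)
            raise RuntimeError("simulated downstream failure")
        except Exception as e:
            # Default to "openai" when provider detection never ran or failed,
            # so exception mapping still receives a usable provider name,
            # mirroring `custom_llm_provider = custom_llm_provider or "openai"`.
            custom_llm_provider = custom_llm_provider or "openai"
            raise RuntimeError(f"[{custom_llm_provider}] {e}") from e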