fix(acompletion): fix exception raised by acompletion when custom_llm_provider is None

Krrish Dholakia 2023-11-27 11:34:42 -08:00
parent 37f3b1edd1
commit 56bb39e52c
2 changed files with 5 additions and 1 deletion


@@ -154,6 +154,7 @@ async def acompletion(*args, **kwargs):
     model = args[0] if len(args) > 0 else kwargs["model"]
     ### PASS ARGS TO COMPLETION ###
     kwargs["acompletion"] = True
+    custom_llm_provider = None
     try:
         # Use a partial function to pass your keyword arguments
         func = partial(completion, *args, **kwargs)
@@ -190,6 +191,7 @@ async def acompletion(*args, **kwargs):
         else:
             return response
     except Exception as e:
+        custom_llm_provider = custom_llm_provider or "openai"
         raise exception_type(
             model=model, custom_llm_provider=custom_llm_provider, original_exception=e, completion_kwargs=args,
         )
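
This change targets the error path: `custom_llm_provider` is presumably only assigned later inside the `try` block, so when `completion` failed before that assignment, the `except` handler itself crashed with an UnboundLocalError while building the `exception_type` call, instead of raising the mapped exception. Binding the name to `None` up front, and falling back to `"openai"` in the handler, keeps `exception_type` callable on any failure. A minimal sketch of the failure mode and the fix, with hypothetical helper names rather than litellm's internals:

def resolve_provider() -> str:
    # Stand-in for provider lookup; fails before the name is bound.
    raise ValueError("unknown model")

def buggy_handler():
    try:
        custom_llm_provider = resolve_provider()
    except Exception as e:
        # Referencing `custom_llm_provider` raises UnboundLocalError here,
        # masking the original ValueError.
        raise RuntimeError(f"{custom_llm_provider}: {e}")

def fixed_handler():
    custom_llm_provider = None  # bound up front, as the commit does
    try:
        custom_llm_provider = resolve_provider()
    except Exception as e:
        custom_llm_provider = custom_llm_provider or "openai"  # default fallback
        raise RuntimeError(f"{custom_llm_provider}: {e}")

for fn in (buggy_handler, fixed_handler):
    try:
        fn()
    except Exception as e:
        print(type(e).__name__)  # UnboundLocalError, then RuntimeError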


@@ -697,7 +697,7 @@ async def completion(request: Request, model: Optional[str] = None, user_api_key
 @router.post("/chat/completions", dependencies=[Depends(user_api_key_auth)])
 @router.post("/openai/deployments/{model:path}/chat/completions", dependencies=[Depends(user_api_key_auth)]) # azure compatible endpoint
 async def chat_completion(request: Request, model: Optional[str] = None, user_api_key_dict: dict = Depends(user_api_key_auth)):
-    global general_settings
+    global general_settings, user_debug
     try:
         body = await request.body()
         body_str = body.decode()
@@ -734,6 +734,8 @@ async def chat_completion(request: Request, model: Optional[str] = None, user_ap
         return response
     except Exception as e:
         print(f"\033[1;31mAn error occurred: {e}\n\n Debug this by setting `--debug`, e.g. `litellm --model gpt-3.5-turbo --debug`")
+        if user_debug:
+            traceback.print_exc()
         error_traceback = traceback.format_exc()
         error_msg = f"{str(e)}\n\n{error_traceback}"
         try:
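
The proxy change complements the CLI hint printed just above it: with `user_debug` pulled into scope via `global`, the full traceback goes to the console only when the proxy runs in debug mode, while the formatted traceback is still captured into the error message in every case. A standalone sketch of that pattern, where `user_debug` stands in for the proxy's module-level flag:

import traceback

user_debug = False  # stand-in for the proxy's flag, enabled by `--debug`

def report_error(e: Exception) -> str:
    if user_debug:
        traceback.print_exc()                 # full traceback to console, debug only
    error_traceback = traceback.format_exc()  # always captured for the error message
    return f"{str(e)}\n\n{error_traceback}"

try:
    1 / 0
except Exception as e:
    error_msg = report_error(e)  # traceback text is kept regardless of debug mode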