(fix) proxy raise exceptions from litellm.completion()

ishaan-jaff 2023-11-15 16:20:42 -08:00
parent e698f8218f
commit a18fe5a1fd


@@ -455,6 +455,7 @@ def litellm_completion(*args, **kwargs):
         kwargs["api_base"] = user_api_base
     ## ROUTE TO CORRECT ENDPOINT ##
     router_model_names = [m["model_name"] for m in llm_model_list] if llm_model_list is not None else []
+    try:
         if llm_router is not None and kwargs["model"] in router_model_names: # model in router model list
             if call_type == "chat_completion":
                 response = llm_router.completion(*args, **kwargs)
@@ -465,6 +466,8 @@ def litellm_completion(*args, **kwargs):
                 response = litellm.completion(*args, **kwargs)
             elif call_type == "text_completion":
                 response = litellm.text_completion(*args, **kwargs)
+    except Exception as e:
+        raise e
     if 'stream' in kwargs and kwargs['stream'] == True: # use generate_responses to stream responses
         return StreamingResponse(data_generator(response), media_type='text/event-stream')
     return response
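
For context, a minimal sketch of how the re-raised error can reach proxy clients: because litellm_completion() now re-raises exceptions from llm_router.completion() / litellm.completion() instead of swallowing them, the FastAPI layer can turn them into an error response. The handler below is an illustration only, not part of this commit; the handler name and response shape are assumptions, and only FastAPI's standard exception_handler API is used.

# Hypothetical handler (not in this commit): converts the exception re-raised
# by litellm_completion() into a JSON error response for the proxy client.
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse

app = FastAPI()

@app.exception_handler(Exception)
async def handle_llm_errors(request: Request, exc: Exception):
    # The original message from litellm.completion() is preserved because the
    # try/except above re-raises it rather than returning a generic response.
    return JSONResponse(status_code=500, content={"error": str(exc)})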