Merge branch 'main' into litellm_response_cost_headers

Krish Dholakia, 2024-06-27 21:33:09 -07:00 (committed by GitHub)
commit fa1cb7d903
124 changed files with 3705 additions and 150 deletions

@@ -2956,6 +2956,11 @@ async def chat_completion(
         if isinstance(data["model"], str) and data["model"] in litellm.model_alias_map:
             data["model"] = litellm.model_alias_map[data["model"]]
 
+        ### CALL HOOKS ### - modify/reject incoming data before calling the model
+        data = await proxy_logging_obj.pre_call_hook(  # type: ignore
+            user_api_key_dict=user_api_key_dict, data=data, call_type="completion"
+        )
+
         ## LOGGING OBJECT ## - initialize logging object for logging success/failure events for call
         data["litellm_call_id"] = str(uuid.uuid4())
         logging_obj, data = litellm.utils.function_setup(
@@ -2967,11 +2972,6 @@ async def chat_completion(
 
         data["litellm_logging_obj"] = logging_obj
 
-        ### CALL HOOKS ### - modify/reject incoming data before calling the model
-        data = await proxy_logging_obj.pre_call_hook(  # type: ignore
-            user_api_key_dict=user_api_key_dict, data=data, call_type="completion"
-        )
-
         tasks = []
         tasks.append(
             proxy_logging_obj.during_call_hook(
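
Read together, these two hunks move the pre_call_hook invocation from after the logging-object setup to before it, so any fields a hook adds to or rewrites on data are visible to the logging object that function_setup creates. A minimal sketch of that ordering, using hypothetical simplified stand-ins for proxy_logging_obj.pre_call_hook and litellm.utils.function_setup (not the LiteLLM implementations):

    import asyncio
    import uuid


    async def pre_call_hook(data: dict) -> dict:
        # Stand-in for proxy_logging_obj.pre_call_hook: a hook may modify
        # or reject the incoming request before the model is called.
        data["metadata"] = {"guardrail_checked": True}
        return data


    def function_setup(data: dict) -> dict:
        # Stand-in for litellm.utils.function_setup: snapshots the request
        # state for success/failure logging.
        return {"call_id": data["litellm_call_id"], "snapshot": dict(data)}


    async def handle(data: dict) -> dict:
        data = await pre_call_hook(data)    # hooks run first (new order)
        data["litellm_call_id"] = str(uuid.uuid4())
        logging_obj = function_setup(data)  # logging sees the hook's edits
        return logging_obj


    print(asyncio.run(handle({"model": "gpt-3.5-turbo"})))

Under the previous order, the snapshot taken at setup time would miss anything the hook added, such as the metadata field in this sketch.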
@@ -6300,7 +6300,7 @@ async def model_info_v2(
         raise HTTPException(
             status_code=500,
             detail={
-                "error": f"Invalid llm model list. llm_model_list={llm_model_list}"
+                "error": f"No model list passed, models={llm_model_list}. You can add a model through the config.yaml or on the LiteLLM Admin UI."
             },
         )