Merge pull request #2723 from BerriAI/litellm_proxy_perf_imp

[FEAT] Improve Proxy Perf - access router model names in constant time

commit 4777921a31
1 changed file with 9 additions and 40 deletions
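
The change is the same in every hunk below: instead of rebuilding a list of model names from llm_model_list on every request, each handler now reads the precomputed llm_router.model_names attribute. A minimal sketch of the before/after pattern (the Router class here is an illustrative stand-in, not litellm's actual implementation):

    # Minimal sketch; `deployments` stands in for llm_model_list and
    # `Router` for litellm's router. Neither is the real litellm code.
    deployments = [{"model_name": f"model-{i}"} for i in range(1_000)]

    class Router:
        def __init__(self, model_list):
            # Build the name list once, when the router is configured.
            self.model_names = [m["model_name"] for m in model_list]

    llm_router = Router(deployments)

    # Before: O(n) work on every request.
    router_model_names = (
        [m["model_name"] for m in deployments] if deployments is not None else []
    )

    # After: a single attribute read, constant time per request.
    router_model_names = llm_router.model_names if llm_router is not None else []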
@@ -2648,11 +2648,7 @@ async def async_data_generator(response, user_api_key_dict):
         verbose_proxy_logger.debug(
             f"\033[1;31mAn error occurred: {e}\n\n Debug this by setting `--debug`, e.g. `litellm --model gpt-3.5-turbo --debug`"
         )
-        router_model_names = (
-            [m["model_name"] for m in llm_model_list]
-            if llm_model_list is not None
-            else []
-        )
+        router_model_names = llm_router.model_names if llm_router is not None else []
         if user_debug:
             traceback.print_exc()

@@ -2975,11 +2971,7 @@ async def completion(
         start_time = time.time()

         ### ROUTE THE REQUESTs ###
-        router_model_names = (
-            [m["model_name"] for m in llm_model_list]
-            if llm_model_list is not None
-            else []
-        )
+        router_model_names = llm_router.model_names if llm_router is not None else []
         # skip router if user passed their key
         if "api_key" in data:
             response = await litellm.atext_completion(**data)
@@ -3192,11 +3184,8 @@ async def chat_completion(
         start_time = time.time()

         ### ROUTE THE REQUEST ###
-        router_model_names = (
-            [m["model_name"] for m in llm_model_list]
-            if llm_model_list is not None
-            else []
-        )
+        # Do not change this - it should be a constant time fetch - ALWAYS
+        router_model_names = llm_router.model_names if llm_router is not None else []
         # skip router if user passed their key
         if "api_key" in data:
             tasks.append(litellm.acompletion(**data))
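
The new comment in this hunk ("Do not change this - it should be a constant time fetch - ALWAYS") pins the intent. A rough way to see the difference is to time both expressions; the setup below is a stand-in rather than litellm's actual data, and the numbers are only illustrative:

    import timeit

    # Stand-in for llm_model_list; sized to make the per-request cost visible.
    deployments = [{"model_name": f"model-{i}"} for i in range(1_000)]
    cached_names = [m["model_name"] for m in deployments]  # built once, as the router would

    # Old pattern: rebuild the list on every call.
    rebuild = timeit.timeit(lambda: [m["model_name"] for m in deployments], number=10_000)
    # New pattern: reuse the list built at configuration time.
    reuse = timeit.timeit(lambda: cached_names, number=10_000)

    print(f"rebuild per call: {rebuild:.4f}s")  # grows with the deployment count
    print(f"cached read:      {reuse:.4f}s")    # stays flat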
@@ -3269,11 +3258,7 @@ async def chat_completion(
         verbose_proxy_logger.debug(
             f"\033[1;31mAn error occurred: {e}\n\n Debug this by setting `--debug`, e.g. `litellm --model gpt-3.5-turbo --debug`"
         )
-        router_model_names = (
-            [m["model_name"] for m in llm_model_list]
-            if llm_model_list is not None
-            else []
-        )
+        router_model_names = llm_router.model_names if llm_router is not None else []
         if user_debug:
             traceback.print_exc()

@@ -3380,11 +3365,7 @@ async def embeddings(
         if data["model"] in litellm.model_alias_map:
             data["model"] = litellm.model_alias_map[data["model"]]

-        router_model_names = (
-            [m["model_name"] for m in llm_model_list]
-            if llm_model_list is not None
-            else []
-        )
+        router_model_names = llm_router.model_names if llm_router is not None else []
         if (
             "input" in data
             and isinstance(data["input"], list)
@@ -3555,11 +3536,7 @@ async def image_generation(
         if data["model"] in litellm.model_alias_map:
             data["model"] = litellm.model_alias_map[data["model"]]

-        router_model_names = (
-            [m["model_name"] for m in llm_model_list]
-            if llm_model_list is not None
-            else []
-        )
+        router_model_names = llm_router.model_names if llm_router is not None else []

         ### CALL HOOKS ### - modify incoming data / reject request before calling the model
         data = await proxy_logging_obj.pre_call_hook(
@@ -3703,11 +3680,7 @@ async def audio_transcriptions(
             **data,
         } # add the team-specific configs to the completion call

-        router_model_names = (
-            [m["model_name"] for m in llm_model_list]
-            if llm_model_list is not None
-            else []
-        )
+        router_model_names = llm_router.model_names if llm_router is not None else []

         assert (
             file.filename is not None
@@ -3872,11 +3845,7 @@ async def moderations(
             **data,
         } # add the team-specific configs to the completion call

-        router_model_names = (
-            [m["model_name"] for m in llm_model_list]
-            if llm_model_list is not None
-            else []
-        )
+        router_model_names = llm_router.model_names if llm_router is not None else []

         ### CALL HOOKS ### - modify incoming data / reject request before calling the model
         data = await proxy_logging_obj.pre_call_hook(
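
For llm_router.model_names to be a safe constant-time read, the cached list has to be refreshed whenever the deployment list changes, rather than on every request. A sketch of that invariant, assuming nothing about litellm's Router beyond the model_names attribute this diff relies on:

    class Router:
        """Illustrative stand-in; litellm's actual Router internals may differ."""

        def __init__(self, model_list):
            self.model_names = []
            self.set_model_list(model_list)

        def set_model_list(self, model_list):
            # Recompute the cached names only when deployments change.
            self.model_list = model_list
            self.model_names = [m["model_name"] for m in model_list]

With that invariant in place, every handler touched by this diff pays one attribute read per request instead of an O(n) list rebuild.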