Merge pull request #3585 from BerriAI/litellm_router_batch_comp

[Litellm Proxy + litellm.Router] - Pass the same message/prompt to N models
Author: Ishaan Jaff, 2024-05-11 13:51:45 -07:00 (committed by GitHub)
Commit: bf909a89f8
7 changed files with 213 additions and 2 deletions

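In plain terms: when the `model` field of a /chat/completions request is a list rather than a single model name, the proxy now fans the same messages out to every listed model through the router's batch-completion path added below. A minimal client-side sketch, assuming a locally running proxy (the URL, key, and model names are placeholders, not part of this diff):

import requests

# Hypothetical local LiteLLM proxy endpoint and virtual key; adjust to your deployment.
resp = requests.post(
    "http://0.0.0.0:4000/chat/completions",
    headers={"Authorization": "Bearer sk-1234"},
    json={
        # A list here, instead of a single string, triggers the new batch path.
        "model": ["gpt-3.5-turbo", "groq-llama3"],
        "messages": [{"role": "user", "content": "What is the capital of France?"}],
    },
)
print(resp.json())  # expect one completion per listed model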

@@ -3656,7 +3656,7 @@ async def chat_completion(
         ### MODEL ALIAS MAPPING ###
         # check if model name in model alias map
         # get the actual model name
-        if data["model"] in litellm.model_alias_map:
+        if isinstance(data["model"], str) and data["model"] in litellm.model_alias_map:
             data["model"] = litellm.model_alias_map[data["model"]]
         ## LOGGING OBJECT ## - initialize logging object for logging success/failure events for call
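The isinstance guard matters because `in` on a dict hashes its left operand, and a list is unhashable; without it, a list-valued `model` would raise before ever reaching the batch path. For illustration:

alias_map = {"fast": "gpt-3.5-turbo"}
["gpt-3.5-turbo", "gpt-4"] in alias_map  # TypeError: unhashable type: 'list'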
@@ -3690,6 +3690,9 @@ async def chat_completion(
         # skip router if user passed their key
         if "api_key" in data:
             tasks.append(litellm.acompletion(**data))
+        elif isinstance(data["model"], list) and llm_router is not None:
+            _models = data.pop("model")
+            tasks.append(llm_router.abatch_completion(models=_models, **data))
         elif "user_config" in data:
             # initialize a new router instance. make request using this Router
             router_config = data.pop("user_config")
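For completeness, the same fan-out can be driven straight from litellm.Router. A sketch under assumed deployments (the model_list entries are placeholders; per the diff above, abatch_completion takes the model names via models= and receives the rest of the request body as kwargs):

import asyncio
import litellm

# Hypothetical deployments; replace with your own provider config.
router = litellm.Router(
    model_list=[
        {"model_name": "gpt-3.5-turbo", "litellm_params": {"model": "gpt-3.5-turbo"}},
        {"model_name": "groq-llama3", "litellm_params": {"model": "groq/llama3-8b-8192"}},
    ]
)

async def main():
    # Same messages, N models, awaited as one batch.
    responses = await router.abatch_completion(
        models=["gpt-3.5-turbo", "groq-llama3"],
        messages=[{"role": "user", "content": "What is the capital of France?"}],
    )
    for r in responses:
        print(r)

asyncio.run(main())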