Merge pull request #3585 from BerriAI/litellm_router_batch_comp

[Litellm Proxy + litellm.Router] - Pass the same message/prompt to N models
Ishaan Jaff 2024-05-11 13:51:45 -07:00 committed by GitHub
commit bf909a89f8
7 changed files with 213 additions and 2 deletions

@@ -606,6 +606,33 @@ class Router:
            self.fail_calls[model_name] += 1
            raise e

    async def abatch_completion(
        self, models: List[str], messages: List[Dict[str, str]], **kwargs
    ):
        async def _async_completion_no_exceptions(
            model: str, messages: List[Dict[str, str]], **kwargs
        ):
            """
            Wrapper around self.acompletion that catches exceptions and returns them as a result
            """
            try:
                return await self.acompletion(model=model, messages=messages, **kwargs)
            except Exception as e:
                return e

        _tasks = []
        for model in models:
            # schedule one completion per model; if a task fails, the
            # exception is returned as its result instead of raised, so one
            # bad model does not abort the whole batch
            _tasks.append(
                _async_completion_no_exceptions(
                    model=model, messages=messages, **kwargs
                )
            )

        response = await asyncio.gather(*_tasks)
        return response
    def image_generation(self, prompt: str, model: str, **kwargs):
        try:
            kwargs["model"] = model