Merge pull request #5101 from BerriAI/litellm_router_prioritization

feat(router.py): allows /chat/completion endpoint to work for request prioritization calls
Krish Dholakia, 2024-08-07 20:00:06 -07:00, committed by GitHub
commit 37ef63f522
2 changed files with 11 additions and 2 deletions

docs/my-website/sidebars.js

@@ -193,12 +193,12 @@ const sidebars = {
        "vertex_ai"
      ],
    },
-    "scheduler",
    {
      type: "category",
      label: "🚅 LiteLLM Python SDK",
      items: [
        "routing",
+        "scheduler",
        "set_keys",
        "completion/token_usage",
        "sdk_custom_pricing",

litellm/router.py

@@ -654,6 +654,11 @@ class Router:
            timeout = kwargs.get("request_timeout", self.timeout)
            kwargs.setdefault("metadata", {}).update({"model_group": model})

-            response = await self.async_function_with_fallbacks(**kwargs)
+            if kwargs.get("priority", None) is not None and isinstance(
+                kwargs.get("priority"), int
+            ):
+                response = await self.schedule_acompletion(**kwargs)
+            else:
+                response = await self.async_function_with_fallbacks(**kwargs)

            return response
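
For context, a minimal sketch of how a caller would exercise the new branch: passing an integer `priority` kwarg to `Router.acompletion` now routes the call through `schedule_acompletion` instead of the default fallback path. The model list below is illustrative only.

    import asyncio

    from litellm import Router

    # Illustrative config; any valid model_list works the same way.
    router = Router(
        model_list=[
            {
                "model_name": "gpt-3.5-turbo",
                "litellm_params": {"model": "gpt-3.5-turbo"},
            }
        ]
    )

    async def main():
        # An integer `priority` takes the schedule_acompletion path added
        # above; omitting it (or passing a non-int) keeps the default path.
        response = await router.acompletion(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hey, how's it going?"}],
            priority=0,
        )
        print(response)

    asyncio.run(main())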
@@ -1097,6 +1102,10 @@ class Router:
                _response = await self.acompletion(
                    model=model, messages=messages, stream=stream, **kwargs
                )
+                _response._hidden_params.setdefault("additional_headers", {})
+                _response._hidden_params["additional_headers"].update(
+                    {"x-litellm-request-prioritization-used": True}
+                )
                return _response
            except Exception as e:
                setattr(e, "priority", priority)
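
On the consumer side, the new header can be read back from the response's hidden params. A short sketch follows; note that `_hidden_params` is a private attribute and the helper name here is hypothetical, not part of the litellm API.

    from litellm import ModelResponse

    def was_prioritized(response: ModelResponse) -> bool:
        # Hypothetical helper: returns True when the header added in the
        # diff above is present, i.e. the request went through the
        # prioritization scheduler rather than the default path.
        headers = getattr(response, "_hidden_params", {}).get(
            "additional_headers", {}
        )
        return bool(headers.get("x-litellm-request-prioritization-used"))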