fix(router.py): support adding models across multiple orgs, with 1 model definition

Krrish Dholakia 2024-06-18 19:57:10 -07:00
parent 121f4d8a1b
commit aeeaadfaa6
3 changed files with 28 additions and 1 deletion

@@ -252,6 +252,31 @@ $ litellm --config /path/to/config.yaml
```
## Multiple OpenAI Organizations
Add all OpenAI models across all of your OpenAI organizations with just one model definition:
```yaml
- model_name: "*"
  litellm_params:
    model: openai/*
    api_key: os.environ/OPENAI_API_KEY
    organization:
      - org-1
      - org-2
      - org-3
```
LiteLLM will automatically create a separate deployment for each org.
Confirm this via:
```bash
curl --location 'http://0.0.0.0:4000/v1/model/info' \
--header 'Authorization: Bearer ${LITELLM_KEY}' \
--data ''
```
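For a scripted check, here is a minimal Python sketch of the same call, assuming the proxy is listening on `0.0.0.0:4000`, `LITELLM_KEY` is exported, and the `requests` package is installed:
```python
# Minimal sketch: query the proxy's /v1/model/info endpoint, mirroring the
# curl call above. Assumes the response carries a top-level "data" list of
# deployments, as the model info output in these docs suggests.
import os

import requests

resp = requests.get(
    "http://0.0.0.0:4000/v1/model/info",
    headers={"Authorization": f"Bearer {os.environ['LITELLM_KEY']}"},
)
resp.raise_for_status()

# With the multi-org config above, each org should appear as its own deployment.
for entry in resp.json()["data"]:
    print(entry["model_name"], entry["litellm_params"].get("organization"))
```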
## Load Balancing
:::info

@@ -65,6 +65,8 @@ model_list:
      output_cost_per_token: 0
    model_info:
      max_input_tokens: 80920
assistant_settings:
  custom_llm_provider: openai
  litellm_params:
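The `assistant_settings` block above is what `ProxyConfig` forwards to the Router as `assistants_config` (see the `litellm.Router(...)` call in the next hunk). As a rough illustration, here is a sketch of the equivalent direct construction; the dict fields mirror the YAML, the exact schema is an assumption, and the `api_key` value is hypothetical:
```python
# Hedged sketch: build a Router with an assistants config directly, mirroring
# ProxyConfig's Router(**router_params, assistants_config=assistants_config).
# The dict layout follows the YAML block above; treat it as illustrative only.
import litellm

assistants_config = {
    "custom_llm_provider": "openai",
    "litellm_params": {
        "api_key": "os.environ/OPENAI_API_KEY",  # hypothetical entry
    },
}

router = litellm.Router(
    model_list=[],
    assistants_config=assistants_config,  # type: ignore
)
```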

@@ -1970,7 +1970,7 @@ class ProxyConfig:
        router = litellm.Router(
            **router_params, assistants_config=assistants_config
        )  # type:ignore
-        return router, model_list, general_settings
+        return router, router.get_model_list(), general_settings

    def get_model_info_with_id(self, model, db_model=False) -> RouterModelInfo:
        """