mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-27 03:34:10 +00:00
fix(router.py): support adding models across multiple orgs, with 1 model definition
This commit is contained in:
parent
121f4d8a1b
commit
aeeaadfaa6
3 changed files with 28 additions and 1 deletions
|
@ -252,6 +252,31 @@ $ litellm --config /path/to/config.yaml
|
|||
```
|
||||
|
||||
|
||||
## Multiple OpenAI Organizations

Add all OpenAI models across all of your OpenAI organizations with just one model definition:

```yaml
- model_name: "*"
  litellm_params:
    model: openai/*
    api_key: os.environ/OPENAI_API_KEY
    organization:
     - org-1
     - org-2
     - org-3
```

LiteLLM will automatically create separate deployments for each org.

Confirm this via:

```bash
curl --location 'http://0.0.0.0:4000/v1/model/info' \
--header 'Authorization: Bearer ${LITELLM_KEY}' \
--data ''
```

## Load Balancing

:::info
|
||||
|
|
|
@ -65,6 +65,8 @@ model_list:
|
|||
output_cost_per_token: 0
|
||||
model_info:
|
||||
max_input_tokens: 80920
|
||||
|
||||
|
||||
assistant_settings:
|
||||
custom_llm_provider: openai
|
||||
litellm_params:
|
||||
|
|
|
@ -1970,7 +1970,7 @@ class ProxyConfig:
|
|||
router = litellm.Router(
|
||||
**router_params, assistants_config=assistants_config
|
||||
) # type:ignore
|
||||
return router, model_list, general_settings
|
||||
return router, router.get_model_list(), general_settings
|
||||
|
||||
def get_model_info_with_id(self, model, db_model=False) -> RouterModelInfo:
|
||||
"""
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue