diff --git a/docs/my-website/docs/proxy/configs.md b/docs/my-website/docs/proxy/configs.md
index 2552c20042..9381a14a44 100644
--- a/docs/my-website/docs/proxy/configs.md
+++ b/docs/my-website/docs/proxy/configs.md
@@ -252,6 +252,31 @@ $ litellm --config /path/to/config.yaml
 ```
 
+## Multiple OpenAI Organizations
+
+Add all OpenAI models across all OpenAI organizations with just 1 model definition
+
+```yaml
+  - model_name: "*"
+    litellm_params:
+      model: openai/*
+      api_key: os.environ/OPENAI_API_KEY
+      organization:
+        - org-1
+        - org-2
+        - org-3
+```
+
+LiteLLM will automatically create separate deployments for each org.
+
+Confirm this via:
+
+```bash
+curl --location 'http://0.0.0.0:4000/v1/model/info' \
+--header 'Authorization: Bearer ${LITELLM_KEY}' \
+--data ''
+```
+
 
 ## Load Balancing
 
 :::info
diff --git a/litellm/proxy/_super_secret_config.yaml b/litellm/proxy/_super_secret_config.yaml
index c19076f299..ec79cbbdf2 100644
--- a/litellm/proxy/_super_secret_config.yaml
+++ b/litellm/proxy/_super_secret_config.yaml
@@ -65,6 +65,8 @@ model_list:
       output_cost_per_token: 0
     model_info:
       max_input_tokens: 80920
+
+
 assistant_settings:
   custom_llm_provider: openai
   litellm_params:
diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index f50c138c25..0645f19ac1 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -1970,7 +1970,7 @@ class ProxyConfig:
             router = litellm.Router(
                 **router_params, assistants_config=assistants_config
             )  # type:ignore
-        return router, model_list, general_settings
+        return router, router.get_model_list(), general_settings
 
     def get_model_info_with_id(self, model, db_model=False) -> RouterModelInfo:
         """