diff --git a/docs/my-website/docs/proxy/service_accounts.md b/docs/my-website/docs/proxy/service_accounts.md
new file mode 100644
index 000000000..5825af4cb
--- /dev/null
+++ b/docs/my-website/docs/proxy/service_accounts.md
@@ -0,0 +1,115 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import Image from '@theme/IdealImage';
+
+# [Beta] Service Accounts
+
+Use Service Accounts to create Virtual Keys that are not owned by a specific user, but are instead created for production projects.
+
+## Usage
+
+### 1. Set settings for Service Accounts
+
+Set `service_account_settings` if you want to define settings that only apply to service account keys.
+
+```yaml
+general_settings:
+  service_account_settings:
+    enforced_params: ["user"] # the "user" param is enforced for all requests made with any service account key
+```
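+
+Then start the proxy pointing at this config. A minimal sketch, assuming the settings above are saved as `config.yaml` (the path is illustrative):
+
+```shell
+# start the LiteLLM proxy with the service account settings above
+litellm --config config.yaml
+
+# by default the proxy listens on http://0.0.0.0:4000
+```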
+
+### 2. Create Service Account Key on LiteLLM Proxy Admin UI
+
+<Image img={require('../../img/create_service_account.png')} />
+
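+If you prefer to script key creation instead of using the Admin UI, the sketch below assumes a service account key can also be created via the proxy's `/key/generate` endpoint by setting a `service_account_id` in the key metadata (the admin key and the `prod-project-1` id are illustrative placeholders):
+
+```shell
+# illustrative sketch: create a key tied to a project / service account rather than a user
+# <your-proxy-admin-key> and "prod-project-1" are placeholders
+curl --location 'http://localhost:4000/key/generate' \
+ --header 'Authorization: Bearer <your-proxy-admin-key>' \
+ --header 'Content-Type: application/json' \
+ --data '{
+    "metadata": {"service_account_id": "prod-project-1"}
+}'
+```
+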
+### 3. Test Service Account Key
+
+<Tabs>
+<TabItem value="Unsuccessful call" label="Unsuccessful call">
+
+```shell
+curl --location 'http://localhost:4000/chat/completions' \
+ --header 'Authorization: Bearer <your-service-account-key>' \
+ --header 'Content-Type: application/json' \
+ --data '{
+ "model": "gpt-3.5-turbo",
+ "messages": [
+ {
+ "role": "user",
+ "content": "hello"
+ }
+ ]
+}'
+```
+
+Expected Response
+
+```json
+{
+ "error": {
+ "message": "BadRequest please pass param=user in request body. This is a required param for service account",
+ "type": "bad_request_error",
+ "param": "user",
+ "code": "400"
+ }
+}
+```
+
+</TabItem>
+<TabItem value="Successful call" label="Successful call">
+
+```shell
+curl --location 'http://localhost:4000/chat/completions' \
+ --header 'Authorization: Bearer <your-service-account-key>' \
+ --header 'Content-Type: application/json' \
+ --data '{
+ "model": "gpt-3.5-turbo",
+ "messages": [
+ {
+ "role": "user",
+ "content": "hello"
+ }
+ ],
+ "user": "test-user"
+}'
+```
+
+Expected Response
+
+```json
+{
+ "id": "chatcmpl-ad9595c7e3784a6783b469218d92d95c",
+ "choices": [
+ {
+ "finish_reason": "stop",
+ "index": 0,
+ "message": {
+ "content": "\n\nHello there, how may I assist you today?",
+ "role": "assistant",
+ "tool_calls": null,
+ "function_call": null
+ }
+ }
+ ],
+ "created": 1677652288,
+ "model": "gpt-3.5-turbo-0125",
+ "object": "chat.completion",
+ "system_fingerprint": "fp_44709d6fcb",
+ "usage": {
+ "completion_tokens": 12,
+ "prompt_tokens": 9,
+ "total_tokens": 21,
+ "completion_tokens_details": null
+ },
+ "service_tier": null
+}
+```
+
+</TabItem>
+</Tabs>
+
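+To confirm which key you are testing with, the proxy's `/key/info` endpoint can be queried with admin credentials. A minimal sketch (the key values are placeholders; the response is omitted here):
+
+```shell
+# illustrative: inspect the stored settings / metadata for a key
+curl --location 'http://localhost:4000/key/info?key=<your-service-account-key>' \
+ --header 'Authorization: Bearer <your-proxy-admin-key>'
+```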
diff --git a/docs/my-website/img/create_service_account.png b/docs/my-website/img/create_service_account.png
new file mode 100644
index 000000000..6474028ff
Binary files /dev/null and b/docs/my-website/img/create_service_account.png differ
diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js
index e0512f080..a33e2b601 100644
--- a/docs/my-website/sidebars.js
+++ b/docs/my-website/sidebars.js
@@ -50,7 +50,7 @@ const sidebars = {
{
type: "category",
label: "🔑 Authentication",
- items: ["proxy/virtual_keys", "proxy/token_auth", "proxy/oauth2", "proxy/ip_address"],
+ items: ["proxy/virtual_keys", "proxy/token_auth", "proxy/oauth2", "proxy/service_accounts", "proxy/ip_address"],
},
{
type: "category",
diff --git a/litellm/proxy/proxy_config.yaml b/litellm/proxy/proxy_config.yaml
index 69e6b52a3..dda6f8274 100644
--- a/litellm/proxy/proxy_config.yaml
+++ b/litellm/proxy/proxy_config.yaml
@@ -2,26 +2,30 @@ model_list:
- model_name: gpt-3.5-turbo
litellm_params:
model: openai/gpt-3.5-turbo
- api_key: os.environ/OPENAI_API_KEY
+ api_key: fake-key
+ api_base: https://exampleopenaiendpoint-production.up.railway.app/
model_info:
supported_environments: ["development", "production", "staging"]
- - model_name: gpt-4
+ - model_name: rerank-english-v3.0
litellm_params:
- model: openai/gpt-4
- api_key: os.environ/OPENAI_API_KEY
+ model: cohere/rerank-english-v3.0
+ api_key: os.environ/COHERE_API_KEY
model_info:
supported_environments: ["production", "staging"]
- - model_name: gpt-4o
+ - model_name: llava-hf
litellm_params:
- model: openai/gpt-4o
- api_key: os.environ/OPENAI_API_KEY
+ model: openai/llava-hf/llava-v1.6-vicuna-7b-hf
+ api_key: fake-key
model_info:
- supported_environments: ["production"]
+ supported_environments: ["production", "staging"]
+general_settings:
+  service_account_settings:
+    enforced_params: ["user"]
litellm_settings:
cache: true
- callbacks: ["otel"]
+ # callbacks: ["otel"]
general_settings: