docs service accounts (#5900)

Ishaan Jaff authored 2024-09-25 15:46:13 -07:00, committed by GitHub
parent 2444d76fda
commit 4bdeefd7e4
4 changed files with 129 additions and 10 deletions

@@ -0,0 +1,115 @@
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import Image from '@theme/IdealImage';

# [Beta] Service Accounts

Use this if you want to create Virtual Keys that are not owned by a specific user, but are instead created for production projects.

## Usage

### 1. Set settings for Service Accounts

Set `service_account_settings` if you want to define settings that apply only to service account keys.
```yaml
general_settings:
  service_account_settings:
    enforced_params: ["user"] # the "user" param is enforced on all requests made with any service account key
```
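
Once the config is updated, restart the proxy so the new settings take effect. A minimal sketch, assuming the settings above are saved in a file named `config.yaml`:

```shell
# start (or restart) the LiteLLM proxy with the updated config
litellm --config config.yaml
```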

### 2. Create Service Account Key on LiteLLM Proxy Admin UI

<Image img={require('../../img/create_service_account.png')} />
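
If you prefer to script key creation instead of clicking through the Admin UI, the proxy's `/key/generate` endpoint can also be used. The sketch below is an illustration only: it assumes `sk-1234` is your proxy master key, and it assumes that a `service_account_id` entry in the key's `metadata` is what marks the key as a service account (the Admin UI sets this for you); verify the exact field against your proxy version before relying on it.

```shell
# Hypothetical sketch: generate a service account key via the proxy API.
# Assumes sk-1234 is the proxy master key and that "service_account_id"
# in metadata is the marker the proxy uses for service account keys.
curl --location 'http://localhost:4000/key/generate' \
--header 'Authorization: Bearer sk-1234' \
--header 'Content-Type: application/json' \
--data '{
    "key_alias": "my-production-project",
    "metadata": {"service_account_id": "my-production-project"}
}'
```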
### 3. Test Service Account Key
<Tabs>
<TabItem value="Unsuccessful call" label="Unsuccessful call">
```shell
curl --location 'http://localhost:4000/chat/completions' \
--header 'Authorization: Bearer <sk-your-service-account>' \
--header 'Content-Type: application/json' \
--data '{
    "model": "gpt-3.5-turbo",
    "messages": [
        {
            "role": "user",
            "content": "hello"
        }
    ]
}'
```

Expected Response

```json
{
    "error": {
        "message": "BadRequest please pass param=user in request body. This is a required param for service account",
        "type": "bad_request_error",
        "param": "user",
        "code": "400"
    }
}
```
</TabItem>
<TabItem value="Successful call" label="Successful call">
```shell
curl --location 'http://localhost:4000/chat/completions' \
--header 'Authorization: Bearer <sk-your-service-account>' \
--header 'Content-Type: application/json' \
--data '{
    "model": "gpt-3.5-turbo",
    "messages": [
        {
            "role": "user",
            "content": "hello"
        }
    ],
    "user": "test-user"
}'
```

Expected Response

```json
{
    "id": "chatcmpl-ad9595c7e3784a6783b469218d92d95c",
    "choices": [
        {
            "finish_reason": "stop",
            "index": 0,
            "message": {
                "content": "\n\nHello there, how may I assist you today?",
                "role": "assistant",
                "tool_calls": null,
                "function_call": null
            }
        }
    ],
    "created": 1677652288,
    "model": "gpt-3.5-turbo-0125",
    "object": "chat.completion",
    "system_fingerprint": "fp_44709d6fcb",
    "usage": {
        "completion_tokens": 12,
        "prompt_tokens": 9,
        "total_tokens": 21,
        "completion_tokens_details": null
    },
    "service_tier": null
}
```
</TabItem>
</Tabs>
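
Once the call succeeds, you can double-check how the proxy sees the key (alias, metadata, spend, etc.) with the `/key/info` endpoint. A minimal sketch, assuming `sk-1234` is your proxy master key and `<sk-your-service-account>` is the key created above:

```shell
# inspect the service account key's settings and spend
# assumes sk-1234 is the proxy master key
curl --location 'http://localhost:4000/key/info?key=<sk-your-service-account>' \
--header 'Authorization: Bearer sk-1234'
```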

New binary file (image, 80 KiB): the create_service_account.png screenshot referenced above.


@@ -50,7 +50,7 @@ const sidebars = {
       {
         type: "category",
         label: "🔑 Authentication",
-        items: ["proxy/virtual_keys", "proxy/token_auth", "proxy/oauth2", "proxy/ip_address"],
+        items: ["proxy/virtual_keys", "proxy/token_auth", "proxy/service_accounts", "proxy/ip_address"],
       },
       {
         type: "category",

@@ -2,26 +2,30 @@ model_list:
   - model_name: gpt-3.5-turbo
     litellm_params:
       model: openai/gpt-3.5-turbo
-      api_key: os.environ/OPENAI_API_KEY
+      api_key: fake-key
+      api_base: https://exampleopenaiendpoint-production.up.railway.app/
     model_info:
       supported_environments: ["development", "production", "staging"]
-  - model_name: gpt-4
+  - model_name: rerank-english-v3.0
     litellm_params:
-      model: openai/gpt-4
-      api_key: os.environ/OPENAI_API_KEY
+      model: cohere/rerank-english-v3.0
+      api_key: os.environ/COHERE_API_KEY
     model_info:
       supported_environments: ["production", "staging"]
-  - model_name: gpt-4o
+  - model_name: llava-hf
     litellm_params:
-      model: openai/gpt-4o
-      api_key: os.environ/OPENAI_API_KEY
+      model: openai/llava-hf/llava-v1.6-vicuna-7b-hf
+      api_key: fake-key
     model_info:
-      supported_environments: ["production"]
+      supported_environments: ["production", "staging"]
+
+general_settings:
+  service_account_settings:
+    enforced_params: ["user"]
 
 litellm_settings:
   cache: true
-  callbacks: ["otel"]
+  # callbacks: ["otel"]
 
 general_settings: