This commit is contained in:
Damian Gleumes 2025-04-24 00:56:03 -07:00 committed by GitHub
commit fc37bfd9d6
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
4 changed files with 108 additions and 34 deletions

View file

@ -68,12 +68,49 @@ Once you selected a model, enter your message content and click on `Submit`
<Image img={require('../../img/basic_litellm.gif')} /> <Image img={require('../../img/basic_litellm.gif')} />
### 3.2 Tracking Spend / Usage ### 3.2 Tracking Usage & Spend
After your request is made, navigate to `Logs` on the LiteLLM UI, where you can see Team, Key, Model, Usage and Cost. #### Basic Tracking
<!-- <Image img={require('../../img/litellm_logs_openweb.gif')} /> --> After making requests, navigate to the `Logs` section in the LiteLLM UI to view Model, Usage and Cost information.
#### Per-User Tracking
To track spend and usage for each Open WebUI user, configure both Open WebUI and LiteLLM:
1. **Enable User Info Headers in Open WebUI**
Set the following environment variable for Open WebUI to enable user information in request headers:
```dotenv
ENABLE_FORWARD_USER_INFO_HEADERS=True
```
For more details, see the [Environment Variable Configuration Guide](https://docs.openwebui.com/getting-started/env-configuration/#enable_forward_user_info_headers).
2. **Configure LiteLLM to Parse User Headers**
Add the following to your LiteLLM `config.yaml` to specify a header to use for user tracking:
```yaml
general_settings:
user_header_name: X-OpenWebUI-User-Id
```
<details>
<summary>ⓘ Available tracking options</summary>
You can use any of the following headers for `user_header_name`:
- `X-OpenWebUI-User-Id`
- `X-OpenWebUI-User-Email`
- `X-OpenWebUI-User-Name`
These may offer better readability and easier mental attribution when hosting for a small group of users that you know well.
Choose based on your needs, but note that in Open WebUI:
- Users can modify their own usernames
- Administrators can modify both usernames and emails of any account
</details>
## Render `thinking` content on Open WebUI ## Render `thinking` content on Open WebUI

View file

@ -2452,6 +2452,7 @@ class LitellmDataForBackendLLMCall(TypedDict, total=False):
headers: dict headers: dict
organization: str organization: str
timeout: Optional[float] timeout: Optional[float]
user: Optional[str]
class JWTKeyItem(TypedDict, total=False): class JWTKeyItem(TypedDict, total=False):

View file

@ -242,6 +242,37 @@ class LiteLLMProxyRequestSetup:
return forwarded_headers return forwarded_headers
@staticmethod
def _get_case_insensitive_header(headers: dict, key: str) -> Optional[str]:
"""
Get a case-insensitive header from the headers dictionary.
"""
for header, value in headers.items():
if header.lower() == key.lower():
return value
return None
@staticmethod
def get_user_from_headers(headers: dict, general_settings: Optional[Dict] = None) -> Optional[str]:
"""
Get the user from the specified header if `general_settings.user_header_name` is set.
"""
if general_settings is None:
return None
header_name = general_settings.get("user_header_name")
if header_name is None or header_name == "":
return None
if not isinstance(header_name, str):
raise TypeError(f"Expected user_header_name to be a str but got {type(header_name)}")
user = LiteLLMProxyRequestSetup._get_case_insensitive_header(headers, header_name)
if user is not None:
verbose_logger.info(f"found user \"{user}\" in header \"{header_name}\"")
return user
@staticmethod @staticmethod
def get_openai_org_id_from_headers( def get_openai_org_id_from_headers(
headers: dict, general_settings: Optional[Dict] = None headers: dict, general_settings: Optional[Dict] = None
@ -293,10 +324,12 @@ class LiteLLMProxyRequestSetup:
general_settings: Optional[Dict[str, Any]] = None, general_settings: Optional[Dict[str, Any]] = None,
) -> LitellmDataForBackendLLMCall: ) -> LitellmDataForBackendLLMCall:
""" """
- Adds user from headers
- Adds forwardable headers - Adds forwardable headers
- Adds org id - Adds org id
""" """
data = LitellmDataForBackendLLMCall() data = LitellmDataForBackendLLMCall()
if ( if (
general_settings general_settings
and general_settings.get("forward_client_headers_to_llm_api") is True and general_settings.get("forward_client_headers_to_llm_api") is True
@ -491,6 +524,14 @@ async def add_litellm_data_to_request( # noqa: PLR0915
) )
) )
# Parse user info from headers
user = LiteLLMProxyRequestSetup.get_user_from_headers(_headers, general_settings)
if user is not None:
if user_api_key_dict.end_user_id is None:
user_api_key_dict.end_user_id = user
if "user" not in data:
data["user"] = user
# Include original request and headers in the data # Include original request and headers in the data
data["proxy_server_request"] = { data["proxy_server_request"] = {
"url": str(request.url), "url": str(request.url),

View file

@ -470,23 +470,18 @@ def test_reading_openai_org_id_from_headers():
@pytest.mark.parametrize( @pytest.mark.parametrize(
"headers, expected_data", "headers, general_settings, expected_data",
[ [
({"OpenAI-Organization": "test_org_id"}, {"organization": "test_org_id"}), ({"OpenAI-Organization": "test_org_id"}, None, {"organization": "test_org_id"}),
({"openai-organization": "test_org_id"}, {"organization": "test_org_id"}), ({"openai-organization": "test_org_id"}, None, {"organization": "test_org_id"}),
({}, {}), ({"OpenAI-Organization": "test_org_id", "Authorization": "Bearer test_token"}, None, {"organization": "test_org_id"}),
( ({"X-OpenWebUI-User-Id": "ishaan3"}, {"user_header_name":"X-OpenWebUI-User-Id"}, {"user": "ishaan3"}),
{ ({"x-openwebui-user-id": "ishaan3"}, {"user_header_name":"X-OpenWebUI-User-Id"}, {"user": "ishaan3"}),
"OpenAI-Organization": "test_org_id", ({"X-OpenWebUI-User-Id": "ishaan3"}, {}, {}),
"Authorization": "Bearer test_token", ({}, None, {}),
},
{
"organization": "test_org_id",
},
),
], ],
) )
def test_add_litellm_data_for_backend_llm_call(headers, expected_data): def test_add_litellm_data_for_backend_llm_call(headers, general_settings, expected_data):
import json import json
from litellm.proxy.litellm_pre_call_utils import LiteLLMProxyRequestSetup from litellm.proxy.litellm_pre_call_utils import LiteLLMProxyRequestSetup
from litellm.proxy._types import UserAPIKeyAuth from litellm.proxy._types import UserAPIKeyAuth
@ -498,7 +493,7 @@ def test_add_litellm_data_for_backend_llm_call(headers, expected_data):
data = LiteLLMProxyRequestSetup.add_litellm_data_for_backend_llm_call( data = LiteLLMProxyRequestSetup.add_litellm_data_for_backend_llm_call(
headers=headers, headers=headers,
user_api_key_dict=user_api_key_dict, user_api_key_dict=user_api_key_dict,
general_settings=None, general_settings=general_settings,
) )
assert json.dumps(data, sort_keys=True) == json.dumps(expected_data, sort_keys=True) assert json.dumps(data, sort_keys=True) == json.dumps(expected_data, sort_keys=True)