litellm-mirror/tests/local_testing/test_auth_utils.py
Krish Dholakia 1e011b66d3
Ollama ssl verify = False + Spend Logs reliability fixes (#7931)
* fix(http_handler.py): support passing `ssl_verify` dynamically and select the correct httpx client based on the passed `ssl_verify` param

Fixes https://github.com/BerriAI/litellm/issues/6499

* feat(llm_http_handler.py): support passing `ssl_verify=False` dynamically in call args

Closes https://github.com/BerriAI/litellm/issues/6499
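
A minimal usage sketch of the new per-call toggle (the Ollama model name and api_base below are placeholders, not from this change):

    import litellm

    response = litellm.completion(
        model="ollama/llama3",  # placeholder model
        messages=[{"role": "user", "content": "hi"}],
        api_base="https://ollama.internal:11434",  # hypothetical self-signed host
        ssl_verify=False,  # disable TLS verification for this call only
    )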

* fix(proxy/utils.py): prevent bad logs from breaking all cost tracking + reset list regardless of success/failure

Prevents malformed logs from breaking all spend tracking, since failed logs were previously retried indefinitely.
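
The pattern, roughly (function and DB names here are illustrative stand-ins, not the actual proxy/utils.py code):

    async def _flush_spend_logs(buffer: list, db) -> None:
        # Snapshot and clear up front so the in-memory list is reset
        # regardless of whether the DB write succeeds or raises.
        logs_to_write = list(buffer)
        buffer.clear()
        try:
            await db.insert_spend_logs(logs_to_write)  # hypothetical writer
        except Exception:
            # Drop the malformed batch instead of re-queuing it forever,
            # so one bad log cannot stall cost tracking for all requests.
            pass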

* test(test_proxy_utils.py): add test to ensure bad log is dropped

* test(test_proxy_utils.py): ensure in-memory spend logs reset after bad log error

* test(test_user_api_key_auth.py): add unit test to ensure end user id as str works

* fix(auth_utils.py): ensure extracted end user id is always a str

Prevents DB cost-tracking errors.
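
The gist of the fix, as a sketch (the real helper in auth_utils.py may consult other fields; this matches the behavior asserted in the test below):

    def get_end_user_id_from_request_body(request_body: dict):
        # Sketch only: coerce whatever the client sent ("user": 123, a UUID
        # object, etc.) to str so DB spend tracking never receives a
        # non-string end user id.
        user = request_body.get("user")
        return str(user) if user is not None else None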

* test(test_auth_utils.py): ensure get end user id from request body always returns a string

* test: update tests

* test: skip bedrock test - behaviour now supported

* test: fix testing

* refactor(spend_tracking_utils.py): reduce size of get_logging_payload

* test: fix test

* bump: version 1.59.4 → 1.59.5

* Revert "bump: version 1.59.4 → 1.59.5"

This reverts commit 1182b46b2e.

* fix(utils.py): fix spend logs retry logic

* fix(spend_tracking_utils.py): fix get tags

* fix(spend_tracking_utils.py): fix end user id spend tracking on pass-through endpoints
2025-01-23 23:05:41 -08:00


# What is this?
## Tests if proxy/auth/auth_utils.py works as expected
import sys, os, asyncio, time, random, uuid
import traceback
from dotenv import load_dotenv

load_dotenv()

sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path
import pytest
import litellm
from litellm.proxy.auth.auth_utils import (
    _allow_model_level_clientside_configurable_parameters,
)
from litellm.router import Router


@pytest.mark.parametrize(
    "allowed_param, input_value, should_return_true",
    [
        # plain string entry: any client-side api_base is allowed
        ("api_base", {"api_base": "http://dummy.com"}, True),
        (
            {"api_base": "https://api.openai.com/v1"},
            {"api_base": "https://api.openai.com/v1"},
            True,
        ),  # exact match -> should return True
        (
            {"api_base": "https://api.openai.com/v1"},
            {"api_base": "https://api.anthropic.com/v1"},
            False,
        ),  # different base -> should return False
        (
            {"api_base": r"^https://litellm.*direct\.fireworks\.ai/v1$"},
            {"api_base": "https://litellm-dev.direct.fireworks.ai/v1"},
            True,
        ),  # regex match -> should return True
        (
            {"api_base": r"^https://litellm.*novice\.fireworks\.ai/v1$"},
            {"api_base": "https://litellm-dev.direct.fireworks.ai/v1"},
            False,
        ),  # regex mismatch -> should return False
    ],
)
def test_configurable_clientside_parameters(
    allowed_param, input_value, should_return_true
):
    router = Router(
        model_list=[
            {
                "model_name": "dummy-model",
                "litellm_params": {
                    "model": "gpt-3.5-turbo",
                    "api_key": "dummy-key",
                    "configurable_clientside_auth_params": [allowed_param],
                },
            }
        ]
    )
    resp = _allow_model_level_clientside_configurable_parameters(
        model="dummy-model",
        param="api_base",
        request_body_value=input_value["api_base"],
        llm_router=router,
    )
    print(resp)
    assert resp == should_return_true


def test_get_end_user_id_from_request_body_always_returns_str():
    from litellm.proxy.auth.auth_utils import get_end_user_id_from_request_body

    request_body = {"user": 123}
    end_user_id = get_end_user_id_from_request_body(request_body)
    assert end_user_id == "123"
    assert isinstance(end_user_id, str)