Ollama ssl verify = False + Spend Logs reliability fixes (#7931)

* fix(http_handler.py): support passing ssl verify dynamically and using the correct httpx client based on passed ssl verify param

Fixes https://github.com/BerriAI/litellm/issues/6499

* feat(llm_http_handler.py): support passing `ssl_verify=False` dynamically in call args

Closes https://github.com/BerriAI/litellm/issues/6499
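
A minimal usage sketch of what this enables (model name and prompt are illustrative):

```python
import litellm

# ssl_verify=False is forwarded to the underlying httpx client, so calls to a
# self-signed Ollama endpoint no longer fail certificate verification.
response = litellm.completion(
    model="ollama/llama3",
    messages=[{"role": "user", "content": "Hello!"}],
    ssl_verify=False,
)
print(response.choices[0].message.content)
```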

* fix(proxy/utils.py): prevent bad logs from breaking all cost tracking + reset list regardless of success/failure

prevents a malformed log from breaking all spend tracking, since failed logs are otherwise retried indefinitely
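
A sketch of the pattern (names assumed, not the actual proxy/utils.py code):

```python
async def flush_spend_logs(spend_logs: list, db) -> None:
    # Copy then clear up front: the in-memory buffer is reset regardless of
    # success/failure, so one bad entry can't be retried forever.
    batch = spend_logs[:]
    spend_logs.clear()
    for payload in batch:
        try:
            await db.spend_logs.create(data=payload)
        except Exception as err:
            # Drop only the malformed log; the remaining entries still get written.
            print(f"Dropping malformed spend log: {err}")
```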

* test(test_proxy_utils.py): add test to ensure bad log is dropped

* test(test_proxy_utils.py): ensure in-memory spend logs reset after bad log error

* test(test_user_api_key_auth.py): add unit test to ensure end user id as str works

* fix(auth_utils.py): ensure extracted end user id is always a str

prevents db cost tracking errors
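
A sketch of the coercion (helper name taken from the test bullet below; the OpenAI-style `user` key is an assumption):

```python
from typing import Any, Dict, Optional

def get_end_user_id_from_request_body(request_body: Dict[str, Any]) -> Optional[str]:
    end_user_id = request_body.get("user")
    if end_user_id is None:
        return None
    # Clients may send numeric ids; always hand the DB a string.
    return str(end_user_id)
```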

* test(test_auth_utils.py): ensure get end user id from request body always returns a string
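
A hedged pytest sketch of that check, assuming the helper sketched above is in scope:

```python
import pytest

@pytest.mark.parametrize("user_value", ["user-1", 42, 3.14])
def test_get_end_user_id_from_request_body_returns_str(user_value):
    result = get_end_user_id_from_request_body({"user": user_value})
    assert isinstance(result, str)
```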

* test: update tests

* test: skip bedrock test - behaviour now supported

* test: fix testing

* refactor(spend_tracking_utils.py): reduce size of get_logging_payload

* test: fix test

* bump: version 1.59.4 → 1.59.5

* Revert "bump: version 1.59.4 → 1.59.5"

This reverts commit 1182b46b2e.

* fix(utils.py): fix spend logs retry logic

* fix(spend_tracking_utils.py): fix get tags

* fix(spend_tracking_utils.py): fix end user id spend tracking on pass-through endpoints
Krish Dholakia 2025-01-23 23:05:41 -08:00 committed by GitHub
parent 851b0c4c4d
commit 1e011b66d3
17 changed files with 406 additions and 187 deletions

llm_http_handler.py

@@ -158,7 +158,8 @@ class BaseLLMHTTPHandler:
     ):
         if client is None:
             async_httpx_client = get_async_httpx_client(
-                llm_provider=litellm.LlmProviders(custom_llm_provider)
+                llm_provider=litellm.LlmProviders(custom_llm_provider),
+                params={"ssl_verify": litellm_params.get("ssl_verify", None)},
             )
         else:
             async_httpx_client = client
@@ -318,7 +319,9 @@
         )
         if client is None or not isinstance(client, HTTPHandler):
-            sync_httpx_client = _get_httpx_client()
+            sync_httpx_client = _get_httpx_client(
+                params={"ssl_verify": litellm_params.get("ssl_verify", None)}
+            )
         else:
             sync_httpx_client = client
@@ -359,7 +362,11 @@
         client: Optional[HTTPHandler] = None,
     ) -> Tuple[Any, dict]:
         if client is None or not isinstance(client, HTTPHandler):
-            sync_httpx_client = _get_httpx_client()
+            sync_httpx_client = _get_httpx_client(
+                {
+                    "ssl_verify": litellm_params.get("ssl_verify", None),
+                }
+            )
         else:
             sync_httpx_client = client
         stream = True
@@ -411,7 +418,7 @@
         fake_stream: bool = False,
         client: Optional[AsyncHTTPHandler] = None,
     ):
-        completion_stream, _response_headers = await self.make_async_call(
+        completion_stream, _response_headers = await self.make_async_call_stream_helper(
             custom_llm_provider=custom_llm_provider,
             provider_config=provider_config,
             api_base=api_base,
@@ -432,7 +439,7 @@
         )
         return streamwrapper

-    async def make_async_call(
+    async def make_async_call_stream_helper(
         self,
         custom_llm_provider: str,
         provider_config: BaseConfig,
@@ -446,9 +453,15 @@
         fake_stream: bool = False,
         client: Optional[AsyncHTTPHandler] = None,
     ) -> Tuple[Any, httpx.Headers]:
+        """
+        Helper function for making an async call with stream.
+
+        Handles fake stream as well.
+        """
         if client is None:
             async_httpx_client = get_async_httpx_client(
-                llm_provider=litellm.LlmProviders(custom_llm_provider)
+                llm_provider=litellm.LlmProviders(custom_llm_provider),
+                params={"ssl_verify": litellm_params.get("ssl_verify", None)},
             )
         else:
             async_httpx_client = client
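
For context, a plausible simplification of how the `ssl_verify` param reaches httpx inside the client factory (not the actual `_get_httpx_client` body):

```python
from typing import Optional

import httpx

def _get_httpx_client(params: Optional[dict] = None) -> httpx.Client:
    ssl_verify = (params or {}).get("ssl_verify")
    # httpx accepts a bool, an ssl.SSLContext, or a CA bundle path for verify;
    # fall back to normal certificate verification when nothing is passed.
    return httpx.Client(verify=ssl_verify if ssl_verify is not None else True)
```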