fix(http_handler.py): allow setting ca bundle path

Krrish Dholakia 2024-06-01 14:48:53 -07:00
parent f1ce7bb300
commit a16a1c407a
3 changed files with 12 additions and 3 deletions

@@ -102,6 +102,7 @@ common_cloud_provider_auth_params: dict = {
 }
 use_client: bool = False
 ssl_verify: bool = True
+ssl_certificate: Optional[str] = None
 disable_streaming_logging: bool = False
 in_memory_llm_clients_cache: dict = {}
 ### GUARDRAILS ###
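For context: the new module-level flag mirrors the existing ssl_verify switch, and the handlers below read it with an environment-variable override. A minimal usage sketch (the certificate path is a placeholder):

    import os
    import litellm

    # Set the client-certificate bundle path in code via the new flag...
    litellm.ssl_certificate = "/path/to/client.pem"  # placeholder path

    # ...or via the environment; the handlers call
    # os.getenv("SSL_CERTIFICATE", litellm.ssl_certificate),
    # so the environment variable takes precedence when both are set.
    os.environ["SSL_CERTIFICATE"] = "/path/to/client.pem"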

@@ -12,15 +12,16 @@ class AsyncHTTPHandler:
         timeout: Optional[Union[float, httpx.Timeout]] = None,
         concurrent_limit=1000,
     ):
-        sync_proxy_mounts = None
         async_proxy_mounts = None
         # Check if the HTTP_PROXY and HTTPS_PROXY environment variables are set and use them accordingly.
         http_proxy = os.getenv("HTTP_PROXY", None)
         https_proxy = os.getenv("HTTPS_PROXY", None)
         no_proxy = os.getenv("NO_PROXY", None)
         ssl_verify = bool(os.getenv("SSL_VERIFY", litellm.ssl_verify))
+        cert = os.getenv(
+            "SSL_CERTIFICATE", litellm.ssl_certificate
+        )  # /path/to/client.pem
 
-        sync_proxy_mounts = None
         if http_proxy is not None and https_proxy is not None:
             async_proxy_mounts = {
                 "http://": httpx.AsyncHTTPTransport(proxy=httpx.Proxy(url=http_proxy)),
@@ -46,6 +47,7 @@
             ),
             verify=ssl_verify,
             mounts=async_proxy_mounts,
+            cert=cert,
         )
 
     async def close(self):
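The new cert keyword is forwarded straight to httpx.AsyncClient, which accepts either a path to a PEM file (certificate, optionally with key) or a (cert, key) tuple. A standalone sketch of the resulting construction, with illustrative values:

    import httpx

    client = httpx.AsyncClient(
        timeout=httpx.Timeout(600.0, connect=5.0),  # illustrative timeout
        limits=httpx.Limits(
            max_connections=1000,
            max_keepalive_connections=1000,
        ),
        verify=True,                 # ssl_verify in the handler
        cert="/path/to/client.pem",  # cert in the handler; None disables client certs
    )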
@@ -108,6 +110,9 @@ class HTTPHandler:
         https_proxy = os.getenv("HTTPS_PROXY", None)
         no_proxy = os.getenv("NO_PROXY", None)
         ssl_verify = bool(os.getenv("SSL_VERIFY", litellm.ssl_verify))
+        cert = os.getenv(
+            "SSL_CERTIFICATE", litellm.ssl_certificate
+        )  # /path/to/client.pem
 
         sync_proxy_mounts = None
         if http_proxy is not None and https_proxy is not None:
@@ -132,6 +137,7 @@ class HTTPHandler:
                 ),
                 verify=ssl_verify,
                 mounts=sync_proxy_mounts,
+                cert=cert,
             )
         else:
             self.client = client
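The sync handler follows the same env-driven pattern as the async one. A sketch of how the pieces fit together outside the class (proxy URLs and paths are placeholders):

    import os
    import httpx

    http_proxy = os.getenv("HTTP_PROXY", None)
    https_proxy = os.getenv("HTTPS_PROXY", None)

    sync_proxy_mounts = None
    if http_proxy is not None and https_proxy is not None:
        sync_proxy_mounts = {
            "http://": httpx.HTTPTransport(proxy=httpx.Proxy(url=http_proxy)),
            "https://": httpx.HTTPTransport(proxy=httpx.Proxy(url=https_proxy)),
        }

    client = httpx.Client(
        verify=True,                               # mirrors ssl_verify
        cert=os.getenv("SSL_CERTIFICATE", None),   # mirrors the handler's cert lookup
        mounts=sync_proxy_mounts,
    )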

@@ -223,7 +223,7 @@
     extra_headers: Optional[dict] = None,
     # Optional liteLLM function params
     **kwargs,
-):
+) -> Union[ModelResponse, CustomStreamWrapper]:
     """
     Asynchronously executes a litellm.completion() call for any of litellm supported llms (example gpt-4, gpt-3.5-turbo, claude-2, command-nightly)
 
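With the return annotation in place, callers can narrow the union before touching response fields. A hedged usage sketch (model, prompt, and import paths reflect the module layout at this commit):

    import asyncio

    import litellm
    from litellm.utils import CustomStreamWrapper, ModelResponse

    async def main() -> None:
        response = await litellm.acompletion(
            model="gpt-3.5-turbo",  # placeholder model
            messages=[{"role": "user", "content": "hello"}],
        )
        if isinstance(response, CustomStreamWrapper):  # stream=True path
            async for chunk in response:
                print(chunk)
        else:  # plain ModelResponse
            print(response.choices[0].message.content)

    asyncio.run(main())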
@@ -339,6 +339,8 @@
         if isinstance(init_response, dict) or isinstance(
             init_response, ModelResponse
         ):  ## CACHING SCENARIO
+            if isinstance(init_response, dict):
+                response = ModelResponse(**init_response)
             response = init_response
         elif asyncio.iscoroutine(init_response):
             response = await init_response
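The added branch rehydrates cache hits: cached responses can come back as plain dicts, and ModelResponse(**...) rebuilds the typed object. A minimal sketch of the conversion, written with an explicit else for clarity and an illustrative cached payload:

    from litellm.utils import ModelResponse

    cached = {"id": "chatcmpl-123", "model": "gpt-3.5-turbo", "choices": []}  # illustrative payload

    init_response = cached
    if isinstance(init_response, dict):
        response = ModelResponse(**init_response)  # typed object from the cached dict
    else:
        response = init_response  # already a ModelResponse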