mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 03:04:13 +00:00)

fix(http_handler.py): allow setting ca bundle path

commit a16a1c407a (parent f1ce7bb300)
3 changed files with 12 additions and 3 deletions

@@ -102,6 +102,7 @@ common_cloud_provider_auth_params: dict = {
 }
 use_client: bool = False
 ssl_verify: bool = True
+ssl_certificate: Optional[str] = None
 disable_streaming_logging: bool = False
 in_memory_llm_clients_cache: dict = {}
 ### GUARDRAILS ###
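
The first hunk adds a module-level ssl_certificate setting next to the existing ssl_verify flag. A minimal sketch of how it could be configured, using only the names introduced in this diff (the path below is a placeholder):

    import os
    import litellm

    # Option 1: set the new module-level default added in this commit.
    litellm.ssl_certificate = "/path/to/client.pem"

    # Option 2: let the handlers read it from the environment instead,
    # via the SSL_CERTIFICATE variable checked in the hunks below.
    os.environ["SSL_CERTIFICATE"] = "/path/to/client.pem"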

@@ -12,15 +12,16 @@ class AsyncHTTPHandler:
         timeout: Optional[Union[float, httpx.Timeout]] = None,
         concurrent_limit=1000,
     ):
-        sync_proxy_mounts = None
         async_proxy_mounts = None
         # Check if the HTTP_PROXY and HTTPS_PROXY environment variables are set and use them accordingly.
         http_proxy = os.getenv("HTTP_PROXY", None)
         https_proxy = os.getenv("HTTPS_PROXY", None)
         no_proxy = os.getenv("NO_PROXY", None)
         ssl_verify = bool(os.getenv("SSL_VERIFY", litellm.ssl_verify))
+        cert = os.getenv(
+            "SSL_CERTIFICATE", litellm.ssl_certificate
+        )  # /path/to/client.pem
 
-        sync_proxy_mounts = None
         if http_proxy is not None and https_proxy is not None:
             async_proxy_mounts = {
                 "http://": httpx.AsyncHTTPTransport(proxy=httpx.Proxy(url=http_proxy)),

@@ -46,6 +47,7 @@ class AsyncHTTPHandler:
                 ),
                 verify=ssl_verify,
                 mounts=async_proxy_mounts,
+                cert=cert,
             )
 
     async def close(self):

@@ -108,6 +110,9 @@ class HTTPHandler:
         https_proxy = os.getenv("HTTPS_PROXY", None)
         no_proxy = os.getenv("NO_PROXY", None)
         ssl_verify = bool(os.getenv("SSL_VERIFY", litellm.ssl_verify))
+        cert = os.getenv(
+            "SSL_CERTIFICATE", litellm.ssl_certificate
+        )  # /path/to/client.pem
 
         sync_proxy_mounts = None
         if http_proxy is not None and https_proxy is not None:

@@ -132,6 +137,7 @@ class HTTPHandler:
                 ),
                 verify=ssl_verify,
                 mounts=sync_proxy_mounts,
+                cert=cert,
             )
         else:
             self.client = client
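
Taken together, the four hunks above read the certificate once per handler and forward it to the underlying httpx client. A condensed sketch of that pattern, with the timeout, limits, and proxy-mount arguments from the real constructors omitted for brevity:

    import os
    import httpx
    import litellm

    # Same lookups as in the hunks above; the path comment is a placeholder.
    ssl_verify = bool(os.getenv("SSL_VERIFY", litellm.ssl_verify))
    cert = os.getenv("SSL_CERTIFICATE", litellm.ssl_certificate)  # e.g. /path/to/client.pem

    client = httpx.AsyncClient(
        verify=ssl_verify,  # verification toggle; httpx also accepts a CA bundle path here
        cert=cert,          # client-side certificate path forwarded by this commit
    )

Note that in httpx the cert argument carries the client certificate, while a custom CA bundle path would go through verify; the new SSL_CERTIFICATE / litellm.ssl_certificate setting feeds the former.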

@@ -223,7 +223,7 @@ async def acompletion(
     extra_headers: Optional[dict] = None,
     # Optional liteLLM function params
     **kwargs,
-):
+) -> Union[ModelResponse, CustomStreamWrapper]:
     """
     Asynchronously executes a litellm.completion() call for any of litellm supported llms (example gpt-4, gpt-3.5-turbo, claude-2, command-nightly)
 
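
With the annotation above, a non-streaming call resolves to a ModelResponse and a streaming call to a CustomStreamWrapper. A minimal usage sketch, assuming the relevant provider credentials are configured (model name and prompt are placeholders):

    import asyncio
    import litellm

    async def main():
        # Non-streaming: the awaited result is a ModelResponse.
        response = await litellm.acompletion(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hello"}],
        )
        print(response)

        # Streaming: the other branch of the Union, a CustomStreamWrapper,
        # is returned and can be iterated asynchronously.
        stream = await litellm.acompletion(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hello"}],
            stream=True,
        )
        async for chunk in stream:
            print(chunk)

    asyncio.run(main())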

@@ -339,6 +339,8 @@ async def acompletion(
         if isinstance(init_response, dict) or isinstance(
             init_response, ModelResponse
         ): ## CACHING SCENARIO
+            if isinstance(init_response, dict):
+                response = ModelResponse(**init_response)
             response = init_response
         elif asyncio.iscoroutine(init_response):
             response = await init_response
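
The extra branch in the caching path rebuilds a cached dict into a ModelResponse before the result is handed back, mirroring what a live call would return. A small illustrative sketch of that normalization (the cache entry shown is made up; real entries hold whatever the cache stored):

    from litellm import ModelResponse

    cached_entry = {"model": "gpt-3.5-turbo"}  # placeholder cache hit
    response = ModelResponse(**cached_entry)   # same rebuild as the new branch above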