diff --git a/litellm/llms/huggingface_restapi.py b/litellm/llms/huggingface_restapi.py
index 2e4e218152..69715167bb 100644
--- a/litellm/llms/huggingface_restapi.py
+++ b/litellm/llms/huggingface_restapi.py
@@ -619,6 +619,8 @@ class Huggingface(BaseLLM):
 
         # SSL certificates (a.k.a CA bundle) used to verify the identity of requested hosts.
         ssl_verify = os.getenv("SSL_VERIFY", litellm.ssl_verify)
+        if ssl_verify in ["True", "False"]:
+            ssl_verify = ssl_verify == "True"
 
         if acompletion is True:
             ### ASYNC STREAMING
@@ -634,7 +636,7 @@ class Huggingface(BaseLLM):
                 headers=headers,
                 data=json.dumps(data),
                 stream=optional_params["stream"],
-                verify=ssl_verify
+                verify=ssl_verify,
             )
             return response.iter_lines()
         ### SYNC COMPLETION
@@ -643,7 +645,7 @@ class Huggingface(BaseLLM):
                 completion_url,
                 headers=headers,
                 data=json.dumps(data),
-                verify=ssl_verify
+                verify=ssl_verify,
             )
 
             ## Some servers might return streaming responses even though stream was not set to true. (e.g. Baseten)
diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py
index 25168ec017..47c6aedc16 100644
--- a/litellm/tests/test_completion.py
+++ b/litellm/tests/test_completion.py
@@ -1768,7 +1768,7 @@ def test_get_hf_task_for_model():
 # ################### Hugging Face TGI models ########################
 # # TGI model
 # # this is a TGI model https://huggingface.co/glaiveai/glaive-coder-7b
-def tgi_mock_post(url, data=None, json=None, headers=None):
+def tgi_mock_post(url, **kwargs):
     mock_response = MagicMock()
     mock_response.status_code = 200
     mock_response.headers = {"Content-Type": "application/json"}
@@ -1936,7 +1936,7 @@ async def test_openai_compatible_custom_api_base(provider):
 # hf_test_completion_none_task()
 
 
-def mock_post(url, data=None, json=None, headers=None):
+def mock_post(url, **kwargs):
     print(f"url={url}")
     if "text-classification" in url:
         raise Exception("Model not found")
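
Why the new guard compares against the string rather than calling `bool()`: `bool("False")` is `True`, because any non-empty string is truthy. A minimal standalone sketch of the intended behavior (the helper name `parse_ssl_verify` is hypothetical; the diff inlines this logic directly in `completion()`):

```python
import os

def parse_ssl_verify(default=True):
    """Map the SSL_VERIFY env var to what requests' `verify` parameter
    expects: a bool, or a string path to a CA bundle."""
    value = os.getenv("SSL_VERIFY", default)
    if value in ["True", "False"]:
        # bool("False") would be True (non-empty string is truthy),
        # so compare against the literal string instead.
        return value == "True"
    return value

os.environ["SSL_VERIFY"] = "False"
assert parse_ssl_verify() is False  # bool("False") would have given True
os.environ["SSL_VERIFY"] = "/etc/ssl/certs/ca-bundle.crt"
assert parse_ssl_verify() == "/etc/ssl/certs/ca-bundle.crt"  # CA paths pass through
```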
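
The test-side change loosens the mock signatures to `**kwargs` so the test doubles keep accepting calls once `verify=` (or any future keyword) is forwarded to `requests.post`. A sketch of the pattern, assuming `requests.post` is patched the way the surrounding tests do (the URL and payload are illustrative):

```python
from unittest.mock import MagicMock, patch

import requests

def mock_post(url, **kwargs):
    # Accept arbitrary keywords (data=, headers=, verify=, ...) so adding
    # a new argument at the call site cannot break the test double.
    response = MagicMock()
    response.status_code = 200
    response.headers = {"Content-Type": "application/json"}
    return response

with patch("requests.post", side_effect=mock_post):
    resp = requests.post(
        "https://api.example.com/models/glaiveai/glaive-coder-7b",
        data="{}",
        verify=True,  # the old fixed signature would raise TypeError here
    )
    assert resp.status_code == 200
```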