forked from phoenix/litellm-mirror
fix(huggingface_restapi.py): fix tests

parent 0c26b36d9d
commit 8c5ff150f6

2 changed files with 6 additions and 4 deletions
@@ -619,6 +619,8 @@ class Huggingface(BaseLLM):
         # SSL certificates (a.k.a CA bundle) used to verify the identity of requested hosts.
         ssl_verify = os.getenv("SSL_VERIFY", litellm.ssl_verify)
+        if ssl_verify in ["True", "False"]:
+            ssl_verify = bool(ssl_verify)

         if acompletion is True:
             ### ASYNC STREAMING
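A note on the new SSL_VERIFY handling: in Python, bool() on any non-empty string is True, so bool("False") still evaluates to True. A stricter parse would compare the string value explicitly; the sketch below is illustrative and not part of this commit:

import os
import litellm

# Hypothetical stricter parse of the SSL_VERIFY env var (not in this commit).
# bool("False") == True because any non-empty string is truthy, so the
# string has to be compared explicitly to get a real boolean.
ssl_verify = os.getenv("SSL_VERIFY", litellm.ssl_verify)
if ssl_verify in ("True", "False"):
    ssl_verify = ssl_verify == "True"  # "True" -> True, "False" -> False
# any other value (e.g. a path to a CA bundle) passes through unchanged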
@@ -634,7 +636,7 @@ class Huggingface(BaseLLM):
                 headers=headers,
                 data=json.dumps(data),
                 stream=optional_params["stream"],
-                verify=ssl_verify
+                verify=ssl_verify,
             )
             return response.iter_lines()
         ### SYNC COMPLETION
@@ -643,7 +645,7 @@ class Huggingface(BaseLLM):
             completion_url,
             headers=headers,
             data=json.dumps(data),
-            verify=ssl_verify
+            verify=ssl_verify,
         )

         ## Some servers might return streaming responses even though stream was not set to true. (e.g. Baseten)
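For context on the keyword being added here: requests accepts verify as either a boolean or a path to a CA bundle file, which is why the value derived from SSL_VERIFY can be forwarded directly. A minimal sketch, with a placeholder endpoint and payload:

import json
import requests

ssl_verify = True  # or a path such as "/etc/ssl/certs/ca-bundle.crt"
response = requests.post(
    "https://example.com/generate",  # placeholder URL, not litellm's endpoint
    headers={"Content-Type": "application/json"},
    data=json.dumps({"inputs": "Hello"}),
    verify=ssl_verify,  # the keyword this commit now always passes
)
print(response.status_code)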
@@ -1768,7 +1768,7 @@ def test_get_hf_task_for_model():
 # ################### Hugging Face TGI models ########################
 # # TGI model
 # # this is a TGI model https://huggingface.co/glaiveai/glaive-coder-7b
-def tgi_mock_post(url, data=None, json=None, headers=None):
+def tgi_mock_post(url, **kwargs):
     mock_response = MagicMock()
     mock_response.status_code = 200
     mock_response.headers = {"Content-Type": "application/json"}
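The signature change matters because the client code now always passes verify=ssl_verify: a mock with a fixed parameter list raises TypeError on the unexpected keyword, while **kwargs absorbs it. A minimal demonstration of the failure mode:

# Why the mocks switched to **kwargs: the old fixed signature rejects
# the verify= keyword that requests.post now receives in these code paths.
def old_mock(url, data=None, json=None, headers=None):
    return "ok"

def new_mock(url, **kwargs):
    return "ok"

try:
    old_mock("http://x", verify=True)
except TypeError as e:
    print(e)  # ... got an unexpected keyword argument 'verify'

print(new_mock("http://x", verify=True))  # ok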
@@ -1936,7 +1936,7 @@ async def test_openai_compatible_custom_api_base(provider):
 # hf_test_completion_none_task()


-def mock_post(url, data=None, json=None, headers=None):
+def mock_post(url, **kwargs):
     print(f"url={url}")
     if "text-classification" in url:
         raise Exception("Model not found")
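In a test, a mock like this is typically wired in with unittest.mock.patch so the code under test never touches the network. A sketch of that pattern (the call and assertion are illustrative, not the test from this repo):

from unittest.mock import MagicMock, patch
import requests

def mock_post(url, **kwargs):
    mock_response = MagicMock()
    mock_response.status_code = 200
    return mock_response

# Route requests.post through the mock; any keyword (verify=, stream=, ...)
# is accepted thanks to the **kwargs signature.
with patch("requests.post", side_effect=mock_post):
    resp = requests.post("https://example.com", verify=True)
    assert resp.status_code == 200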