From add153d1106efb8520a9f8679ab776223bcb0ad5 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Fri, 15 Dec 2023 21:02:41 -0800
Subject: [PATCH] fix(huggingface_restapi.py): add support for additional hf
 embedding formats

---
 litellm/llms/huggingface_restapi.py | 8 ++++++++
 litellm/main.py                     | 3 +--
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/litellm/llms/huggingface_restapi.py b/litellm/llms/huggingface_restapi.py
index c347910f89..2f9485963a 100644
--- a/litellm/llms/huggingface_restapi.py
+++ b/litellm/llms/huggingface_restapi.py
@@ -584,6 +584,14 @@ class Huggingface(BaseLLM):
                         "embedding": embedding # flatten list returned from hf
                     }
                 )
+            elif isinstance(embedding, list) and isinstance(embedding[0], float):
+                output_data.append(
+                    {
+                        "object": "embedding",
+                        "index": idx,
+                        "embedding": embedding # flatten list returned from hf
+                    }
+                )
             else:
                 output_data.append(
                     {
diff --git a/litellm/main.py b/litellm/main.py
index 16ce2850cf..31613a67aa 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -1748,8 +1748,7 @@ async def aembedding(*args, **kwargs):
             or custom_llm_provider == "anyscale"
             or custom_llm_provider == "openrouter"
             or custom_llm_provider == "deepinfra"
-            or custom_llm_provider == "perplexity"
-            or custom_llm_provider == "huggingface"): # currently implemented aiohttp calls for just azure and openai, soon all.
+            or custom_llm_provider == "perplexity"): # currently implemented aiohttp calls for just azure and openai, soon all.
             # Await normally
             init_response = await loop.run_in_executor(None, func_with_context)
             if isinstance(init_response, dict) or isinstance(init_response, ModelResponse): ## CACHING SCENARIO
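
Note (reviewer sketch, not part of the patch): the new elif handles responses where the Hugging Face Inference API returns one flat list of floats per input string, alongside the existing branches for other response shapes. Below is a minimal, self-contained illustration of that dispatch, assuming hypothetical names (format_hf_embeddings is not a litellm function, and the else-branch behavior is an assumption for this sketch).

# Sketch only -- not litellm's implementation. Maps Hugging Face embedding
# responses to OpenAI-style embedding objects. The flat list-of-floats case
# is the format the new elif branch in this patch accepts.
from typing import Any, List

def format_hf_embeddings(embeddings: List[Any]) -> List[dict]:
    output_data = []
    for idx, embedding in enumerate(embeddings):
        if isinstance(embedding, float):
            # scalar per input (e.g. a similarity score)
            output_data.append(
                {"object": "embedding", "index": idx, "embedding": [embedding]}
            )
        elif isinstance(embedding, list) and isinstance(embedding[0], float):
            # one flat vector per input string -- the case added by this patch
            output_data.append(
                {"object": "embedding", "index": idx, "embedding": embedding}
            )
        else:
            # nested token-level vectors; for this sketch, keep the first vector
            output_data.append(
                {"object": "embedding", "index": idx, "embedding": embedding[0]}
            )
    return output_data

if __name__ == "__main__":
    # Example: two inputs, each embedded as a flat 3-dimensional vector.
    print(format_hf_embeddings([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]))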