fix(huggingface_restapi.py): add support for additional hf embedding formats

Krrish Dholakia 2023-12-15 21:02:41 -08:00
parent 5fe5149070
commit add153d110
2 changed files with 9 additions and 2 deletions

huggingface_restapi.py

@@ -584,6 +584,14 @@ class Huggingface(BaseLLM):
                         "embedding": embedding # flatten list returned from hf
                     }
                 )
+            elif isinstance(embedding, list) and isinstance(embedding[0], float):
+                output_data.append(
+                    {
+                        "object": "embedding",
+                        "index": idx,
+                        "embedding": embedding # flatten list returned from hf
+                    }
+                )
             else:
                 output_data.append(
                     {
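
The added elif covers Hugging Face endpoints that already return one flat vector (a list of floats) per input, alongside the existing branch that flattens nested lists. A minimal sketch of the resulting normalization, assuming a hypothetical helper normalize_hf_embeddings (output_data, idx, and embedding mirror the diff; everything else is illustrative, not litellm's exact code):

from typing import Any, Dict, List

def normalize_hf_embeddings(embeddings: List[Any]) -> List[Dict[str, Any]]:
    # Hypothetical helper: normalize the shapes a HF inference endpoint
    # can return into OpenAI-style embedding objects.
    output_data = []
    for idx, embedding in enumerate(embeddings):
        if isinstance(embedding, list) and isinstance(embedding[0], float):
            # already a flat vector per input: the case the new branch handles
            vector = embedding
        else:
            # nested list (e.g. [[0.1, ...]]): flatten one level, as the
            # surrounding branches in the diff do
            vector = [value for sublist in embedding for value in sublist]
        output_data.append(
            {
                "object": "embedding",
                "index": idx,
                "embedding": vector,
            }
        )
    return output_data

# Both response shapes normalize to the same structure:
assert normalize_hf_embeddings([[0.1, 0.2]])[0]["embedding"] == [0.1, 0.2]
assert normalize_hf_embeddings([[[0.1, 0.2]]])[0]["embedding"] == [0.1, 0.2]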

main.py

@@ -1748,7 +1748,8 @@ async def aembedding(*args, **kwargs):
             or custom_llm_provider == "anyscale"
             or custom_llm_provider == "openrouter"
             or custom_llm_provider == "deepinfra"
-            or custom_llm_provider == "perplexity"): # currently implemented aiohttp calls for just azure and openai, soon all.
+            or custom_llm_provider == "perplexity"
+            or custom_llm_provider == "huggingface"): # currently implemented aiohttp calls for just azure and openai, soon all.
             # Await normally
             init_response = await loop.run_in_executor(None, func_with_context)
             if isinstance(init_response, dict) or isinstance(init_response, ModelResponse): ## CACHING SCENARIO
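
The second file's change only extends the provider check: "huggingface" now takes the same fallback path as anyscale, openrouter, deepinfra, and perplexity, where the synchronous embedding call is awaited via the event loop's default thread-pool executor (native aiohttp calls existed only for Azure and OpenAI at the time, per the inline comment). A minimal sketch of that pattern, with sync_embedding and aembedding_sketch as hypothetical stand-ins rather than litellm's actual functions:

import asyncio
import functools

def sync_embedding(model: str, input: list) -> dict:
    # stand-in for a blocking provider SDK/HTTP call
    return {"object": "list", "data": [], "model": model}

async def aembedding_sketch(model: str, input: list) -> dict:
    # Run the synchronous call on the default thread-pool executor so the
    # event loop stays free; this is what
    # `await loop.run_in_executor(None, func_with_context)` does in the diff.
    loop = asyncio.get_running_loop()
    func_with_context = functools.partial(sync_embedding, model, input)
    return await loop.run_in_executor(None, func_with_context)

print(asyncio.run(aembedding_sketch("hf/some-model", ["hello"])))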