support sync ollama embeddings

This commit is contained in:
Mehmet Bektas 2024-05-05 19:44:25 -07:00
parent 918367cc7b
commit 3acad270e5
2 changed files with 30 additions and 10 deletions

View file

@@ -417,3 +417,23 @@ async def ollama_aembeddings(
         "total_tokens": total_input_tokens,
     }
     return model_response
def ollama_embeddings(
    api_base: str,
    model: str,
    prompts: list,
    optional_params=None,
    logging_obj=None,
    model_response=None,
    encoding=None,
):
    """Synchronous wrapper around the async ``ollama_aembeddings``.

    Drives the async Ollama embedding call to completion on a fresh
    event loop and returns its result (the populated model response).

    Args:
        api_base: Base URL of the Ollama server.
        model: Name of the Ollama embedding model.
        prompts: List of input texts to embed.
        optional_params: Extra provider parameters forwarded unchanged.
        logging_obj: Logging handle forwarded unchanged.
        model_response: Response object the async call populates.
        encoding: Tokenizer/encoding forwarded unchanged.

    Note:
        ``asyncio.run`` raises ``RuntimeError`` when invoked from a
        thread that already has a running event loop; such callers
        should await ``ollama_aembeddings`` directly.
    """
    # Forward by keyword (matching the call style used at the call site
    # in embedding()) so this wrapper cannot silently misbind arguments
    # if the async function's positional order ever changes.
    return asyncio.run(
        ollama_aembeddings(
            api_base=api_base,
            model=model,
            prompts=prompts,
            optional_params=optional_params,
            logging_obj=logging_obj,
            model_response=model_response,
            encoding=encoding,
        )
    )

View file

@@ -2946,16 +2946,16 @@ def embedding(
                 model=model,  # type: ignore
                 llm_provider="ollama",  # type: ignore
             )
-            if aembedding:
-                response = ollama.ollama_aembeddings(
+            ollama_embeddings_fn = ollama.ollama_aembeddings if aembedding else ollama.ollama_embeddings
+            response = ollama_embeddings_fn(
                 api_base=api_base,
                 model=model,
                 prompts=input,
                 encoding=encoding,
                 logging_obj=logging,
                 optional_params=optional_params,
                 model_response=EmbeddingResponse(),
             )
         elif custom_llm_provider == "sagemaker":
             response = sagemaker.embedding(
                 model=model,