diff --git a/litellm/llms/ollama.py b/litellm/llms/ollama.py
index 5972d9e8c2..f4c06dbe64 100644
--- a/litellm/llms/ollama.py
+++ b/litellm/llms/ollama.py
@@ -417,3 +417,23 @@ async def ollama_aembeddings(
         "total_tokens": total_input_tokens,
     }
     return model_response
+
+def ollama_embeddings(
+    api_base: str,
+    model: str,
+    prompts: list,
+    optional_params=None,
+    logging_obj=None,
+    model_response=None,
+    encoding=None,
+):
+    return asyncio.run(
+        ollama_aembeddings(
+            api_base,
+            model,
+            prompts,
+            optional_params,
+            logging_obj,
+            model_response,
+            encoding)
+    )
diff --git a/litellm/main.py b/litellm/main.py
index 8717af5704..9aaf30f051 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -2946,16 +2946,16 @@ def embedding(
                 model=model,  # type: ignore
                 llm_provider="ollama",  # type: ignore
             )
-        if aembedding:
-            response = ollama.ollama_aembeddings(
-                api_base=api_base,
-                model=model,
-                prompts=input,
-                encoding=encoding,
-                logging_obj=logging,
-                optional_params=optional_params,
-                model_response=EmbeddingResponse(),
-            )
+        ollama_embeddings_fn = ollama.ollama_aembeddings if aembedding else ollama.ollama_embeddings
+        response = ollama_embeddings_fn(
+            api_base=api_base,
+            model=model,
+            prompts=input,
+            encoding=encoding,
+            logging_obj=logging,
+            optional_params=optional_params,
+            model_response=EmbeddingResponse(),
+        )
     elif custom_llm_provider == "sagemaker":
         response = sagemaker.embedding(
             model=model,
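Note: the new `ollama_embeddings` wrapper drives the existing async implementation via `asyncio.run`, which raises `RuntimeError` when invoked from a thread that already has a running event loop, so this sync path is only for non-async callers. A minimal usage sketch of the behavior this patch enables, assuming a local Ollama server at the default `http://localhost:11434` and an embedding-capable model (the model name below is hypothetical):

```python
import litellm

# Before this patch, a plain (non-async) litellm.embedding(...) call for an
# "ollama/..." model never reached the Ollama embedding code; with the sync
# ollama_embeddings wrapper it now returns a populated EmbeddingResponse.
response = litellm.embedding(
    model="ollama/nomic-embed-text",    # hypothetical local embedding model
    input=["hello world"],
    api_base="http://localhost:11434",  # assumed default Ollama endpoint
)
print(len(response.data[0]["embedding"]))  # dimensionality of the embedding
```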