diff --git a/litellm/llms/openai_like/embedding/handler.py b/litellm/llms/openai_like/embedding/handler.py
index ce0860724..7ddf43cb8 100644
--- a/litellm/llms/openai_like/embedding/handler.py
+++ b/litellm/llms/openai_like/embedding/handler.py
@@ -62,7 +62,7 @@ class OpenAILikeEmbeddingHandler(OpenAILikeBase):
         except httpx.HTTPStatusError as e:
             raise OpenAILikeError(
                 status_code=e.response.status_code,
-                message=response.text if response else str(e),
+                message=e.response.text if e.response else str(e),
             )
         except httpx.TimeoutException:
             raise OpenAILikeError(
diff --git a/litellm/main.py b/litellm/main.py
index 32055eb9d..42e7889f2 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -3182,6 +3182,7 @@ async def aembedding(*args, **kwargs) -> EmbeddingResponse:
             or custom_llm_provider == "azure_ai"
             or custom_llm_provider == "together_ai"
             or custom_llm_provider == "openai_like"
+            or custom_llm_provider == "jina_ai"
         ):  # currently implemented aiohttp calls for just azure and openai, soon all.
             # Await normally
             init_response = await loop.run_in_executor(None, func_with_context)
diff --git a/litellm/proxy/_new_secret_config.yaml b/litellm/proxy/_new_secret_config.yaml
index b06a9e667..9be7c894b 100644
--- a/litellm/proxy/_new_secret_config.yaml
+++ b/litellm/proxy/_new_secret_config.yaml
@@ -6,7 +6,10 @@ model_list:
   - model_name: rerank-model
     litellm_params:
       model: jina_ai/jina-reranker-v2-base-multilingual
-
+  - model_name: jina_ai/jina-embeddings-v3
+    litellm_params:
+      model: jina_ai/jina-embeddings-v3
+      max_tokens: 8192
 router_settings:
   model_group_alias:
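
Below is a minimal usage sketch, not part of the patch itself, showing how the async branch added in litellm/main.py might be exercised. It assumes a Jina AI API key is available to litellm (e.g. via a JINA_AI_API_KEY environment variable) and reuses the jina_ai/jina-embeddings-v3 model name from the config change above.

# Hedged sketch: exercises litellm.aembedding with the jina_ai provider,
# which this patch routes through the executor-based async path.
import asyncio

import litellm


async def main() -> None:
    # aembedding() dispatches on the "jina_ai/" prefix; with this patch the
    # call no longer falls through to the synchronous fallback branch.
    response = await litellm.aembedding(
        model="jina_ai/jina-embeddings-v3",
        input=["hello world"],
    )
    # EmbeddingResponse follows the OpenAI shape: data[i]["embedding"] holds
    # the vector for input i.
    print(len(response.data), len(response.data[0]["embedding"]))


if __name__ == "__main__":
    asyncio.run(main())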