(code refactor) - Add BaseRerankConfig. Use BaseRerankConfig for cohere/rerank and azure_ai/rerank (#7319)

* add base rerank config

* working sync cohere rerank

* update rerank types

* update base rerank config

* remove old rerank

* add new cohere handler.py

* add cohere rerank transform

* add get_provider_rerank_config

* add rerank to base llm http handler

* add rerank utils

* add arerank to llm http handler.py

* add AzureAIRerankConfig

* updates rerank config

* update test rerank

* fix unused imports

* update get_provider_rerank_config

* test_basic_rerank_caching

* fix unused import

* test rerank
This commit is contained in:
Ishaan Jaff 2024-12-19 17:03:34 -08:00 committed by GitHub
parent a790d43116
commit 5f15b0aa20
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
19 changed files with 645 additions and 425 deletions

View file

@@ -1,127 +1,5 @@
from typing import Any, Dict, List, Optional, Union
"""
Azure AI Rerank - uses `llm_http_handler.py` to make httpx requests
import httpx
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
from litellm.llms.cohere.rerank import CohereRerank
from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
from litellm.types.rerank import RerankResponse
class AzureAIRerank(CohereRerank):
    """Azure AI rerank handler built on top of the Cohere rerank implementation.

    Azure AI serves Cohere rerank models behind its own endpoint, so this class
    reuses ``CohereRerank`` for the actual HTTP request/response handling and
    adds the Azure-specific pieces:

    * Bearer-token ``Authorization`` header built from ``api_key``.
    * Normalisation of ``api_base`` so its path ends in ``/v1/rerank``.
    * Mapping of the Azure "model group" response header back to the canonical
      ``azure_ai/...`` model name, recorded in ``_hidden_params["model"]``.
    """

    def get_base_model(self, azure_model_group: Optional[str]) -> Optional[str]:
        """Map an Azure model-group header value to the canonical model name.

        Returns ``None`` when no header value was present, and the input
        unchanged when it is not one of the known paygo offer ids.
        """
        if azure_model_group is None:
            return None
        if azure_model_group == "offer-cohere-rerank-mul-paygo":
            return "azure_ai/cohere-rerank-v3-multilingual"
        if azure_model_group == "offer-cohere-rerank-eng-paygo":
            return "azure_ai/cohere-rerank-v3-english"
        return azure_model_group

    def _set_base_model_on_response(self, response: RerankResponse) -> RerankResponse:
        """Resolve the base model from response headers and record it.

        The ``llm_provider-azureml-model-group`` header may surface either at
        the top level of ``_hidden_params`` or nested under
        ``additional_headers``. Historically the sync path only read the top
        level and the async path only read the nested dict, so one of the two
        silently missed the header; checking both keeps the paths consistent
        while remaining backward compatible with each.
        """
        hidden_params = response._hidden_params
        additional_headers = hidden_params.get("additional_headers") or {}
        azure_model_group = hidden_params.get(
            "llm_provider-azureml-model-group"
        ) or additional_headers.get("llm_provider-azureml-model-group")
        hidden_params["model"] = self.get_base_model(azure_model_group)
        return response

    async def async_azure_rerank(
        self,
        model: str,
        api_key: str,
        api_base: str,
        query: str,
        documents: List[Union[str, Dict[str, Any]]],
        headers: Optional[dict],
        litellm_logging_obj: LiteLLMLoggingObj,
        top_n: Optional[int] = None,
        rank_fields: Optional[List[str]] = None,
        return_documents: Optional[bool] = True,
        max_chunks_per_doc: Optional[int] = None,
    ) -> RerankResponse:
        """Async rerank: delegate to the Cohere handler, then tag the base model."""
        returned_response: RerankResponse = await super().rerank(  # type: ignore
            model=model,
            api_key=api_key,
            api_base=api_base,
            query=query,
            documents=documents,
            top_n=top_n,
            rank_fields=rank_fields,
            return_documents=return_documents,
            max_chunks_per_doc=max_chunks_per_doc,
            _is_async=True,
            headers=headers,
            litellm_logging_obj=litellm_logging_obj,
        )
        return self._set_base_model_on_response(returned_response)

    def rerank(
        self,
        model: str,
        api_key: str,
        api_base: str,
        query: str,
        documents: List[Union[str, Dict[str, Any]]],
        headers: Optional[dict],
        litellm_logging_obj: LiteLLMLoggingObj,
        top_n: Optional[int] = None,
        rank_fields: Optional[List[str]] = None,
        return_documents: Optional[bool] = True,
        max_chunks_per_doc: Optional[int] = None,
        _is_async: Optional[bool] = False,
        client: Optional[Union[HTTPHandler, HTTPHandler]] = None,
    ) -> RerankResponse:
        """Rerank ``documents`` against ``query`` via an Azure AI endpoint.

        Builds Azure auth headers, normalises the endpoint path to
        ``/v1/rerank``, then delegates to ``CohereRerank.rerank``. When
        ``_is_async`` is set, returns the ``async_azure_rerank`` coroutine
        (callers are expected to await it).
        """
        # Azure AI authenticates with a Bearer token, not Cohere's scheme.
        if headers is None:
            headers = {"Authorization": "Bearer {}".format(api_key)}
        else:
            headers = {**headers, "Authorization": "Bearer {}".format(api_key)}

        # Ensure the request path is '/v1/rerank' regardless of what path the
        # caller supplied on api_base.
        api_base_url = httpx.URL(api_base)
        if not api_base_url.path.endswith("/v1/rerank"):
            api_base = str(api_base_url.copy_with(path="/v1/rerank"))

        if _is_async:
            return self.async_azure_rerank(  # type: ignore
                model=model,
                api_key=api_key,
                api_base=api_base,
                query=query,
                documents=documents,
                top_n=top_n,
                rank_fields=rank_fields,
                return_documents=return_documents,
                max_chunks_per_doc=max_chunks_per_doc,
                headers=headers,
                litellm_logging_obj=litellm_logging_obj,
            )

        returned_response = super().rerank(
            model=model,
            api_key=api_key,
            api_base=api_base,
            query=query,
            documents=documents,
            top_n=top_n,
            rank_fields=rank_fields,
            return_documents=return_documents,
            max_chunks_per_doc=max_chunks_per_doc,
            _is_async=_is_async,
            headers=headers,
            litellm_logging_obj=litellm_logging_obj,
        )
        return self._set_base_model_on_response(returned_response)
Request/Response transformation is handled in `transformation.py`
"""