mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 02:34:29 +00:00
* feat(bedrock/rerank): infer model region if model given as arn * test: add unit testing to ensure bedrock region name inferred from arn on rerank * feat(bedrock/rerank/transformation.py): include search units for bedrock rerank result Resolves https://github.com/BerriAI/litellm/issues/7258#issuecomment-2671557137 * test(test_bedrock_completion.py): add testing for bedrock cohere rerank * feat(cost_calculator.py): refactor rerank cost tracking to support bedrock cost tracking * build(model_prices_and_context_window.json): add amazon.rerank model to model cost map * fix(cost_calculator.py): bedrock/common_utils.py get base model from model w/ arn -> handles rerank model * build(model_prices_and_context_window.json): add bedrock cohere rerank pricing * feat(bedrock/rerank): migrate bedrock config to basererank config * Revert "feat(bedrock/rerank): migrate bedrock config to basererank config" This reverts commit 84fae1f167
. * test: add testing to ensure large doc / queries are correctly counted * Revert "test: add testing to ensure large doc / queries are correctly counted" This reverts commit 4337f1657e
. * fix(migrate-jina-ai-to-rerank-config): enables cost tracking * refactor(jina_ai/): finish migrating jina ai to base rerank config enables cost tracking * fix(jina_ai/rerank): e2e jina ai rerank cost tracking * fix: cleanup dead code * fix: fix python3.8 compatibility error * test: fix test * test: add e2e testing for azure ai rerank * fix: fix linting error * test: mark cohere as flaky
42 lines
1.5 KiB
Python
42 lines
1.5 KiB
Python
from typing import Any, Dict, List, Optional, Union
|
|
|
|
from litellm.llms.base_llm.rerank.transformation import BaseRerankConfig
|
|
from litellm.types.rerank import OptionalRerankParams
|
|
|
|
|
|
def get_optional_rerank_params(
    rerank_provider_config: BaseRerankConfig,
    model: str,
    drop_params: bool,
    query: str,
    documents: List[Union[str, Dict[str, Any]]],
    custom_llm_provider: Optional[str] = None,
    top_n: Optional[int] = None,
    rank_fields: Optional[List[str]] = None,
    return_documents: Optional[bool] = True,
    max_chunks_per_doc: Optional[int] = None,
    non_default_params: Optional[dict] = None,
) -> OptionalRerankParams:
    """Collect caller-supplied rerank arguments and map them to provider params.

    Merges each explicitly provided rerank argument into ``non_default_params``
    (only when it is not ``None``), then delegates to the provider config's
    ``map_cohere_rerank_params`` to translate them into the provider-specific
    request parameters.

    Args:
        rerank_provider_config: Provider config object that performs the mapping.
        model: Model name, forwarded unchanged to the mapper.
        drop_params: Whether the mapper should drop unsupported parameters.
        query: Search query the documents are ranked against.
        documents: Documents to rerank (plain strings or field dicts).
        custom_llm_provider: Optional provider identifier, forwarded to the mapper.
        top_n: Maximum number of ranked results to return, if given.
        rank_fields: Document fields to rank on, if given.
        return_documents: Whether the provider should echo documents in results.
        max_chunks_per_doc: Provider-specific per-document chunk limit, if given.
        non_default_params: Pre-collected user-supplied params to merge into.

    Returns:
        The ``OptionalRerankParams`` produced by the provider config's mapper.
    """
    # NOTE(review): when non_default_params is a non-empty dict, it is mutated
    # in place below — confirm callers do not rely on it staying unchanged.
    all_non_default_params = non_default_params or {}
    if query is not None:
        all_non_default_params["query"] = query
    if top_n is not None:
        all_non_default_params["top_n"] = top_n
    if documents is not None:
        all_non_default_params["documents"] = documents
    # Fix: rank_fields was previously omitted from this merge, so a
    # caller-supplied value never reached the mapper via non_default_params
    # even though every other optional argument did.
    if rank_fields is not None:
        all_non_default_params["rank_fields"] = rank_fields
    if return_documents is not None:
        all_non_default_params["return_documents"] = return_documents
    if max_chunks_per_doc is not None:
        all_non_default_params["max_chunks_per_doc"] = max_chunks_per_doc
    return rerank_provider_config.map_cohere_rerank_params(
        model=model,
        drop_params=drop_params,
        query=query,
        documents=documents,
        custom_llm_provider=custom_llm_provider,
        top_n=top_n,
        rank_fields=rank_fields,
        return_documents=return_documents,
        max_chunks_per_doc=max_chunks_per_doc,
        non_default_params=all_non_default_params,
    )
|