mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-26 11:14:04 +00:00
* feat(pass_through_endpoints/): support logging anthropic/gemini pass through calls to langfuse/s3/etc. * fix(utils.py): allow disabling end user cost tracking with new param Allows proxy admin to disable cost tracking for end user - keeps prometheus metrics small * docs(configs.md): add disable_end_user_cost_tracking reference to docs * feat(key_management_endpoints.py): add support for restricting access to `/key/generate` by team/proxy level role Enables admin to restrict key creation, and assign team admins to handle distributing keys * test(test_key_management.py): add unit testing for personal / team key restriction checks * docs: add docs on restricting key creation * docs(finetuned_models.md): add new guide on calling finetuned models * docs(input.md): cleanup anthropic supported params Closes https://github.com/BerriAI/litellm/issues/6856 * test(test_embedding.py): add test for passing extra headers via embedding * feat(cohere/embed): pass client to async embedding * feat(rerank.py): add `/v1/rerank` if missing for cohere base url Closes https://github.com/BerriAI/litellm/issues/6844 * fix(main.py): pass extra_headers param to openai Fixes https://github.com/BerriAI/litellm/issues/6836 * fix(litellm_logging.py): don't disable global callbacks when dynamic callbacks are set Fixes issue where global callbacks - e.g. prometheus were overridden when langfuse was set dynamically * fix(handler.py): fix linting error * fix: fix typing * build: add conftest to proxy_admin_ui_tests/ * test: fix test * fix: fix linting errors * test: fix test * fix: fix pass through testing
127 lines
4.4 KiB
Python
127 lines
4.4 KiB
Python
from typing import Any, Dict, List, Optional, Union
|
|
|
|
import httpx
|
|
|
|
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
|
|
from litellm.llms.cohere.rerank import CohereRerank
|
|
from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
|
|
from litellm.types.rerank import RerankResponse
|
|
|
|
|
|
class AzureAIRerank(CohereRerank):
    """Azure AI Studio rerank handler.

    Reuses the Cohere rerank implementation and layers on the Azure
    specifics visible in this file:

      1. injects ``Authorization: Bearer <api_key>`` into the request headers,
      2. normalises ``api_base`` so its path ends with ``/v1/rerank``, and
      3. translates the ``llm_provider-azureml-model-group`` response header
         into the canonical ``azure_ai/...`` model name, stored in
         ``_hidden_params["model"]`` for downstream consumers.
    """

    def get_base_model(self, azure_model_group: Optional[str]) -> Optional[str]:
        """Map an Azure model-group identifier to its canonical model name.

        Args:
            azure_model_group: Value of the ``llm_provider-azureml-model-group``
                response header, or ``None`` if the header was absent.

        Returns:
            The ``azure_ai/...`` model name for known Cohere rerank offers;
            ``None`` if the input was ``None``; otherwise the input unchanged,
            so unrecognised offers still surface a usable identifier.
        """
        if azure_model_group is None:
            return None
        if azure_model_group == "offer-cohere-rerank-mul-paygo":
            return "azure_ai/cohere-rerank-v3-multilingual"
        if azure_model_group == "offer-cohere-rerank-eng-paygo":
            return "azure_ai/cohere-rerank-v3-english"
        return azure_model_group

    def _resolve_base_model(
        self, returned_response: RerankResponse
    ) -> Optional[str]:
        """Extract the Azure model-group header from a rerank response and
        map it to the canonical base-model name.

        The async and sync code paths previously disagreed on where to read
        the header from (``_hidden_params["additional_headers"]`` vs. a
        top-level ``_hidden_params`` key); this helper checks the
        ``additional_headers`` dict first and falls back to the top-level
        key, so both storage locations are honoured consistently.
        """
        additional_headers = (
            returned_response._hidden_params.get("additional_headers") or {}
        )
        model_group = additional_headers.get(
            "llm_provider-azureml-model-group"
        ) or returned_response._hidden_params.get("llm_provider-azureml-model-group")
        return self.get_base_model(model_group)

    async def async_azure_rerank(
        self,
        model: str,
        api_key: str,
        api_base: str,
        query: str,
        documents: List[Union[str, Dict[str, Any]]],
        headers: Optional[dict],
        litellm_logging_obj: LiteLLMLoggingObj,
        top_n: Optional[int] = None,
        rank_fields: Optional[List[str]] = None,
        return_documents: Optional[bool] = True,
        max_chunks_per_doc: Optional[int] = None,
    ):
        """Async path: delegate to the Cohere rerank implementation, then
        rewrite ``_hidden_params["model"]`` with the resolved base model.

        ``headers`` are expected to already carry the bearer token — the
        sync :meth:`rerank` entry point builds them before dispatching here.
        """
        returned_response: RerankResponse = await super().rerank(  # type: ignore
            model=model,
            api_key=api_key,
            api_base=api_base,
            query=query,
            documents=documents,
            top_n=top_n,
            rank_fields=rank_fields,
            return_documents=return_documents,
            max_chunks_per_doc=max_chunks_per_doc,
            _is_async=True,
            headers=headers,
            litellm_logging_obj=litellm_logging_obj,
        )

        # Record the canonical base model for cost tracking / logging.
        returned_response._hidden_params["model"] = self._resolve_base_model(
            returned_response
        )
        return returned_response

    def rerank(
        self,
        model: str,
        api_key: str,
        api_base: str,
        query: str,
        documents: List[Union[str, Dict[str, Any]]],
        headers: Optional[dict],
        litellm_logging_obj: LiteLLMLoggingObj,
        top_n: Optional[int] = None,
        rank_fields: Optional[List[str]] = None,
        return_documents: Optional[bool] = True,
        max_chunks_per_doc: Optional[int] = None,
        _is_async: Optional[bool] = False,
        client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
    ) -> RerankResponse:
        """Rerank ``documents`` against ``query`` via an Azure AI endpoint.

        Adds bearer-token auth, normalises the API base URL, dispatches to
        the async path when ``_is_async`` is set, and maps the Azure
        model-group response header to the canonical model name on the
        sync path.

        NOTE(review): ``client`` is accepted but never forwarded to
        ``super().rerank()`` — a caller-supplied HTTP handler is silently
        ignored. Confirm whether the superclass accepts ``client`` and
        forward it if so.
        """
        # Always (re)set the Authorization header from api_key; a caller-supplied
        # Authorization value is intentionally overwritten.
        if headers is None:
            headers = {"Authorization": "Bearer {}".format(api_key)}
        else:
            headers = {**headers, "Authorization": "Bearer {}".format(api_key)}

        # Ensure the request goes to the /v1/rerank route. Note copy_with()
        # replaces the entire path, discarding any existing path prefix.
        api_base_url = httpx.URL(api_base)
        if not api_base_url.path.endswith("/v1/rerank"):
            api_base = str(api_base_url.copy_with(path="/v1/rerank"))

        if _is_async:
            # Returns a coroutine; the caller is expected to await it.
            return self.async_azure_rerank(  # type: ignore
                model=model,
                api_key=api_key,
                api_base=api_base,
                query=query,
                documents=documents,
                top_n=top_n,
                rank_fields=rank_fields,
                return_documents=return_documents,
                max_chunks_per_doc=max_chunks_per_doc,
                headers=headers,
                litellm_logging_obj=litellm_logging_obj,
            )

        returned_response = super().rerank(
            model=model,
            api_key=api_key,
            api_base=api_base,
            query=query,
            documents=documents,
            top_n=top_n,
            rank_fields=rank_fields,
            return_documents=return_documents,
            max_chunks_per_doc=max_chunks_per_doc,
            _is_async=_is_async,
            headers=headers,
            litellm_logging_obj=litellm_logging_obj,
        )

        # Record the canonical base model for cost tracking / logging.
        returned_response._hidden_params["model"] = self._resolve_base_model(
            returned_response
        )
        return returned_response