"""
|
|
Handles embedding calls to Bedrock's `/invoke` endpoint
|
|
"""

import copy
import json
from typing import Any, Callable, List, Optional, Tuple, Union

import httpx

import litellm
from litellm.llms.cohere.embed.handler import embedding as cohere_embedding
from litellm.llms.custom_httpx.http_handler import (
    AsyncHTTPHandler,
    HTTPHandler,
    _get_httpx_client,
    get_async_httpx_client,
)
from litellm.secret_managers.main import get_secret
from litellm.types.llms.bedrock import AmazonEmbeddingRequest, CohereEmbeddingRequest
from litellm.types.utils import EmbeddingResponse

from ..base_aws_llm import BaseAWSLLM
from ..common_utils import BedrockError
from .amazon_titan_g1_transformation import AmazonTitanG1Config
from .amazon_titan_multimodal_transformation import (
    AmazonTitanMultimodalEmbeddingG1Config,
)
from .amazon_titan_v2_transformation import AmazonTitanV2Config
from .cohere_transformation import BedrockCohereEmbeddingConfig

class BedrockEmbedding(BaseAWSLLM):
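    """
    Handles Bedrock embedding models called via the `/invoke` endpoint:
    resolves AWS credentials, signs requests with SigV4, and applies
    per-model request/response transformations (Amazon Titan and Cohere).
    """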

    def _load_credentials(
        self,
        optional_params: dict,
    ) -> Tuple[Any, str]:
        """
        Pops AWS auth params out of `optional_params` (so they don't end up in
        the Bedrock request body) and resolves botocore credentials plus the
        region name.
        """
        try:
            from botocore.credentials import Credentials
        except ImportError:
            raise ImportError("Missing boto3 to call bedrock. Run 'pip install boto3'.")

        ## CREDENTIALS ##
        # pop AWS auth params; Bedrock rejects request bodies that include them
        aws_secret_access_key = optional_params.pop("aws_secret_access_key", None)
        aws_access_key_id = optional_params.pop("aws_access_key_id", None)
        aws_session_token = optional_params.pop("aws_session_token", None)
        aws_region_name = optional_params.pop("aws_region_name", None)
        aws_role_name = optional_params.pop("aws_role_name", None)
        aws_session_name = optional_params.pop("aws_session_name", None)
        aws_profile_name = optional_params.pop("aws_profile_name", None)
        aws_web_identity_token = optional_params.pop("aws_web_identity_token", None)
        aws_sts_endpoint = optional_params.pop("aws_sts_endpoint", None)

        ### SET REGION NAME ###
        if aws_region_name is None:
            # check env #
            litellm_aws_region_name = get_secret("AWS_REGION_NAME", None)

            if litellm_aws_region_name is not None and isinstance(
                litellm_aws_region_name, str
            ):
                aws_region_name = litellm_aws_region_name

            standard_aws_region_name = get_secret("AWS_REGION", None)
            if standard_aws_region_name is not None and isinstance(
                standard_aws_region_name, str
            ):
                aws_region_name = standard_aws_region_name

            if aws_region_name is None:
                aws_region_name = "us-west-2"

        credentials: Credentials = self.get_credentials(
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            aws_session_token=aws_session_token,
            aws_region_name=aws_region_name,
            aws_session_name=aws_session_name,
            aws_profile_name=aws_profile_name,
            aws_role_name=aws_role_name,
            aws_web_identity_token=aws_web_identity_token,
            aws_sts_endpoint=aws_sts_endpoint,
        )
        return credentials, aws_region_name
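
    # NOTE: region precedence above is: explicit `aws_region_name` param >
    # AWS_REGION > AWS_REGION_NAME > "us-west-2" (AWS_REGION is read second,
    # so it overwrites a value taken from AWS_REGION_NAME).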

    async def async_embeddings(self):
        # not implemented; the async flow is handled in `embeddings` via
        # `aembedding=True` (see `_async_single_func_embeddings` / `cohere_embedding`)
        pass

    def _make_sync_call(
        self,
        client: Optional[HTTPHandler],
        timeout: Optional[Union[float, httpx.Timeout]],
        api_base: str,
        headers: dict,
        data: dict,
    ) -> dict:
        if client is None or not isinstance(client, HTTPHandler):
            _params = {}
            if timeout is not None:
                if isinstance(timeout, (float, int)):
                    timeout = httpx.Timeout(timeout)
                _params["timeout"] = timeout
            client = _get_httpx_client(_params)  # type: ignore

        try:
            response = client.post(url=api_base, headers=headers, data=json.dumps(data))  # type: ignore
            response.raise_for_status()
        except httpx.HTTPStatusError as err:
            error_code = err.response.status_code
            raise BedrockError(status_code=error_code, message=err.response.text)
        except httpx.TimeoutException:
            raise BedrockError(status_code=408, message="Timeout error occurred.")

        return response.json()

    async def _make_async_call(
        self,
        client: Optional[AsyncHTTPHandler],
        timeout: Optional[Union[float, httpx.Timeout]],
        api_base: str,
        headers: dict,
        data: dict,
    ) -> dict:
        if client is None or not isinstance(client, AsyncHTTPHandler):
            _params = {}
            if timeout is not None:
                if isinstance(timeout, (float, int)):
                    timeout = httpx.Timeout(timeout)
                _params["timeout"] = timeout
            client = get_async_httpx_client(
                params=_params, llm_provider=litellm.LlmProviders.BEDROCK
            )

        try:
            response = await client.post(url=api_base, headers=headers, data=json.dumps(data))  # type: ignore
            response.raise_for_status()
        except httpx.HTTPStatusError as err:
            error_code = err.response.status_code
            raise BedrockError(status_code=error_code, message=err.response.text)
        except httpx.TimeoutException:
            raise BedrockError(status_code=408, message="Timeout error occurred.")

        return response.json()

    def _single_func_embeddings(
        self,
        client: Optional[HTTPHandler],
        timeout: Optional[Union[float, httpx.Timeout]],
        batch_data: List[dict],
        credentials: Any,
        extra_headers: Optional[dict],
        endpoint_url: str,
        aws_region_name: str,
        model: str,
        logging_obj: Any,
    ):
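        """
        Sends one signed `/invoke` request per item in `batch_data` (each Titan
        embedding body carries a single input), then merges the responses via
        the matching model config's `_transform_response`.
        """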
        try:
            from botocore.auth import SigV4Auth
            from botocore.awsrequest import AWSRequest
        except ImportError:
            raise ImportError("Missing boto3 to call bedrock. Run 'pip install boto3'.")

        responses: List[dict] = []
        for data in batch_data:
            sigv4 = SigV4Auth(credentials, "bedrock", aws_region_name)
            headers = {"Content-Type": "application/json"}
            if extra_headers is not None:
                headers = {"Content-Type": "application/json", **extra_headers}
            request = AWSRequest(
                method="POST", url=endpoint_url, data=json.dumps(data), headers=headers
            )
            sigv4.add_auth(request)
            if (
                extra_headers is not None and "Authorization" in extra_headers
            ):  # prevent sigv4 from overwriting the auth header
                request.headers["Authorization"] = extra_headers["Authorization"]
            prepped = request.prepare()

            ## LOGGING
            logging_obj.pre_call(
                input=data,
                api_key="",
                additional_args={
                    "complete_input_dict": data,
                    "api_base": prepped.url,
                    "headers": prepped.headers,
                },
            )
            response = self._make_sync_call(
                client=client,
                timeout=timeout,
                api_base=prepped.url,
                headers=prepped.headers,  # type: ignore
                data=data,
            )

            ## LOGGING
            logging_obj.post_call(
                input=data,
                api_key="",
                original_response=response,
                additional_args={"complete_input_dict": data},
            )

            responses.append(response)

        returned_response: Optional[EmbeddingResponse] = None

        ## TRANSFORM RESPONSE ##
        if model == "amazon.titan-embed-image-v1":
            returned_response = (
                AmazonTitanMultimodalEmbeddingG1Config()._transform_response(
                    response_list=responses, model=model
                )
            )
        elif model == "amazon.titan-embed-text-v1":
            returned_response = AmazonTitanG1Config()._transform_response(
                response_list=responses, model=model
            )
        elif model == "amazon.titan-embed-text-v2:0":
            returned_response = AmazonTitanV2Config()._transform_response(
                response_list=responses, model=model
            )

        if returned_response is None:
            raise Exception(
                "Unable to map model response to known provider format. model={}".format(
                    model
                )
            )

        return returned_response

    async def _async_single_func_embeddings(
        self,
        client: Optional[AsyncHTTPHandler],
        timeout: Optional[Union[float, httpx.Timeout]],
        batch_data: List[dict],
        credentials: Any,
        extra_headers: Optional[dict],
        endpoint_url: str,
        aws_region_name: str,
        model: str,
        logging_obj: Any,
    ):
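        """
        Async variant of `_single_func_embeddings`: one signed `/invoke`
        request per input, with responses merged via the matching model
        config's `_transform_response`.
        """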
        try:
            from botocore.auth import SigV4Auth
            from botocore.awsrequest import AWSRequest
        except ImportError:
            raise ImportError("Missing boto3 to call bedrock. Run 'pip install boto3'.")

        responses: List[dict] = []
        for data in batch_data:
            sigv4 = SigV4Auth(credentials, "bedrock", aws_region_name)
            headers = {"Content-Type": "application/json"}
            if extra_headers is not None:
                headers = {"Content-Type": "application/json", **extra_headers}
            request = AWSRequest(
                method="POST", url=endpoint_url, data=json.dumps(data), headers=headers
            )
            sigv4.add_auth(request)
            if (
                extra_headers is not None and "Authorization" in extra_headers
            ):  # prevent sigv4 from overwriting the auth header
                request.headers["Authorization"] = extra_headers["Authorization"]
            prepped = request.prepare()

            ## LOGGING
            logging_obj.pre_call(
                input=data,
                api_key="",
                additional_args={
                    "complete_input_dict": data,
                    "api_base": prepped.url,
                    "headers": prepped.headers,
                },
            )
            response = await self._make_async_call(
                client=client,
                timeout=timeout,
                api_base=prepped.url,
                headers=prepped.headers,  # type: ignore
                data=data,
            )

            ## LOGGING
            logging_obj.post_call(
                input=data,
                api_key="",
                original_response=response,
                additional_args={"complete_input_dict": data},
            )

            responses.append(response)

        returned_response: Optional[EmbeddingResponse] = None

        ## TRANSFORM RESPONSE ##
        if model == "amazon.titan-embed-image-v1":
            returned_response = (
                AmazonTitanMultimodalEmbeddingG1Config()._transform_response(
                    response_list=responses, model=model
                )
            )
        elif model == "amazon.titan-embed-text-v1":
            returned_response = AmazonTitanG1Config()._transform_response(
                response_list=responses, model=model
            )
        elif model == "amazon.titan-embed-text-v2:0":
            returned_response = AmazonTitanV2Config()._transform_response(
                response_list=responses, model=model
            )

        if returned_response is None:
            raise Exception(
                "Unable to map model response to known provider format. model={}".format(
                    model
                )
            )

        return returned_response

    def embeddings(
        self,
        model: str,
        input: List[str],
        api_base: Optional[str],
        model_response: EmbeddingResponse,
        print_verbose: Callable,
        encoding,
        logging_obj,
        client: Optional[Union[HTTPHandler, AsyncHTTPHandler]],
        timeout: Optional[Union[float, httpx.Timeout]],
        aembedding: Optional[bool],
        extra_headers: Optional[dict],
        optional_params: dict,
        litellm_params: dict,
    ) -> EmbeddingResponse:
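        """
        Entry point for Bedrock embedding calls. Loads AWS credentials, strips
        auth params from the request body, transforms the input for the target
        provider, and routes the call: Titan models go through the per-input
        `/invoke` loop above, while Cohere models are signed here and handed
        off to `cohere_embedding`.
        """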
        try:
            from botocore.auth import SigV4Auth
            from botocore.awsrequest import AWSRequest
        except ImportError:
            raise ImportError("Missing boto3 to call bedrock. Run 'pip install boto3'.")

        credentials, aws_region_name = self._load_credentials(optional_params)

        ### TRANSFORMATION ###
        provider = model.split(".")[0]  # e.g. "amazon.titan-embed-text-v1" -> "amazon"
        inference_params = copy.deepcopy(optional_params)
        inference_params = {
            k: v
            for k, v in inference_params.items()
            if k.lower() not in self.aws_authentication_params
        }  # drop any remaining AWS auth params so they aren't sent in the request body
        inference_params.pop(
            "user", None
        )  # make sure user is not passed in for bedrock call
        modelId = (
            optional_params.pop("model_id", None) or model
        )  # default to model if not passed

        data: Optional[CohereEmbeddingRequest] = None
        batch_data: Optional[List] = None
        if provider == "cohere":
            data = BedrockCohereEmbeddingConfig()._transform_request(
                model=model, input=input, inference_params=inference_params
            )
        elif provider == "amazon" and model in [
            "amazon.titan-embed-image-v1",
            "amazon.titan-embed-text-v1",
            "amazon.titan-embed-text-v2:0",
        ]:
            batch_data = []
            for i in input:
                if model == "amazon.titan-embed-image-v1":
                    transformed_request: AmazonEmbeddingRequest = (
                        AmazonTitanMultimodalEmbeddingG1Config()._transform_request(
                            input=i, inference_params=inference_params
                        )
                    )
                elif model == "amazon.titan-embed-text-v1":
                    transformed_request = AmazonTitanG1Config()._transform_request(
                        input=i, inference_params=inference_params
                    )
                elif model == "amazon.titan-embed-text-v2:0":
                    transformed_request = AmazonTitanV2Config()._transform_request(
                        input=i, inference_params=inference_params
                    )
                else:
                    raise Exception(
                        "Unmapped model. Received={}. Expected={}".format(
                            model,
                            [
                                "amazon.titan-embed-image-v1",
                                "amazon.titan-embed-text-v1",
                                "amazon.titan-embed-text-v2:0",
                            ],
                        )
                    )
                batch_data.append(transformed_request)
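                # e.g. for "amazon.titan-embed-text-v1" each transformed_request
                # is roughly {"inputText": "<one input string>"}; the exact shape
                # is owned by the per-model *Config._transform_request above.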

        ### SET RUNTIME ENDPOINT ###
        endpoint_url, proxy_endpoint_url = self.get_runtime_endpoint(
            api_base=api_base,
            aws_bedrock_runtime_endpoint=optional_params.pop(
                "aws_bedrock_runtime_endpoint", None
            ),
            aws_region_name=aws_region_name,
        )
        endpoint_url = f"{endpoint_url}/model/{modelId}/invoke"
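        # e.g. https://bedrock-runtime.us-west-2.amazonaws.com/model/amazon.titan-embed-text-v1/invoke
        # (illustrative; the base URL comes from get_runtime_endpoint above)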

        if batch_data is not None:
            if aembedding:
                return self._async_single_func_embeddings(  # type: ignore
                    client=(
                        client
                        if client is not None and isinstance(client, AsyncHTTPHandler)
                        else None
                    ),
                    timeout=timeout,
                    batch_data=batch_data,
                    credentials=credentials,
                    extra_headers=extra_headers,
                    endpoint_url=endpoint_url,
                    aws_region_name=aws_region_name,
                    model=model,
                    logging_obj=logging_obj,
                )
            return self._single_func_embeddings(
                client=(
                    client
                    if client is not None and isinstance(client, HTTPHandler)
                    else None
                ),
                timeout=timeout,
                batch_data=batch_data,
                credentials=credentials,
                extra_headers=extra_headers,
                endpoint_url=endpoint_url,
                aws_region_name=aws_region_name,
                model=model,
                logging_obj=logging_obj,
            )
        elif data is None:
            raise Exception("Unable to map Bedrock request to provider")

        sigv4 = SigV4Auth(credentials, "bedrock", aws_region_name)
        headers = {"Content-Type": "application/json"}
        if extra_headers is not None:
            headers = {"Content-Type": "application/json", **extra_headers}

        request = AWSRequest(
            method="POST", url=endpoint_url, data=json.dumps(data), headers=headers
        )
        sigv4.add_auth(request)
        if (
            extra_headers is not None and "Authorization" in extra_headers
        ):  # prevent sigv4 from overwriting the auth header
            request.headers["Authorization"] = extra_headers["Authorization"]
        prepped = request.prepare()

        ## ROUTING ##
        return cohere_embedding(
            model=model,
            input=input,
            model_response=model_response,
            logging_obj=logging_obj,
            optional_params=optional_params,
            encoding=encoding,
            data=data,  # type: ignore
            complete_api_base=prepped.url,
            api_key=None,
            aembedding=aembedding,
            timeout=timeout,
            client=client,
            headers=prepped.headers,  # type: ignore
        )