Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 03:04:13 +00:00)
* feat(proxy/_types.py): add lago billing to callbacks ui
  Closes https://github.com/BerriAI/litellm/issues/5472
* fix(anthropic.py): return anthropic prompt caching information
  Fixes https://github.com/BerriAI/litellm/issues/5364
* feat(bedrock/chat.py): support 'json_schema' for bedrock models
  Closes https://github.com/BerriAI/litellm/issues/5434
* fix(bedrock/embed/embeddings.py): support async embeddings for amazon titan models
* fix: linting fixes
* fix: handle key errors
* fix(bedrock/chat.py): fix bedrock ai21 streaming object
* feat(bedrock/embed): support bedrock embedding optional params
* fix(databricks.py): fix usage chunk
* fix(internal_user_endpoints.py): apply internal user defaults, if user role updated
  Fixes issue where user update wouldn't apply defaults
* feat(slack_alerting.py): provide multiple slack channels for a given alert type
  multiple channels might be interested in receiving an alert for a given type
* docs(alerting.md): add multiple channel alerting to docs
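As an illustration of the json_schema item above: litellm exposes structured output through the OpenAI-style response_format parameter, so a Bedrock model can be asked for schema-conforming JSON. A minimal sketch; the model id and schema below are assumptions for illustration, not taken from this commit:

import litellm

# Sketch only: request JSON conforming to a schema from a Bedrock model
# via the OpenAI-style response_format parameter. The model id is assumed.
response = litellm.completion(
    model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
    messages=[{"role": "user", "content": "Name two primary colors."}],
    response_format={
        "type": "json_schema",
        "json_schema": {
            "name": "colors",
            "schema": {
                "type": "object",
                "properties": {
                    "colors": {"type": "array", "items": {"type": "string"}}
                },
                "required": ["colors"],
            },
        },
    },
)
print(response.choices[0].message.content)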
40 lines · 1.2 KiB · Python
"""
|
|
Transformation logic from OpenAI /v1/embeddings format to Bedrock Cohere /invoke format.
|
|
|
|
Why separate file? Make it easy to see how transformation works
|
|
"""
|
|
|
|
from typing import List
|
|
|
|
import litellm
|
|
from litellm.types.llms.bedrock import CohereEmbeddingRequest, CohereEmbeddingResponse
|
|
from litellm.types.utils import Embedding, EmbeddingResponse
|
|
|
|
|
|
class BedrockCohereEmbeddingConfig:
|
|
def __init__(self) -> None:
|
|
pass
|
|
|
|
def get_supported_openai_params(self) -> List[str]:
|
|
return ["encoding_format"]
|
|
|
|
def map_openai_params(
|
|
self, non_default_params: dict, optional_params: dict
|
|
) -> dict:
|
|
for k, v in non_default_params.items():
|
|
if k == "encoding_format":
|
|
optional_params["embedding_types"] = v
|
|
return optional_params
|
|
|
|
def _transform_request(
|
|
self, input: List[str], inference_params: dict
|
|
) -> CohereEmbeddingRequest:
|
|
transformed_request = CohereEmbeddingRequest(
|
|
texts=input,
|
|
input_type=litellm.COHERE_DEFAULT_EMBEDDING_INPUT_TYPE, # type: ignore
|
|
)
|
|
|
|
for k, v in inference_params.items():
|
|
transformed_request[k] = v # type: ignore
|
|
|
|
return transformed_request
|
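A usage sketch showing how the two transformation steps compose; the import path is inferred from the file's location in the litellm repo and is an assumption:

from litellm.llms.bedrock.embed.cohere_transformation import (  # assumed module path
    BedrockCohereEmbeddingConfig,
)

config = BedrockCohereEmbeddingConfig()

# Step 1: map the OpenAI param name onto Cohere's.
optional_params = config.map_openai_params(
    non_default_params={"encoding_format": "float"}, optional_params={}
)
# optional_params == {"embedding_types": "float"}

# Step 2: build the Bedrock Cohere /invoke request body.
request = config._transform_request(
    input=["hello world"], inference_params=optional_params
)
# request == {
#     "texts": ["hello world"],
#     "input_type": litellm.COHERE_DEFAULT_EMBEDDING_INPUT_TYPE,
#     "embedding_types": "float",
# }
print(request)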