mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-27 03:34:10 +00:00
* fix(utils.py): support passing dynamic api base to validate_environment Returns True if just api base is required and api base is passed * fix(litellm_pre_call_utils.py): feature flag sending client headers to llm api Fixes https://github.com/BerriAI/litellm/issues/6410 * fix(anthropic/chat/transformation.py): return correct error message * fix(http_handler.py): add error response text in places where we expect it * fix(factory.py): handle base case of no non-system messages to bedrock Fixes https://github.com/BerriAI/litellm/issues/6411 * feat(cohere/embed): Support cohere image embeddings Closes https://github.com/BerriAI/litellm/issues/6413 * fix(__init__.py): fix linting error * docs(supported_embedding.md): add image embedding example to docs * feat(cohere/embed): use cohere embedding returned usage for cost calc * build(model_prices_and_context_window.json): add embed-english-v3.0 details (image cost + 'supports_image_input' flag) * fix(cohere_transformation.py): fix linting error * test(test_proxy_server.py): cleanup test * test: cleanup test * fix: fix linting errors
47 lines
1.5 KiB
Python
47 lines
1.5 KiB
Python
"""
|
|
Transformation logic from OpenAI /v1/embeddings format to Bedrock Cohere /invoke format.
|
|
|
|
Why separate file? Make it easy to see how transformation works
|
|
"""
from typing import List

import litellm
from litellm.llms.cohere.embed.transformation import CohereEmbeddingConfig
from litellm.types.llms.bedrock import CohereEmbeddingRequest, CohereEmbeddingResponse
from litellm.types.utils import Embedding, EmbeddingResponse


class BedrockCohereEmbeddingConfig:
    """Adapter between OpenAI /v1/embeddings parameters and the request
    body Bedrock's Cohere /invoke endpoint expects.

    Delegates the generic OpenAI->Cohere transformation to
    ``CohereEmbeddingConfig`` and then prunes the result down to the keys
    declared on ``CohereEmbeddingRequest``.
    """

    def __init__(self) -> None:
        pass

    def get_supported_openai_params(self) -> List[str]:
        """Return the OpenAI embedding params this config can translate."""
        return ["encoding_format"]

    def map_openai_params(
        self, non_default_params: dict, optional_params: dict
    ) -> dict:
        """Translate supported OpenAI params into their Cohere equivalents.

        OpenAI's ``encoding_format`` maps to Cohere's ``embedding_types``;
        unsupported params are ignored. ``optional_params`` is mutated in
        place and returned.
        """
        if "encoding_format" in non_default_params:
            optional_params["embedding_types"] = non_default_params[
                "encoding_format"
            ]
        return optional_params

    def _is_v3_model(self, model: str) -> bool:
        # Heuristic: v3 Cohere embed models carry a "3" in their name
        # (e.g. "embed-english-v3.0"). NOTE(review): this also matches any
        # other model id containing "3" — kept as-is to preserve behavior.
        return "3" in model

    def _transform_request(
        self, model: str, input: List[str], inference_params: dict
    ) -> CohereEmbeddingRequest:
        """Build a Bedrock-compatible Cohere embedding request.

        Runs the shared Cohere transformation first, then copies across
        only the fields annotated on ``CohereEmbeddingRequest`` —
        presumably to drop keys Bedrock's /invoke endpoint does not
        accept.
        """
        base_request = CohereEmbeddingConfig()._transform_request(
            model, input, inference_params
        )

        # input_type is seeded explicitly; the loop below re-copies it
        # along with every other annotated field that is present.
        pruned_request = CohereEmbeddingRequest(
            input_type=base_request["input_type"],
        )
        for field in CohereEmbeddingRequest.__annotations__.keys():
            if field in base_request:
                pruned_request[field] = base_request[field]  # type: ignore

        return pruned_request