LiteLLM Minor Fixes & Improvements (10/24/2024) (#6421)
* fix(utils.py): support passing dynamic api base to validate_environment. Returns True if only an api base is required and an api base is passed.
* fix(litellm_pre_call_utils.py): feature flag sending client headers to llm api. Fixes https://github.com/BerriAI/litellm/issues/6410
* fix(anthropic/chat/transformation.py): return correct error message
* fix(http_handler.py): add error response text in places where we expect it
* fix(factory.py): handle base case of no non-system messages to bedrock. Fixes https://github.com/BerriAI/litellm/issues/6411
* feat(cohere/embed): support Cohere image embeddings (see the sketch after this list). Closes https://github.com/BerriAI/litellm/issues/6413
* fix(__init__.py): fix linting error
* docs(supported_embedding.md): add image embedding example to docs
* feat(cohere/embed): use the usage returned by Cohere's embedding API for cost calculation
* build(model_prices_and_context_window.json): add embed-english-v3.0 details (image cost + 'supports_image_input' flag)
* fix(cohere_transformation.py): fix linting error
* test(test_proxy_server.py): clean up test
* test: clean up test
* fix: fix linting errors
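For context, a minimal sketch of how the new Cohere image-embedding path could be exercised through the public litellm.embedding entry point; the local file path and data URI are illustrative, while embed-english-v3.0 is the model this commit adds image pricing for:

import base64

import litellm

# Base64-encode a local image; the new transformation logic detects
# base64-encoded inputs and sends them to Cohere as image embeddings.
with open("image.jpg", "rb") as f:
    encoded = base64.b64encode(f.read()).decode("utf-8")

response = litellm.embedding(
    model="cohere/embed-english-v3.0",
    input=[f"data:image/jpeg;base64,{encoded}"],
)
print(response.usage)  # usage is now derived from Cohere's billed_units

The validate_environment change can be sketched the same way. litellm.validate_environment is the helper utils.py exposes; ollama is used here as a provider that (to my understanding) only needs an api base, so treat that choice and the exact return shape as assumptions:

import litellm

# New in this commit: a dynamically passed api_base satisfies the check
# for providers where only an api base is required.
env_check = litellm.validate_environment(
    model="ollama/llama2",
    api_base="http://localhost:11434",
)
print(env_check)  # e.g. {"keys_in_environment": True, "missing_keys": []}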
This commit is contained in:
parent
17082d5e7d
commit
197655bf2a
23 changed files with 417 additions and 150 deletions
litellm/llms/cohere/embed/transformation.py (new file, 160 additions)
@@ -0,0 +1,160 @@
"""
Transformation logic from OpenAI /v1/embeddings format to Cohere's /v1/embed format.

Why separate file? Make it easy to see how transformation works

Covers
- v3 embedding models
- v2 embedding models

Docs - https://docs.cohere.com/v2/reference/embed
"""

import types
from typing import Any, List, Optional, Union

import httpx

from litellm import COHERE_DEFAULT_EMBEDDING_INPUT_TYPE
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
from litellm.types.llms.bedrock import (
    COHERE_EMBEDDING_INPUT_TYPES,
    CohereEmbeddingRequest,
    CohereEmbeddingRequestWithModel,
)
from litellm.types.utils import (
    Embedding,
    EmbeddingResponse,
    PromptTokensDetailsWrapper,
    Usage,
)
from litellm.utils import is_base64_encoded


class CohereEmbeddingConfig:
    """
    Reference: https://docs.cohere.com/v2/reference/embed
    """

    def __init__(self) -> None:
        pass

    def get_supported_openai_params(self) -> List[str]:
        return ["encoding_format"]

    def map_openai_params(
        self, non_default_params: dict, optional_params: dict
    ) -> dict:
        # OpenAI's 'encoding_format' maps onto Cohere's 'embedding_types' param.
        for k, v in non_default_params.items():
            if k == "encoding_format":
                optional_params["embedding_types"] = v
        return optional_params

    def _is_v3_model(self, model: str) -> bool:
        return "3" in model

    def _transform_request(
        self, model: str, input: List[str], inference_params: dict
    ) -> CohereEmbeddingRequestWithModel:
        # Check whether any input string is a b64-encoded image.
        is_encoded = any(is_base64_encoded(input_str) for input_str in input)

        if is_encoded:  # send b64-encoded inputs as image embeddings
            transformed_request = CohereEmbeddingRequestWithModel(
                model=model,
                images=input,
                input_type="image",
            )
        else:
            transformed_request = CohereEmbeddingRequestWithModel(
                model=model,
                texts=input,
                input_type=COHERE_DEFAULT_EMBEDDING_INPUT_TYPE,
            )

        for k, v in inference_params.items():
            transformed_request[k] = v  # type: ignore

        return transformed_request

    def _calculate_usage(self, input: List[str], encoding: Any, meta: dict) -> Usage:
        input_tokens = 0

        text_tokens: Optional[int] = meta.get("billed_units", {}).get("input_tokens")
        image_tokens: Optional[int] = meta.get("billed_units", {}).get("images")

        prompt_tokens_details: Optional[PromptTokensDetailsWrapper] = None
        if image_tokens is None and text_tokens is None:
            # No billed_units in the response - fall back to local tokenization.
            for text in input:
                input_tokens += len(encoding.encode(text))
        else:
            # Prefer the usage Cohere actually billed for.
            prompt_tokens_details = PromptTokensDetailsWrapper(
                image_tokens=image_tokens,
                text_tokens=text_tokens,
            )
            if image_tokens:
                input_tokens += image_tokens
            if text_tokens:
                input_tokens += text_tokens

        return Usage(
            prompt_tokens=input_tokens,
            completion_tokens=0,
            total_tokens=input_tokens,
            prompt_tokens_details=prompt_tokens_details,
        )

    def _transform_response(
        self,
        response: httpx.Response,
        api_key: Optional[str],
        logging_obj: LiteLLMLoggingObj,
        data: Union[dict, CohereEmbeddingRequest],
        model_response: EmbeddingResponse,
        model: str,
        encoding: Any,
        input: list,
    ) -> EmbeddingResponse:
        response_json = response.json()
        ## LOGGING
        logging_obj.post_call(
            input=input,
            api_key=api_key,
            additional_args={"complete_input_dict": data},
            original_response=response_json,
        )
        """
        response
        {
            'object': "list",
            'data': [
            ],
            'model',
            'usage'
        }
        """
        embeddings = response_json["embeddings"]
        output_data = []
        for idx, embedding in enumerate(embeddings):
            output_data.append(
                {"object": "embedding", "index": idx, "embedding": embedding}
            )
        model_response.object = "list"
        model_response.data = output_data
        model_response.model = model

        setattr(
            model_response,
            "usage",
            self._calculate_usage(input, encoding, response_json.get("meta", {})),
        )

        return model_response
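A quick sketch of the transformation itself, driving CohereEmbeddingConfig directly. This assumes the module above is importable from an installed litellm; the billed_units numbers are made up for illustration:

from litellm.llms.cohere.embed.transformation import CohereEmbeddingConfig

config = CohereEmbeddingConfig()

# Plain-text inputs produce {"model": ..., "texts": [...], "input_type": <default>}.
text_request = config._transform_request(
    model="embed-english-v3.0",
    input=["hello world"],
    inference_params={},
)
print(text_request)

# OpenAI's encoding_format is mapped onto Cohere's embedding_types.
params = config.map_openai_params(
    non_default_params={"encoding_format": "float"},
    optional_params={},
)
print(params)  # {"embedding_types": "float"}

# When Cohere returns billed_units, usage is taken from there instead of
# local tokenization; 10 image tokens and 5 text tokens are sample values.
usage = config._calculate_usage(
    input=["hello world"],
    encoding=None,  # unused when billed_units is present
    meta={"billed_units": {"images": 10, "input_tokens": 5}},
)
print(usage.prompt_tokens)  # 15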