mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 10:44:24 +00:00
All checks were successful
Read Version from pyproject.toml / read-version (push) Successful in 46s
* VoyageEmbeddingConfig
* fix voyage logic to get params
* add voyage embedding transformation
* add get_provider_embedding_config
* use BaseEmbeddingConfig
* voyage clean up
* use llm http handler for embedding transformations
* test_voyage_ai_embedding_extra_params
* add voyage async
* test_voyage_ai_embedding_extra_params
* add async for llm http handler
* update BaseLLMEmbeddingTest
* test_voyage_ai_embedding_extra_params
* fix linting
* fix get_provider_embedding_config
* fix anthropic text test
* update location of base/chat/transformation
* fix import path
* fix IBMWatsonXAIConfig
26 lines
859 B
Python
26 lines
859 B
Python
"""
|
|
Translate from OpenAI's `/v1/chat/completions` to Sagemaker's `/invocations` API
|
|
|
|
Called if Sagemaker endpoint supports HF Messages API.
|
|
|
|
LiteLLM Docs: https://docs.litellm.ai/docs/providers/aws_sagemaker#sagemaker-messages-api
|
|
Huggingface Docs: https://huggingface.co/docs/text-generation-inference/en/messages_api
|
|
"""
|
|
|
|
from typing import Union
|
|
|
|
from httpx._models import Headers
|
|
|
|
from litellm.llms.base_llm.chat.transformation import BaseLLMException
|
|
|
|
from ...openai.chat.gpt_transformation import OpenAIGPTConfig
|
|
from ..common_utils import SagemakerError
|
|
|
|
|
|
class SagemakerChatConfig(OpenAIGPTConfig):
    """OpenAI-compatible chat config for Sagemaker endpoints speaking the
    HF Messages API.

    Inherits all request/response transformation behavior from
    ``OpenAIGPTConfig``; only error mapping is Sagemaker-specific.
    """

    def get_error_class(
        self, error_message: str, status_code: int, headers: Union[dict, Headers]
    ) -> BaseLLMException:
        """Wrap an HTTP error response in a ``SagemakerError``.

        Args:
            error_message: Body/message text returned by the endpoint.
            status_code: HTTP status code of the failed response.
            headers: Response headers (plain dict or httpx ``Headers``).

        Returns:
            A ``SagemakerError`` carrying the message, status, and headers.
        """
        return SagemakerError(
            message=error_message,
            status_code=status_code,
            headers=headers,
        )