Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-27 11:43:54 +00:00)
add BaseResponsesAPIConfig

commit f5e452556b (parent 8dfd1dc136)
2 changed files with 25 additions and 26 deletions
litellm/llms/base_llm/responses/transformation.py

@@ -52,17 +52,6 @@ class BaseResponsesAPIConfig(ABC):
 
         pass
 
-    @abstractmethod
-    def validate_environment(
-        self,
-        headers: dict,
-        model: str,
-        optional_params: dict,
-        api_key: Optional[str] = None,
-        api_base: Optional[str] = None,
-    ) -> dict:
-        pass
-
     def get_complete_url(
         self,
         api_base: Optional[str],
@@ -81,18 +70,18 @@ class BaseResponsesAPIConfig(ABC):
             raise ValueError("api_base is required")
         return api_base
 
-    @abstractmethod
-    def transform_request(
-        self,
-        model: str,
-        optional_params: dict,
-        litellm_params: dict,
-        headers: dict,
-    ) -> dict:
-        pass
+    # @abstractmethod
+    # def transform_request(
+    #     self,
+    #     model: str,
+    #     optional_params: dict,
+    #     litellm_params: dict,
+    #     headers: dict,
+    # ) -> dict:
+    #     pass
 
-    @abstractmethod
-    def transform_response(
-        self,
-    ):
-        pass
+    # @abstractmethod
+    # def transform_response(
+    #     self,
+    # ):
+    #     pass
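With validate_environment deleted and the transform_request/transform_response pair commented out, get_complete_url is the only method body these hunks leave active on BaseResponsesAPIConfig. A minimal sketch of a provider subclass against that trimmed interface (the class name, endpoint, and single-parameter signature are assumptions for illustration; the hunk truncates get_complete_url's real parameter list):

from typing import Optional

from litellm.llms.base_llm.responses.transformation import BaseResponsesAPIConfig


class ExampleResponsesAPIConfig(BaseResponsesAPIConfig):
    # Hypothetical subclass, not part of the commit. The base
    # get_complete_url raises when api_base is None; this override
    # substitutes a provider default instead.
    def get_complete_url(self, api_base: Optional[str]) -> str:
        return api_base or "https://api.example.com/v1/responses"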
litellm/utils.py

@@ -209,6 +209,7 @@ from litellm.llms.base_llm.image_variations.transformation import (
     BaseImageVariationConfig,
 )
 from litellm.llms.base_llm.rerank.transformation import BaseRerankConfig
+from litellm.llms.base_llm.responses.transformation import BaseResponsesAPIConfig
 
 from ._logging import _is_debugging_on, verbose_logger
 from .caching.caching import (
@@ -5103,7 +5104,7 @@ def prompt_token_calculator(model, messages):
         from anthropic import AI_PROMPT, HUMAN_PROMPT, Anthropic
 
         anthropic_obj = Anthropic()
-        num_tokens = anthropic_obj.count_tokens(text)
+        num_tokens = anthropic_obj.count_tokens(text)  # type: ignore
     else:
         num_tokens = len(encoding.encode(text))
     return num_tokens
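The only change in this hunk is the trailing `# type: ignore`; newer anthropic SDKs deprecate the client-side count_tokens helper, which presumably makes type checkers flag the call. For context, a standalone sketch of the same fallback logic (the helper name and cl100k_base encoding are assumptions, not part of the diff):

import tiktoken


def count_text_tokens(text: str, model: str) -> int:
    # Hypothetical helper mirroring the branch above: Anthropic's
    # client-side counter for claude models, tiktoken for everything else.
    if "claude" in model:
        from anthropic import Anthropic

        # Deprecated in newer anthropic SDKs; silenced here the same
        # way the diff does.
        return Anthropic().count_tokens(text)  # type: ignore
    encoding = tiktoken.get_encoding("cl100k_base")
    return len(encoding.encode(text))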
@@ -6275,6 +6276,15 @@ class ProviderConfigManager:
             return litellm.DeepgramAudioTranscriptionConfig()
         return None
 
+    @staticmethod
+    def get_provider_responses_api_config(
+        model: str,
+        provider: LlmProviders,
+    ) -> Optional[BaseResponsesAPIConfig]:
+        if litellm.LlmProviders.OPENAI == provider:
+            return litellm.OpenAIResponsesAPIConfig()
+        return None
+
     @staticmethod
     def get_provider_text_completion_config(
         model: str,
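A usage sketch for the new lookup, mirroring how the sibling get_provider_*_config helpers are called (assumes ProviderConfigManager is imported from litellm.utils, where this hunk sits; the model string is illustrative):

import litellm
from litellm.utils import ProviderConfigManager

config = ProviderConfigManager.get_provider_responses_api_config(
    model="gpt-4o",
    provider=litellm.LlmProviders.OPENAI,
)
# Only OpenAI is wired up in this commit; any other provider yields None.
if config is not None:
    print(type(config).__name__)  # OpenAIResponsesAPIConfig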