add BaseResponsesAPIConfig

This commit is contained in:
Ishaan Jaff 2025-03-11 15:57:53 -07:00
parent 8dfd1dc136
commit f5e452556b
2 changed files with 25 additions and 26 deletions

View file

@@ -52,17 +52,6 @@ class BaseResponsesAPIConfig(ABC):
pass
@abstractmethod
def validate_environment(
self,
headers: dict,
model: str,
optional_params: dict,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
) -> dict:
"""Return the request headers for this provider (abstract).

NOTE(review): stub only — implementations presumably fold auth material
(api_key, api_base) into `headers`; confirm against concrete subclasses.
"""
pass
def get_complete_url(
self,
api_base: Optional[str],
@@ -81,18 +70,18 @@ class BaseResponsesAPIConfig(ABC):
raise ValueError("api_base is required")
return api_base
@abstractmethod
def transform_request(
self,
model: str,
optional_params: dict,
litellm_params: dict,
headers: dict,
) -> dict:
"""Build the provider-specific Responses API request body (abstract).

NOTE(review): stub only — the dict returned is presumably the JSON
payload sent to the provider; verify against implementations.
"""
pass
# @abstractmethod
# def transform_request(
# self,
# model: str,
# optional_params: dict,
# litellm_params: dict,
# headers: dict,
# ) -> dict:
# pass
@abstractmethod
def transform_response(
self,
):
"""Transform the raw provider response (abstract; full signature TBD)."""
pass
# @abstractmethod
# def transform_response(
# self,
# ):
# pass

View file

@@ -209,6 +209,7 @@ from litellm.llms.base_llm.image_variations.transformation import (
BaseImageVariationConfig,
)
from litellm.llms.base_llm.rerank.transformation import BaseRerankConfig
from litellm.llms.base_llm.responses.transformation import BaseResponsesAPIConfig
from ._logging import _is_debugging_on, verbose_logger
from .caching.caching import (
@@ -5103,7 +5104,7 @@ def prompt_token_calculator(model, messages):
from anthropic import AI_PROMPT, HUMAN_PROMPT, Anthropic
anthropic_obj = Anthropic()
num_tokens = anthropic_obj.count_tokens(text)
num_tokens = anthropic_obj.count_tokens(text) # type: ignore
else:
num_tokens = len(encoding.encode(text))
return num_tokens
@@ -6275,6 +6276,15 @@ class ProviderConfigManager:
return litellm.DeepgramAudioTranscriptionConfig()
return None
@staticmethod
def get_provider_responses_api_config(
    model: str,
    provider: LlmProviders,
) -> Optional[BaseResponsesAPIConfig]:
    """Look up the Responses API config object for a provider.

    Returns an ``OpenAIResponsesAPIConfig`` for the OpenAI provider;
    every other provider yields ``None`` (no Responses API support).
    ``model`` is accepted for signature parity but not consulted here.
    """
    if provider == litellm.LlmProviders.OPENAI:
        return litellm.OpenAIResponsesAPIConfig()
    return None
@staticmethod
def get_provider_text_completion_config(
model: str,