mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-27 03:34:10 +00:00)
Add exception mapping for responses API

commit c2ed7add37 (parent f632e93dd1)
2 changed files with 99 additions and 72 deletions

@@ -128,7 +128,6 @@ def exception_type(  # type: ignore  # noqa: PLR0915
     extra_kwargs: Optional[dict] = {},
 ):
     """Maps an LLM Provider Exception to OpenAI Exception Format"""
-
     if any(
         isinstance(original_exception, exc_type)
         for exc_type in litellm.LITELLM_EXCEPTION_TYPES
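
The if any(isinstance(...)) context above is the guard that lets already-mapped exceptions pass through exception_type untouched, which matters below: the new except blocks in the responses API route every error through this function. A minimal sketch of the guard, with stand-in exception classes (MAPPED_TYPES plays the role of litellm.LITELLM_EXCEPTION_TYPES; this is not LiteLLM's actual implementation):

    # Stand-in exception classes; LiteLLM's real list lives in
    # litellm.LITELLM_EXCEPTION_TYPES.
    class BadRequestError(Exception): ...
    class RateLimitError(Exception): ...

    MAPPED_TYPES = [BadRequestError, RateLimitError]

    def exception_type_sketch(original_exception: Exception) -> Exception:
        # Already an OpenAI-style LiteLLM exception: return it unchanged.
        if any(isinstance(original_exception, exc_type) for exc_type in MAPPED_TYPES):
            return original_exception
        # Otherwise provider-specific mapping logic would run here.
        return BadRequestError(str(original_exception))
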
@@ -58,15 +58,24 @@ async def aresponses(
     extra_query: Optional[Dict[str, Any]] = None,
     extra_body: Optional[Dict[str, Any]] = None,
     timeout: Optional[Union[float, httpx.Timeout]] = None,
+    # LiteLLM specific params,
+    custom_llm_provider: Optional[str] = None,
     **kwargs,
 ) -> Union[ResponsesAPIResponse, BaseResponsesAPIStreamingIterator]:
     """
     Async: Handles responses API requests by reusing the synchronous function
     """
+    local_vars = locals()
     try:
         loop = asyncio.get_event_loop()
         kwargs["aresponses"] = True
 
+        # get custom llm provider so we can use this for mapping exceptions
+        if custom_llm_provider is None:
+            _, custom_llm_provider, _, _ = litellm.get_llm_provider(
+                model=model, api_base=local_vars.get("base_url", None)
+            )
+
         func = partial(
             responses,
             input=input,
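
The added block resolves the provider up front so the exception handler added further down can map errors per provider. As the unpacking in the hunk shows, litellm.get_llm_provider returns a 4-tuple of (model, custom_llm_provider, dynamic_api_key, dynamic_api_base). A small usage sketch (the model string is illustrative):

    import litellm

    # "openai/gpt-4o" resolves to provider "openai"; the api_base argument
    # is optional and only needed for custom endpoints.
    model, custom_llm_provider, _, _ = litellm.get_llm_provider(model="openai/gpt-4o")
    print(custom_llm_provider)  # -> "openai"
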
@@ -91,6 +100,7 @@ async def aresponses(
             extra_query=extra_query,
             extra_body=extra_body,
             timeout=timeout,
+            custom_llm_provider=custom_llm_provider,
             **kwargs,
         )
 
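
The resolved provider is then threaded into the functools.partial that aresponses builds around the synchronous responses function. A generic sketch of that delegation pattern, with illustrative names (LiteLLM's actual dispatch additionally awaits init_response when the sync entry point hands back a coroutine, as the next hunk's context shows):

    import asyncio
    from functools import partial

    def work(x: int, provider: str = "openai") -> str:
        return f"{provider}:{x}"  # stand-in for the blocking sync call

    async def awork(x: int, provider: str) -> str:
        loop = asyncio.get_event_loop()
        func = partial(work, x, provider=provider)  # bind args up front
        return await loop.run_in_executor(None, func)  # keep the event loop free

    print(asyncio.run(awork(1, "openai")))  # -> "openai:1"
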
@@ -104,7 +114,13 @@ async def aresponses(
             response = init_response
         return response
     except Exception as e:
-        raise e
+        raise litellm.exception_type(
+            model=model,
+            custom_llm_provider=custom_llm_provider,
+            original_exception=e,
+            completion_kwargs=local_vars,
+            extra_kwargs=kwargs,
+        )
 
 
 @client
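
With this change an error inside aresponses is re-raised through litellm.exception_type instead of propagating raw, so callers can handle OpenAI-style exception classes uniformly. A hedged usage sketch (the model name and the caught class are illustrative; which class is actually raised depends on the underlying failure):

    import asyncio
    import litellm

    async def main() -> None:
        try:
            await litellm.aresponses(model="openai/gpt-4o", input="hello")
        except litellm.APIConnectionError as e:
            # Transport/provider failures now arrive as mapped LiteLLM
            # exceptions rather than raw httpx or provider errors.
            print(type(e).__name__, e.message)

    asyncio.run(main())
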
@@ -133,85 +149,97 @@ def responses(
     extra_query: Optional[Dict[str, Any]] = None,
     extra_body: Optional[Dict[str, Any]] = None,
     timeout: Optional[Union[float, httpx.Timeout]] = None,
+    # LiteLLM specific params,
+    custom_llm_provider: Optional[str] = None,
     **kwargs,
 ):
     """
     Synchronous version of the Responses API.
     Uses the synchronous HTTP handler to make requests.
     """
-    litellm_logging_obj: LiteLLMLoggingObj = kwargs.get("litellm_logging_obj")  # type: ignore
-    litellm_call_id: Optional[str] = kwargs.get("litellm_call_id", None)
-    _is_async = kwargs.pop("aresponses", False) is True
-
-    # get llm provider logic
-    litellm_params = GenericLiteLLMParams(**kwargs)
-    model, custom_llm_provider, dynamic_api_key, dynamic_api_base = (
-        litellm.get_llm_provider(
-            model=model,
-            custom_llm_provider=kwargs.get("custom_llm_provider", None),
-            api_base=litellm_params.api_base,
-            api_key=litellm_params.api_key,
-        )
-    )
-
-    # get provider config
-    responses_api_provider_config: Optional[BaseResponsesAPIConfig] = (
-        ProviderConfigManager.get_provider_responses_api_config(
-            model=model,
-            provider=litellm.LlmProviders(custom_llm_provider),
-        )
-    )
-
-    if responses_api_provider_config is None:
-        raise litellm.BadRequestError(
-            model=model,
-            llm_provider=custom_llm_provider,
-            message=f"Responses API not available for custom_llm_provider={custom_llm_provider}, model: {model}",
-        )
-
-    # Get all parameters using locals() and combine with kwargs
     local_vars = locals()
-    local_vars.update(kwargs)
-    # Get ResponsesAPIOptionalRequestParams with only valid parameters
-    response_api_optional_params: ResponsesAPIOptionalRequestParams = (
-        ResponsesAPIRequestUtils.get_requested_response_api_optional_param(local_vars)
-    )
-
-    # Get optional parameters for the responses API
-    responses_api_request_params: Dict = (
-        ResponsesAPIRequestUtils.get_optional_params_responses_api(
-            model=model,
-            responses_api_provider_config=responses_api_provider_config,
-            response_api_optional_params=response_api_optional_params,
-        )
-    )
-
-    # Pre Call logging
-    litellm_logging_obj.update_environment_variables(
-        model=model,
-        user=user,
-        optional_params=dict(responses_api_request_params),
-        litellm_params={
-            "litellm_call_id": litellm_call_id,
-            **responses_api_request_params,
-        },
-        custom_llm_provider=custom_llm_provider,
-    )
-
-    # Call the handler with _is_async flag instead of directly calling the async handler
-    response = base_llm_http_handler.response_api_handler(
-        model=model,
-        input=input,
-        responses_api_provider_config=responses_api_provider_config,
-        response_api_optional_request_params=responses_api_request_params,
-        custom_llm_provider=custom_llm_provider,
-        litellm_params=litellm_params,
-        logging_obj=litellm_logging_obj,
-        extra_headers=extra_headers,
-        extra_body=extra_body,
-        timeout=timeout or request_timeout,
-        _is_async=_is_async,
-        client=kwargs.get("client"),
-    )
-
-    return response
+    try:
+        litellm_logging_obj: LiteLLMLoggingObj = kwargs.get("litellm_logging_obj")  # type: ignore
+        litellm_call_id: Optional[str] = kwargs.get("litellm_call_id", None)
+        _is_async = kwargs.pop("aresponses", False) is True
+
+        # get llm provider logic
+        litellm_params = GenericLiteLLMParams(**kwargs)
+        model, custom_llm_provider, dynamic_api_key, dynamic_api_base = (
+            litellm.get_llm_provider(
+                model=model,
+                custom_llm_provider=custom_llm_provider,
+                api_base=litellm_params.api_base,
+                api_key=litellm_params.api_key,
+            )
+        )
+
+        # get provider config
+        responses_api_provider_config: Optional[BaseResponsesAPIConfig] = (
+            ProviderConfigManager.get_provider_responses_api_config(
+                model=model,
+                provider=litellm.LlmProviders(custom_llm_provider),
+            )
+        )
+
+        if responses_api_provider_config is None:
+            raise litellm.BadRequestError(
+                model=model,
+                llm_provider=custom_llm_provider,
+                message=f"Responses API not available for custom_llm_provider={custom_llm_provider}, model: {model}",
+            )
+
+        local_vars.update(kwargs)
+        # Get ResponsesAPIOptionalRequestParams with only valid parameters
+        response_api_optional_params: ResponsesAPIOptionalRequestParams = (
+            ResponsesAPIRequestUtils.get_requested_response_api_optional_param(
+                local_vars
+            )
+        )
+
+        # Get optional parameters for the responses API
+        responses_api_request_params: Dict = (
+            ResponsesAPIRequestUtils.get_optional_params_responses_api(
+                model=model,
+                responses_api_provider_config=responses_api_provider_config,
+                response_api_optional_params=response_api_optional_params,
+            )
+        )
+
+        # Pre Call logging
+        litellm_logging_obj.update_environment_variables(
+            model=model,
+            user=user,
+            optional_params=dict(responses_api_request_params),
+            litellm_params={
+                "litellm_call_id": litellm_call_id,
+                **responses_api_request_params,
+            },
+            custom_llm_provider=custom_llm_provider,
+        )
+
+        # Call the handler with _is_async flag instead of directly calling the async handler
+        response = base_llm_http_handler.response_api_handler(
+            model=model,
+            input=input,
+            responses_api_provider_config=responses_api_provider_config,
+            response_api_optional_request_params=responses_api_request_params,
+            custom_llm_provider=custom_llm_provider,
+            litellm_params=litellm_params,
+            logging_obj=litellm_logging_obj,
+            extra_headers=extra_headers,
+            extra_body=extra_body,
+            timeout=timeout or request_timeout,
+            _is_async=_is_async,
+            client=kwargs.get("client"),
+        )
+
+        return response
+    except Exception as e:
+        raise litellm.exception_type(
+            model=model,
+            custom_llm_provider=custom_llm_provider,
+            original_exception=e,
+            completion_kwargs=local_vars,
+            extra_kwargs=kwargs,
+        )
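
One concrete effect of the restructured function: a provider with no Responses API config now fails fast with litellm.BadRequestError, raised inside the try block and passed through exception_type unchanged since it is already a mapped type. A hedged sketch (the model string is illustrative):

    import litellm

    try:
        litellm.responses(model="my-provider/no-responses-api", input="hi")
    except litellm.BadRequestError as e:
        print(e.message)  # "Responses API not available for custom_llm_provider=..."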