Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 10:44:24 +00:00
[Feat] Add Support for DELETE /v1/responses/{response_id} on OpenAI, Azure OpenAI (#10205)
* add transform_delete_response_api_request to base responses config
* add transform_delete_response_api_request
* add delete_response_api_handler
* fixes for deleting responses, response API
* add adelete_responses
* add async test_basic_openai_responses_delete_endpoint
* test_basic_openai_responses_delete_endpoint
* working delete for streaming on responses API
* fixes azure transformation
* TestAnthropicResponsesAPITest
* fix code check
* fix linting
* fixes for get_complete_url
* test_basic_openai_responses_streaming_delete_endpoint
* streaming fixes
parent 2bb51866b1
commit 868cdd0226
15 changed files with 729 additions and 83 deletions
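The commit adds delete support to litellm's Responses API surface in both sync and async form. A minimal sketch of the new calls, assuming an OpenAI key in the environment; the model name here is illustrative and not part of this commit:

    import litellm

    # create a response, then delete it by id using the new
    # DELETE /v1/responses/{response_id} support
    response = litellm.responses(
        model="openai/gpt-4o",  # hypothetical model choice
        input="Basic ping",
        max_output_tokens=20,
    )
    litellm.delete_responses(
        response_id=response.id,
        model="openai/gpt-4o",  # routing args are passed to delete as well
    )

The async pair, litellm.aresponses and litellm.adelete_responses, takes the same arguments, as the tests below exercise.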
@@ -189,6 +189,90 @@ class BaseResponsesAPITest(ABC):

    @pytest.mark.parametrize("sync_mode", [False, True])
    @pytest.mark.asyncio
    async def test_basic_openai_responses_delete_endpoint(self, sync_mode):
        litellm._turn_on_debug()
        litellm.set_verbose = True
        base_completion_call_args = self.get_base_completion_call_args()
        if sync_mode:
            response = litellm.responses(
                input="Basic ping", max_output_tokens=20,
                **base_completion_call_args
            )

            # delete the response
            if isinstance(response, ResponsesAPIResponse):
                litellm.delete_responses(
                    response_id=response.id,
                    **base_completion_call_args
                )
            else:
                raise ValueError("response is not a ResponsesAPIResponse")
        else:
            response = await litellm.aresponses(
                input="Basic ping", max_output_tokens=20,
                **base_completion_call_args
            )

            # async delete the response
            if isinstance(response, ResponsesAPIResponse):
                await litellm.adelete_responses(
                    response_id=response.id,
                    **base_completion_call_args
                )
            else:
                raise ValueError("response is not a ResponsesAPIResponse")

    @pytest.mark.parametrize("sync_mode", [True, False])
    @pytest.mark.asyncio
    async def test_basic_openai_responses_streaming_delete_endpoint(self, sync_mode):
        # litellm._turn_on_debug()
        # litellm.set_verbose = True
        base_completion_call_args = self.get_base_completion_call_args()
        response_id = None
        if sync_mode:
            response_id = None
            response = litellm.responses(
                input="Basic ping", max_output_tokens=20,
                stream=True,
                **base_completion_call_args
            )
            for event in response:
                print("litellm response=", json.dumps(event, indent=4, default=str))
                if "response" in event:
                    response_obj = event.get("response")
                    if response_obj is not None:
                        response_id = response_obj.get("id")
                        print("got response_id=", response_id)

            # delete the response
            assert response_id is not None
            litellm.delete_responses(
                response_id=response_id,
                **base_completion_call_args
            )
        else:
            response = await litellm.aresponses(
                input="Basic ping", max_output_tokens=20,
                stream=True,
                **base_completion_call_args
            )
            async for event in response:
                print("litellm response=", json.dumps(event, indent=4, default=str))
                if "response" in event:
                    response_obj = event.get("response")
                    if response_obj is not None:
                        response_id = response_obj.get("id")
                        print("got response_id=", response_id)

            # delete the response
            assert response_id is not None
            await litellm.adelete_responses(
                response_id=response_id,
                **base_completion_call_args
            )
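Per the commit title, the handler ultimately issues an HTTP DELETE against the provider's /v1/responses/{response_id} endpoint. A rough sketch of the equivalent raw OpenAI call (the URL and bearer auth follow OpenAI's published API; the httpx client here is illustrative, not the handler's internal code):

    import os
    import httpx

    response_id = "resp_abc123"  # placeholder: id returned by a prior create call
    result = httpx.delete(
        f"https://api.openai.com/v1/responses/{response_id}",
        headers={"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"},
    )
    result.raise_for_status()

Azure OpenAI reaches the same operation through its own base URL and api-version query parameter, which is what the get_complete_url fixes listed in the commit message handle.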