From 8da714104b311b7ec2e30f0b649bb74e69a94455 Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Tue, 11 Mar 2025 17:48:15 -0700
Subject: [PATCH] ResponsesAPIStreamingResponse

---
 litellm/types/llms/openai.py                      |  5 +++++
 .../test_openai_responses_api.py                  | 13 +++++++++++++
 2 files changed, 18 insertions(+)

diff --git a/litellm/types/llms/openai.py b/litellm/types/llms/openai.py
index 4c0be0014d..f9e9a7a334 100644
--- a/litellm/types/llms/openai.py
+++ b/litellm/types/llms/openai.py
@@ -756,3 +756,8 @@ class ResponsesAPIResponse(TypedDict, total=False):
     truncation: Optional[Literal["auto", "disabled"]]
     usage: Optional[ResponseUsage]
     user: Optional[str]
+
+
+class ResponsesAPIStreamingResponse(TypedDict, total=False):
+    type: str
+    response: ResponsesAPIResponse
diff --git a/tests/llm_responses_api_testing/test_openai_responses_api.py b/tests/llm_responses_api_testing/test_openai_responses_api.py
index dc4f9308ef..59024716d7 100644
--- a/tests/llm_responses_api_testing/test_openai_responses_api.py
+++ b/tests/llm_responses_api_testing/test_openai_responses_api.py
@@ -16,3 +16,16 @@ async def test_basic_openai_responses_api():
     print("litellm response=", json.dumps(response, indent=4, default=str))
 
     # validate_responses_api_response()
+
+
+@pytest.mark.asyncio
+async def test_basic_openai_responses_api_streaming():
+    litellm._turn_on_debug()
+    response = await litellm.aresponses(
+        model="gpt-4o",
+        input="Tell me a three sentence bedtime story about a unicorn.",
+        stream=True,
+    )
+
+    async for event in response:
+        print("litellm response=", json.dumps(event, indent=4, default=str))
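
Note: below is a minimal sketch of how the new ResponsesAPIStreamingResponse
TypedDict might be consumed once this patch is applied. The helper name and the
"response.completed" event string are illustrative assumptions (the string
follows OpenAI's Responses API streaming event naming; the patch itself only
types the field as "type: str") and are not part of the commit.

    from typing import Optional

    # Requires the patch above: ResponsesAPIStreamingResponse is defined in
    # litellm/types/llms/openai.py after this change.
    from litellm.types.llms.openai import (
        ResponsesAPIResponse,
        ResponsesAPIStreamingResponse,
    )


    def completed_response(
        event: ResponsesAPIStreamingResponse,
    ) -> Optional[ResponsesAPIResponse]:
        # "response.completed" mirrors OpenAI's streaming event names; treat
        # it as an assumption about the exact strings LiteLLM emits. Only the
        # terminal event is expected to carry the full ResponsesAPIResponse.
        if event.get("type") == "response.completed":
            return event.get("response")
        return None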