diff --git a/litellm/llms/openai/openai.py b/litellm/llms/openai/openai.py
index 13412ef96a..6ea4c8c004 100644
--- a/litellm/llms/openai/openai.py
+++ b/litellm/llms/openai/openai.py
@@ -459,6 +459,7 @@ class OpenAIChatCompletion(BaseLLM, BaseOpenAILLM):
             else:
                 headers = {}
             response = raw_response.parse()
+            # raw_response.http_response.close()
             return headers, response
         except Exception as e:
             if raw_response is not None:
diff --git a/tests/load_tests/test_memory_usage.py b/tests/load_tests/test_memory_usage.py
index f273865a29..63c54f2052 100644
--- a/tests/load_tests/test_memory_usage.py
+++ b/tests/load_tests/test_memory_usage.py
@@ -83,6 +83,13 @@ async def make_text_completion_request():
         api_base="https://exampleopenaiendpoint-production.up.railway.app/",
     )
 
+async def make_streaming_completion_request():
+    return await litellm.acompletion(
+        model="openai/gpt-4o",
+        messages=[{"role": "user", "content": "Test message for memory usage"}],
+        stream=True,
+    )
+
 
 @pytest.mark.asyncio
 @pytest.mark.skip(
@@ -102,6 +109,22 @@ async def test_atext_completion_memory():
     await run_memory_test(make_text_completion_request, "atext_completion")
 
 
+@pytest.mark.asyncio
+@pytest.mark.skip(
+    reason="This test is too slow to run on every commit. We can use this after nightly release"
+)
+async def test_streaming_completion_memory():
+    """Test memory usage for streaming litellm.acompletion"""
+    await run_memory_test(make_streaming_completion_request, "completion")
+
+@pytest.mark.asyncio
+@pytest.mark.skip(
+    reason="This test is too slow to run on every commit. We can use this after nightly release"
+)
+async def test_streaming_acompletion_memory():
+    """Test memory usage for streaming litellm.acompletion"""
+    await run_memory_test(make_streaming_completion_request, "acompletion")
+
 litellm_router = Router(
     model_list=[
         {