From c6559753c7efe468d0b0ad2ee3fa774f05a651d9 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Sat, 1 Jun 2024 11:30:26 -0700
Subject: [PATCH] fix(test_scheduler.py): fix test

---
 litellm/tests/test_scheduler.py | 11 +++--------
 litellm/tests/test_streaming.py |  2 +-
 2 files changed, 4 insertions(+), 9 deletions(-)

diff --git a/litellm/tests/test_scheduler.py b/litellm/tests/test_scheduler.py
index bba06d587..93c061c49 100644
--- a/litellm/tests/test_scheduler.py
+++ b/litellm/tests/test_scheduler.py
@@ -77,9 +77,9 @@ async def test_scheduler_prioritized_requests(p0, p1):
     assert await scheduler.peek(id="10", model_name="gpt-3.5-turbo") == False
 
 
-@pytest.mark.parametrize("p0, p1", [(0, 1)])  # (0, 0), (1, 0)
+@pytest.mark.parametrize("p0, p1", [(0, 1), (0, 0), (1, 0)])  #
 @pytest.mark.asyncio
-async def test_scheduler_prioritized_requests_mock_response(p0, p1):
+async def test_aascheduler_prioritized_requests_mock_response(p0, p1):
     """
     2 requests for same model group
 
@@ -94,7 +94,7 @@ async def test_scheduler_prioritized_requests_mock_response(p0, p1):
                 "litellm_params": {
                     "model": "gpt-3.5-turbo",
                     "mock_response": "Hello world this is Macintosh!",
-                    "rpm": 1,
+                    "rpm": 0,
                 },
             },
         ],
@@ -106,11 +106,6 @@ async def test_scheduler_prioritized_requests_mock_response(p0, p1):
 
     scheduler.update_variables(llm_router=router)
 
-    await router.acompletion(
-        model="gpt-3.5-turbo",
-        messages=[{"role": "user", "content": "Hey!"}],
-    )
-
     async def _make_prioritized_call(flow_item: FlowItem):
         ## POLL QUEUE
         default_timeout = router.timeout
diff --git a/litellm/tests/test_streaming.py b/litellm/tests/test_streaming.py
index f281f8ec2..6c1cb6128 100644
--- a/litellm/tests/test_streaming.py
+++ b/litellm/tests/test_streaming.py
@@ -1410,7 +1410,7 @@ async def test_parallel_streaming_requests(sync_mode, model):
     ]
 
     def sync_test_streaming():
-        response: litellm.CustomStreamWrapper = litellm.acompletion(  # type: ignore
+        response: litellm.CustomStreamWrapper = litellm.completion(  # type: ignore
             model=model,
             messages=messages,
             stream=True,