forked from phoenix/litellm-mirror
fix(test_scheduler.py): fix test
This commit is contained in:
parent a0fb301b18 · commit c6559753c7
2 changed files with 4 additions and 9 deletions
test_scheduler.py:

@@ -77,9 +77,9 @@ async def test_scheduler_prioritized_requests(p0, p1):
     assert await scheduler.peek(id="10", model_name="gpt-3.5-turbo") == False


-@pytest.mark.parametrize("p0, p1", [(0, 1)])  # (0, 0), (1, 0)
+@pytest.mark.parametrize("p0, p1", [(0, 1), (0, 0), (1, 0)])  #
 @pytest.mark.asyncio
-async def test_scheduler_prioritized_requests_mock_response(p0, p1):
+async def test_aascheduler_prioritized_requests_mock_response(p0, p1):
     """
     2 requests for same model group

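This hunk enables the previously commented-out priority pairs (0, 0) and (1, 0) and renames the test with the "aa" prefix seen elsewhere in litellm's suite. For orientation, a hedged sketch of what each (p0, p1) pair drives: one priority per competing request, with the lower value served first. FlowItem's field names are assumptions based on litellm.scheduler; only peek appears verbatim in this hunk.

from litellm.scheduler import FlowItem

# Each parametrized pair assigns one priority per competing request.
# Lower numbers win: (0, 1) means request "10" should be scheduled first.
p0, p1 = 0, 1
item_a = FlowItem(priority=p0, request_id="10", model_name="gpt-3.5-turbo")
item_b = FlowItem(priority=p1, request_id="11", model_name="gpt-3.5-turbo")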
@@ -94,7 +94,7 @@ async def test_scheduler_prioritized_requests_mock_response(p0, p1):
             "litellm_params": {
                 "model": "gpt-3.5-turbo",
                 "mock_response": "Hello world this is Macintosh!",
-                "rpm": 1,
+                "rpm": 0,
             },
         },
     ],
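The only change here tightens the deployment's rate limit from rpm 1 to 0. For context, a minimal sketch of the Router config this hunk edits; the surrounding model_list literal is reconstructed from the diff's context lines rather than quoted whole, and mock_response makes the deployment answer locally without an API key.

import litellm

router = litellm.Router(
    model_list=[
        {
            "model_name": "gpt-3.5-turbo",
            "litellm_params": {
                "model": "gpt-3.5-turbo",
                "mock_response": "Hello world this is Macintosh!",
                "rpm": 0,  # the value this commit sets
            },
        },
    ]
)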
@@ -106,11 +106,6 @@ async def test_scheduler_prioritized_requests_mock_response(p0, p1):

     scheduler.update_variables(llm_router=router)

-    await router.acompletion(
-        model="gpt-3.5-turbo",
-        messages=[{"role": "user", "content": "Hey!"}],
-    )
-
     async def _make_prioritized_call(flow_item: FlowItem):
         ## POLL QUEUE
         default_timeout = router.timeout
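The deleted block was a plain completion call issued before the prioritized requests; with it gone, only the scheduled calls hit the router. Below is a hedged sketch of the polling pattern _make_prioritized_call builds on; Scheduler(), add_request, and wait_for_turn are assumptions based on litellm's scheduler module, while update_variables and peek appear verbatim in this file.

import asyncio
from litellm.scheduler import Scheduler, FlowItem

async def wait_for_turn(scheduler: Scheduler, item: FlowItem) -> None:
    # add_request is an assumed API: enqueue the item under its priority.
    await scheduler.add_request(request=item)
    # peek (verbatim in the diff) reports whether this id is at the queue head.
    while not await scheduler.peek(id=item.request_id, model_name=item.model_name):
        await asyncio.sleep(0.01)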
Second changed file (name not shown in this capture):

@@ -1410,7 +1410,7 @@ async def test_parallel_streaming_requests(sync_mode, model):
         ]

         def sync_test_streaming():
-            response: litellm.CustomStreamWrapper = litellm.acompletion(  # type: ignore
+            response: litellm.CustomStreamWrapper = litellm.completion(  # type: ignore
                 model=model,
                 messages=messages,
                 stream=True,
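This fixes a sync/async mix-up: inside the synchronous sync_test_streaming, litellm.acompletion returns a coroutine that is never awaited, so the sync branch never actually streamed. litellm.completion with stream=True returns a CustomStreamWrapper that can be iterated with a plain for loop. A minimal runnable sketch of the corrected path; the mock_response argument is added here for self-containment and is not part of the commit.

import litellm

def sync_test_streaming():
    response = litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hey!"}],
        stream=True,
        mock_response="Hello world this is Macintosh!",  # assumption: mock keeps it offline
    )
    for chunk in response:  # CustomStreamWrapper supports sync iteration
        print(chunk.choices[0].delta.content or "", end="")

sync_test_streaming()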