From 5dcb87a88bf2ed1bb1ff085c55cfd92ec192e075 Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Thu, 6 Feb 2025 18:22:08 -0800
Subject: [PATCH] (bug fix router.py) - safely handle `choices=[]` on llm
 responses (#8342)

* test fix test_router_with_empty_choices

* fix _should_raise_content_policy_error
---
 .../test_router_endpoints.py | 25 +++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/tests/router_unit_tests/test_router_endpoints.py b/tests/router_unit_tests/test_router_endpoints.py
index 98d8f8f90b..99164827cc 100644
--- a/tests/router_unit_tests/test_router_endpoints.py
+++ b/tests/router_unit_tests/test_router_endpoints.py
@@ -324,3 +324,28 @@ async def test_anthropic_router_completion_e2e(model_list):
     AnthropicResponse.model_validate(response)
 
     assert response.model == "gpt-3.5-turbo"
+
+
+@pytest.mark.asyncio
+async def test_router_with_empty_choices(model_list):
+    """
+    https://github.com/BerriAI/litellm/issues/8306
+    """
+    router = Router(model_list=model_list)
+    mock_response = litellm.ModelResponse(
+        choices=[],
+        usage=litellm.Usage(
+            prompt_tokens=10,
+            completion_tokens=10,
+            total_tokens=20,
+        ),
+        model="gpt-3.5-turbo",
+        object="chat.completion",
+        created=1723081200,
+    ).model_dump()
+    response = await router.acompletion(
+        model="gpt-3.5-turbo",
+        messages=[{"role": "user", "content": "Hello, how are you?"}],
+        mock_response=mock_response,
+    )
+    assert response is not None
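
Reviewer note: this patch only adds the regression test; the router-side fix
named in the commit message (`_should_raise_content_policy_error`) lives in
litellm/router.py and is not shown in the diff. Below is a minimal sketch of
the kind of guard the test exercises. The function name comes from the commit
message, but the signature and body here are assumptions for illustration,
not the merged code:

    # Hypothetical sketch of the empty-choices guard (see issue #8306).
    # Assumption: `response` is a litellm.ModelResponse-like object with a
    # `choices` list attribute; the real function's signature may differ.
    def _should_raise_content_policy_error(response) -> bool:
        """Return True when the response looks like a content-policy block."""
        # Bug being fixed: indexing response.choices[0] on an empty list
        # raised an IndexError. With no choices there is nothing to inspect,
        # so report "no content policy error" and let the caller return the
        # response unchanged.
        if not response.choices:
            return False
        finish_reason = getattr(response.choices[0], "finish_reason", None)
        return finish_reason == "content_filter"

With a guard like this, `router.acompletion` hands the empty-choices response
back to the caller instead of raising, which is what the test's final
`assert response is not None` verifies.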