(bug fix router.py) - safely handle choices=[] on LLM responses (#8342)

* test: add test_router_with_empty_choices

* fix _should_raise_content_policy_error
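
The patched method body is not part of the hunk shown below; the sketch that
follows is a minimal illustration of the shape of the fix, assuming the
pre-fix code indexed response.choices[0] unconditionally (which raises
IndexError on choices=[]). The signature and the content_filter check are
illustrative, not copied from the commit.

import litellm

def _should_raise_content_policy_error(
    model: str, response: litellm.ModelResponse, kwargs: dict
) -> bool:
    # Guard: an empty choices list means there is nothing to inspect,
    # so it cannot be a content-policy block.
    if not response.choices:
        return False
    finish_reason = getattr(response.choices[0], "finish_reason", None)
    return finish_reason == "content_filter"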
commit 5dcb87a88b (parent d2fec8bf13)
Author: Ishaan Jaff
Date:   2025-02-06 18:22:08 -08:00


@@ -324,3 +324,28 @@ async def test_anthropic_router_completion_e2e(model_list):
    AnthropicResponse.model_validate(response)
    assert response.model == "gpt-3.5-turbo"
@pytest.mark.asyncio
async def test_router_with_empty_choices(model_list):
    """
    Regression test: a ModelResponse with choices=[] must not raise.

    https://github.com/BerriAI/litellm/issues/8306
    """
    router = Router(model_list=model_list)
    # Mock an LLM response whose choices list is empty.
    mock_response = litellm.ModelResponse(
        choices=[],
        usage=litellm.Usage(
            prompt_tokens=10,
            completion_tokens=10,
            total_tokens=20,
        ),
        model="gpt-3.5-turbo",
        object="chat.completion",
        created=1723081200,
    ).model_dump()
    response = await router.acompletion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello, how are you?"}],
        mock_response=mock_response,
    )
    assert response is not None