(bug fix router.py) - safely handle choices=[] on llm responses (#8342)

* test fix test_router_with_empty_choices
* fix _should_raise_content_policy_error
This commit is contained in:

parent d2fec8bf13
commit 5dcb87a88b

1 changed file with 25 additions and 0 deletions
@@ -324,3 +324,28 @@ async def test_anthropic_router_completion_e2e(model_list):
     AnthropicResponse.model_validate(response)
 
     assert response.model == "gpt-3.5-turbo"
+
+
+@pytest.mark.asyncio
+async def test_router_with_empty_choices(model_list):
+    """
+    https://github.com/BerriAI/litellm/issues/8306
+    """
+    router = Router(model_list=model_list)
+    mock_response = litellm.ModelResponse(
+        choices=[],
+        usage=litellm.Usage(
+            prompt_tokens=10,
+            completion_tokens=10,
+            total_tokens=20,
+        ),
+        model="gpt-3.5-turbo",
+        object="chat.completion",
+        created=1723081200,
+    ).model_dump()
+    response = await router.acompletion(
+        model="gpt-3.5-turbo",
+        messages=[{"role": "user", "content": "Hello, how are you?"}],
+        mock_response=mock_response,
+    )
+    assert response is not None
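The diff adds only the regression test for issue 8306; the router.py change it exercises is not shown here. Below is a minimal sketch of the kind of guard the commit title describes, safely handling choices=[] before indexing into it. `_should_raise_content_policy_error` is the name from the commit message, but the signature, the mock classes, and the finish_reason check are assumptions for illustration, not litellm's actual implementation.

from typing import List, Optional


class MockChoice:
    """Stand-in for a response choice; only finish_reason matters here."""

    def __init__(self, finish_reason: Optional[str] = None):
        self.finish_reason = finish_reason


class MockResponse:
    """Stand-in for litellm.ModelResponse with just a choices list."""

    def __init__(self, choices: List[MockChoice]):
        self.choices = choices


def _should_raise_content_policy_error(response: MockResponse) -> bool:
    """Return True only when the first choice reports a content filter.

    Indexing response.choices[0] unconditionally raises IndexError when a
    provider returns choices=[]; checking the list first keeps this safe,
    which is the failure mode the test above guards against.
    """
    if not response.choices:  # safely handle choices=[]
        return False
    return response.choices[0].finish_reason == "content_filter"


# An empty-choices response no longer crashes the check:
assert _should_raise_content_policy_error(MockResponse(choices=[])) is False
assert _should_raise_content_policy_error(
    MockResponse(choices=[MockChoice("content_filter")])
)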