forked from phoenix/litellm-mirror
(test) dynamic timeout on router
This commit is contained in:
parent
2f4cd3b569
commit
523415cb0c
1 changed file with 94 additions and 0 deletions
|
@ -37,6 +37,100 @@ def test_timeout():
|
||||||
# test_timeout()
|
# test_timeout()
|
||||||
|
|
||||||
|
|
||||||
|
def test_hanging_request_azure():
    """Exercise the router's per-request ``timeout`` against the azure deployment.

    Sends a completion with ``timeout=0.01`` (far too small to complete) and a
    logit bias that pushes the model to keep generating, so the request should
    hang long enough to trip the timeout.  The test passes only if
    ``openai.APITimeoutError`` is raised; any response or other exception fails.
    """
    litellm.set_verbose = True
    try:
        # Two deployments so the router has both an azure and an openai route;
        # this test targets the azure one.
        deployments = [
            {
                "model_name": "azure-gpt",
                "litellm_params": {
                    "model": "azure/chatgpt-v-2",
                    "api_base": os.environ["AZURE_API_BASE"],
                    "api_key": os.environ["AZURE_API_KEY"],
                },
            },
            {
                "model_name": "openai-gpt",
                "litellm_params": {"model": "gpt-3.5-turbo"},
            },
        ]
        router = litellm.Router(model_list=deployments)

        # Heavily bias toward the token for "blue" so generation keeps going
        # and the 10ms timeout is guaranteed to fire first.
        encoded = litellm.utils.encode(model="gpt-3.5-turbo", text="blue")[0]
        response = router.completion(
            model="azure-gpt",
            messages=[{"role": "user", "content": "what color is red"}],
            logit_bias={encoded: 100},
            timeout=0.01,
        )
        print(response)

        # Reaching here means the call returned instead of timing out.
        if response.choices[0].message.content is not None:
            pytest.fail("Got a response, expected a timeout")
    except openai.APITimeoutError as e:
        # Expected outcome: the per-request timeout surfaced as APITimeoutError.
        print(
            "Passed: Raised correct exception. Got openai.APITimeoutError\nGood Job", e
        )
        print(type(e))
        pass
    except Exception as e:
        # Any other exception type means the timeout was mapped incorrectly.
        pytest.fail(
            f"Did not raise error `openai.APITimeoutError`. Instead raised error type: {type(e)}, Error: {e}"
        )


test_hanging_request_azure()
|
def test_hanging_request_openai():
    """Exercise the router's per-request ``timeout`` against the openai deployment.

    Mirrors ``test_hanging_request_azure`` but routes to ``openai-gpt``: a
    ``timeout=0.01`` completion with a logit bias that prolongs generation must
    raise ``openai.APITimeoutError``; a response or any other exception fails.
    """
    litellm.set_verbose = True
    try:
        # Same two-deployment router; this test targets the openai route.
        deployments = [
            {
                "model_name": "azure-gpt",
                "litellm_params": {
                    "model": "azure/chatgpt-v-2",
                    "api_base": os.environ["AZURE_API_BASE"],
                    "api_key": os.environ["AZURE_API_KEY"],
                },
            },
            {
                "model_name": "openai-gpt",
                "litellm_params": {"model": "gpt-3.5-turbo"},
            },
        ]
        router = litellm.Router(model_list=deployments)

        # Heavily bias toward the token for "blue" so generation keeps going
        # and the 10ms timeout is guaranteed to fire first.
        encoded = litellm.utils.encode(model="gpt-3.5-turbo", text="blue")[0]
        response = router.completion(
            model="openai-gpt",
            messages=[{"role": "user", "content": "what color is red"}],
            logit_bias={encoded: 100},
            timeout=0.01,
        )
        print(response)

        # Reaching here means the call returned instead of timing out.
        if response.choices[0].message.content is not None:
            pytest.fail("Got a response, expected a timeout")
    except openai.APITimeoutError as e:
        # Expected outcome: the per-request timeout surfaced as APITimeoutError.
        print(
            "Passed: Raised correct exception. Got openai.APITimeoutError\nGood Job", e
        )
        print(type(e))
        pass
    except Exception as e:
        # Any other exception type means the timeout was mapped incorrectly.
        pytest.fail(
            f"Did not raise error `openai.APITimeoutError`. Instead raised error type: {type(e)}, Error: {e}"
        )


test_hanging_request_openai()


# test_timeout()
||||||
def test_timeout_streaming():
|
def test_timeout_streaming():
|
||||||
# this Will Raise a timeout
|
# this Will Raise a timeout
|
||||||
litellm.set_verbose = False
|
litellm.set_verbose = False
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue