mirror of https://github.com/BerriAI/litellm.git
test(test_router.py): fix test check

parent 373a627f54
commit 503fca88d4

1 changed file with 4 additions and 7 deletions
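The check previously compared each client's integer max_retries against the raw string value of AZURE_MAX_RETRIES. The fix casts the environment value with int() in four assertions and removes a stream_timeout assertion on the model's litellm_params, as shown in the diff and the sketch after it.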
|
@@ -880,9 +880,6 @@ def test_reading_openai_keys_os_environ():
             assert float(model["litellm_params"]["timeout"]) == float(
                 os.environ["AZURE_TIMEOUT"]
             ), f"{model['litellm_params']['timeout']} vs {os.environ['AZURE_TIMEOUT']}"
-            assert float(model["litellm_params"]["stream_timeout"]) == float(
-                os.environ["AZURE_STREAM_TIMEOUT"]
-            ), f"{model['litellm_params']['stream_timeout']} vs {os.environ['AZURE_STREAM_TIMEOUT']}"
             assert int(model["litellm_params"]["max_retries"]) == int(
                 os.environ["AZURE_MAX_RETRIES"]
             ), f"{model['litellm_params']['max_retries']} vs {os.environ['AZURE_MAX_RETRIES']}"
@@ -890,7 +887,7 @@ def test_reading_openai_keys_os_environ():
             model_id = model["model_info"]["id"]
             async_client: openai.AsyncOpenAI = router.cache.get_cache(key=f"{model_id}_async_client")  # type: ignore
             assert async_client.api_key == os.environ["OPENAI_API_KEY"]
-            assert async_client.max_retries == (
+            assert async_client.max_retries == int(
                 os.environ["AZURE_MAX_RETRIES"]
             ), f"{async_client.max_retries} vs {os.environ['AZURE_MAX_RETRIES']}"
             assert async_client.timeout == (
@@ -902,7 +899,7 @@ def test_reading_openai_keys_os_environ():
 
             stream_async_client: openai.AsyncOpenAI = router.cache.get_cache(key=f"{model_id}_stream_async_client")  # type: ignore
             assert stream_async_client.api_key == os.environ["OPENAI_API_KEY"]
-            assert stream_async_client.max_retries == (
+            assert stream_async_client.max_retries == int(
                 os.environ["AZURE_MAX_RETRIES"]
             ), f"{stream_async_client.max_retries} vs {os.environ['AZURE_MAX_RETRIES']}"
             assert stream_async_client.timeout == (
@@ -913,7 +910,7 @@ def test_reading_openai_keys_os_environ():
             print("\n Testing sync client")
             client: openai.AzureOpenAI = router.cache.get_cache(key=f"{model_id}_client")  # type: ignore
             assert client.api_key == os.environ["OPENAI_API_KEY"]
-            assert client.max_retries == (
+            assert client.max_retries == int(
                 os.environ["AZURE_MAX_RETRIES"]
             ), f"{client.max_retries} vs {os.environ['AZURE_MAX_RETRIES']}"
             assert client.timeout == (
@@ -924,7 +921,7 @@ def test_reading_openai_keys_os_environ():
             print("\n Testing sync stream client")
             stream_client: openai.AzureOpenAI = router.cache.get_cache(key=f"{model_id}_stream_client")  # type: ignore
             assert stream_client.api_key == os.environ["OPENAI_API_KEY"]
-            assert stream_client.max_retries == (
+            assert stream_client.max_retries == int(
                 os.environ["AZURE_MAX_RETRIES"]
             ), f"{stream_client.max_retries} vs {os.environ['AZURE_MAX_RETRIES']}"
             assert stream_client.timeout == (
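For context, a minimal sketch of why the cast is needed, assuming only that os.environ values are strings and that the openai v1 clients store max_retries as an int. The env value and placeholder key below are illustrative; the constructor makes no network call.

import os

import openai

# Illustrative value; os.environ entries are always strings.
os.environ["AZURE_MAX_RETRIES"] = "4"

# Placeholder key; constructing the client performs no request.
client = openai.AsyncOpenAI(
    api_key="sk-test",
    max_retries=int(os.environ["AZURE_MAX_RETRIES"]),
)

# The client holds max_retries as an int, so comparing it to the raw
# environment string fails; the test's fix casts before comparing.
assert client.max_retries != os.environ["AZURE_MAX_RETRIES"]  # 4 != "4"
assert client.max_retries == int(os.environ["AZURE_MAX_RETRIES"])

The same cast is applied to the stream_async_client, client, and stream_client assertions in the later hunks of the diff above.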