Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 10:44:24 +00:00)
(test) init proxy router with gpt-vision enhancements
parent 0c4b86c211
commit 67db73ab1a

1 changed file with 58 additions and 1 deletion
@@ -23,6 +23,10 @@ load_dotenv()

 def test_init_clients():
     litellm.set_verbose = True
+    import logging
+    from litellm._logging import verbose_router_logger
+
+    verbose_router_logger.setLevel(logging.DEBUG)
     try:
         print("testing init 4 clients with diff timeouts")
         model_list = [
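The four added lines switch the router's logger into debug mode for the test run. The diff only shows setLevel being called on verbose_router_logger, which suggests it behaves like a standard library logging.Logger; a minimal standalone sketch under that assumption (the handler/formatter setup is illustrative, not part of the commit):

import logging

from litellm._logging import verbose_router_logger

# Send router debug output to stderr with timestamps. The commit relies on
# setLevel alone, which assumes litellm has already attached a handler.
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s"))
verbose_router_logger.addHandler(handler)
verbose_router_logger.setLevel(logging.DEBUG)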
@@ -39,7 +43,7 @@ def test_init_clients():
             },
         },
     ]
-    router = Router(model_list=model_list)
+    router = Router(model_list=model_list, set_verbose=True)
     for elem in router.model_list:
         model_id = elem["model_info"]["id"]
         assert router.cache.get_cache(f"{model_id}_client") is not None
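Besides flipping set_verbose on the Router, the surrounding context shows the invariant the test depends on: after construction, every deployment carries a model_info["id"] and its pre-initialized client is stored in the router cache under the "{model_id}_client" key. A sketch of that check in isolation, using only the APIs visible in this diff (model_list stands in for the fixture defined earlier in the test):

from litellm import Router

router = Router(model_list=model_list, set_verbose=True)
for deployment in router.model_list:
    model_id = deployment["model_info"]["id"]
    # Clients are pre-built at init time and cached per deployment id.
    client = router.cache.get_cache(f"{model_id}_client")
    assert client is not None, f"no cached client for deployment {model_id}"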
@@ -55,6 +59,18 @@ def test_init_clients():

         assert async_client.timeout == 0.01
         assert stream_async_client.timeout == 0.000_001
+        print(vars(async_client))
+        print()
+        print(async_client._base_url)
+        assert (
+            async_client._base_url
+            == "https://openai-gpt-4-test-v-1.openai.azure.com//openai/"
+        )  # openai python adds the extra /
+        assert (
+            stream_async_client._base_url
+            == "https://openai-gpt-4-test-v-1.openai.azure.com//openai/"
+        )
+
         print("PASSED !")

     except Exception as e:
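The expected URLs contain a double slash before openai/ on purpose: the Azure endpoint configured for this test already ends in a slash, and, per the inline comment, the openai Python client appends its own /openai segment on top. A plain-string illustration of that join (the endpoint value is taken from the assertion; the concatenation is an illustration of the effect, not the SDK's exact code):

azure_endpoint = "https://openai-gpt-4-test-v-1.openai.azure.com/"  # trailing slash
base_url = azure_endpoint + "/openai/"  # the openai client adds this segment itself
assert base_url == "https://openai-gpt-4-test-v-1.openai.azure.com//openai/"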
@@ -307,3 +323,44 @@ def test_xinference_embedding():


 # test_xinference_embedding()
+
+
+def test_router_init_gpt_4_vision_enhancements():
+    try:
+        # tests that base_url is preserved when a base_url with /openai/deployments is passed to the router
+        print("Testing Azure GPT_Vision enhancements")
+
+        model_list = [
+            {
+                "model_name": "gpt-4-vision-enhancements",
+                "litellm_params": {
+                    "model": "azure/gpt-4-vision",
+                    "api_key": os.getenv("AZURE_API_KEY"),
+                    "base_url": "https://gpt-4-vision-resource.openai.azure.com/openai/deployments/gpt-4-vision/extensions/",
+                },
+            }
+        ]
+
+        router = Router(model_list=model_list)
+
+        print(router.model_list)
+        print(router.model_list[0])
+
+        assert (
+            router.model_list[0]["litellm_params"]["base_url"]
+            == "https://gpt-4-vision-resource.openai.azure.com/openai/deployments/gpt-4-vision/extensions/"
+        )  # set in env
+
+        azure_client = router._get_client(
+            deployment=router.model_list[0],
+            kwargs={"stream": True, "model": "gpt-4-vision-enhancements"},
+            client_type="async",
+        )
+
+        assert (
+            azure_client._base_url
+            == "https://gpt-4-vision-resource.openai.azure.com/openai/deployments/gpt-4-vision/extensions/"
+        )
+        print("passed")
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
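The new test pins down the GPT-4 Vision "enhancements" behavior: when a base_url pointing at an Azure .../openai/deployments/<name>/extensions/ route is supplied, the router must pass it through to the underlying client unchanged rather than rewriting it. A hypothetical usage sketch of such a deployment once routed (the message content is a placeholder and model_list is the fixture from the test above; nothing here beyond Router.completion is taken from the commit):

from litellm import Router

router = Router(model_list=model_list)
# The deployment is addressed by its model_name; litellm resolves it to the
# azure/gpt-4-vision deployment with the /extensions/ base_url preserved.
response = router.completion(
    model="gpt-4-vision-enhancements",
    messages=[{"role": "user", "content": "What is in this image?"}],
)
print(response)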