Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-24 18:24:20 +00:00
* add test for using images with custom openai endpoints
* run all otel tests
* update name of test
* add custom openai model to test config
* add test for setting supports_vision=True for model (a config sketch follows this list)
* fix test guardrails aporia
* docs supports vision
* fix yaml
* fix yaml
* docs supports vision
* fix bedrock guardrail test
* fix cohere rerank test
* update model_group doc string
* add better prints on test
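The "add custom openai model to test config" and "supports_vision=True" items refer to the proxy's model configuration, which the test below reads back via /model/info. As a hypothetical sketch only (the actual test config is a YAML file in the repo; the model path, api_base, and api_key here are placeholders, not taken from it), one model_list entry with the vision flag could be shaped roughly like this Python dict:

# Hypothetical sketch of a model_list entry; values are placeholders.
llava_model_entry = {
    "model_name": "llava-hf",  # the alias the test looks up via /model/info
    "litellm_params": {
        # routed as an OpenAI-compatible ("custom openai") endpoint
        "model": "openai/llava-hf/llava-v1.6-vicuna-7b-hf",
        "api_base": "http://localhost:8000/v1",  # placeholder endpoint
        "api_key": "fake-key",  # placeholder credential
    },
    # metadata surfaced by /model/info; the test asserts this flag is True
    "model_info": {"supports_vision": True},
}

The proxy's YAML config mirrors this structure, so the assertion on model_info["supports_vision"] in the test corresponds directly to the flag set on the model entry.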
28 lines · 787 B · Python
"""
|
|
/model/info test
|
|
"""
|
|
|
|
import httpx
|
|
import pytest
|
|
|
|
|
|
@pytest.mark.asyncio()
|
|
async def test_custom_model_supports_vision():
|
|
async with httpx.AsyncClient() as client:
|
|
response = await client.get(
|
|
"http://localhost:4000/model/info",
|
|
headers={"Authorization": "Bearer sk-1234"},
|
|
)
|
|
assert response.status_code == 200
|
|
|
|
data = response.json()["data"]
|
|
|
|
print("response from /model/info", data)
|
|
llava_model = next(
|
|
(model for model in data if model["model_name"] == "llava-hf"), None
|
|
)
|
|
|
|
assert llava_model is not None, "llava-hf model not found in response"
|
|
assert (
|
|
llava_model["model_info"]["supports_vision"] == True
|
|
), "llava-hf model should support vision"
|
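The commit title also mentions a test for sending images to custom OpenAI endpoints; that test is not shown on this page. As a rough, hypothetical companion sketch (not the repo's actual test), a vision-style request through the same proxy could reuse the standard OpenAI chat format with an image_url content part. The model alias, image URL, and key below are assumptions:

import asyncio

import httpx


async def send_image_request():
    # Hypothetical sketch: post an OpenAI-format chat request with an image
    # to the proxy's /chat/completions route; values are placeholders.
    payload = {
        "model": "llava-hf",
        "messages": [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "What is in this image?"},
                    {
                        "type": "image_url",
                        "image_url": {"url": "https://example.com/cat.png"},
                    },
                ],
            }
        ],
    }
    async with httpx.AsyncClient() as client:
        response = await client.post(
            "http://localhost:4000/chat/completions",
            headers={"Authorization": "Bearer sk-1234"},
            json=payload,
            timeout=60,
        )
    response.raise_for_status()
    print(response.json()["choices"][0]["message"]["content"])


if __name__ == "__main__":
    asyncio.run(send_image_request())

A request like this would only succeed if the configured backend actually accepts image content; the supports_vision flag checked in the test above is metadata the proxy reports, not a guarantee enforced by the endpoint itself.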