litellm/tests/otel_tests/test_model_info.py
Ishaan Jaff 1973ae8fb8
[Feat] Allow setting supports_vision for Custom OpenAI endpoints + Added testing (#5821)
* add test for using images with custom openai endpoints

* run all otel tests

* update name of test

* add custom openai model to test config

* add test for setting supports_vision=True for model

* fix test guardrails aporia

* docs supports vision

* fix yaml

* fix yaml

* docs supports vision

* fix bedrock guardrail test

* fix cohere rerank test

* update model_group doc string

* add better prints on test
2024-09-21 11:35:55 -07:00

28 lines
787 B
Python

"""
/model/info test
"""
import httpx
import pytest
@pytest.mark.asyncio()
async def test_custom_model_supports_vision():
    """
    Verify the proxy's /model/info endpoint reports supports_vision=True
    for the custom OpenAI-compatible "llava-hf" model.

    Requires a running LiteLLM proxy on localhost:4000 configured with a
    "llava-hf" model whose model_info sets supports_vision: true.
    """
    async with httpx.AsyncClient() as client:
        response = await client.get(
            "http://localhost:4000/model/info",
            headers={"Authorization": "Bearer sk-1234"},
        )
        assert response.status_code == 200
        data = response.json()["data"]
        print("response from /model/info", data)

        # Find the configured llava-hf entry; next() yields None when absent
        # so the assertion below can produce a readable failure message.
        llava_model = next(
            (model for model in data if model["model_name"] == "llava-hf"), None
        )
        assert llava_model is not None, "llava-hf model not found in response"
        # `is True` rather than `== True` (E712): requires the field to be a
        # real boolean, not merely a truthy value like "true" or 1.
        assert (
            llava_model["model_info"]["supports_vision"] is True
        ), "llava-hf model should support vision"