Merge pull request #4661 from BerriAI/litellm_fix_mh

[Fix] Model Hub - Show supports vision correctly
commit 8bf50ac5db
Author: Ishaan Jaff
Date: 2024-07-11 15:03:37 -07:00 (committed by GitHub)
4 changed files with 16 additions and 2 deletions

@@ -4,6 +4,9 @@ model_list:
       model: openai/fake
       api_key: fake-key
       api_base: https://exampleopenaiendpoint-production.up.railway.app/
+  - model_name: gemini-flash
+    litellm_params:
+      model: gemini/gemini-1.5-flash
 
 general_settings:
   master_key: sk-1234
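This config change registers gemini/gemini-1.5-flash on the proxy under the public name gemini-flash, which is what the Model Hub lists. A minimal sketch of checking what the hub sees, assuming a local proxy started from this config on litellm's default port 4000, with the master_key above; the /model/info endpoint and its response shape are taken from litellm's proxy docs, so treat the exact field paths as assumptions:

```python
# Sketch: list the models the proxy exposes and their vision support.
# Assumes the `requests` package and a proxy running on localhost:4000.
import requests

resp = requests.get(
    "http://0.0.0.0:4000/model/info",
    headers={"Authorization": "Bearer sk-1234"},
)
for entry in resp.json()["data"]:
    name = entry["model_name"]
    supports_vision = entry.get("model_info", {}).get("supports_vision")
    print(name, "supports_vision =", supports_vision)
```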

@@ -1,13 +1,16 @@
 # What is this?
 ## Unit testing for the 'get_model_info()' function
-import os, sys, traceback
+import os
+import sys
+import traceback
 
 sys.path.insert(
     0, os.path.abspath("../..")
 )  # Adds the parent directory to the system path
+import pytest
+
 import litellm
 from litellm import get_model_info
-import pytest
 
 
 def test_get_model_info_simple_model_name():
@@ -37,3 +40,9 @@ def test_get_model_info_custom_llm_with_same_name_vllm():
         pytest.fail("Expected get model info to fail for an unmapped model/provider")
     except Exception:
         pass
+
+
+def test_get_model_info_shows_correct_supports_vision():
+    info = litellm.get_model_info("gemini/gemini-1.5-flash")
+    print("info", info)
+    assert info["supports_vision"] is True
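The new test exercises the fix end to end. The same check works outside the test suite, since get_model_info() reads litellm's static model map and needs no provider API key; a small sketch using the model name from the test above:

```python
# Sketch: query litellm's model map directly; no Gemini API key needed.
import litellm

info = litellm.get_model_info("gemini/gemini-1.5-flash")
# Before this fix, "supports_vision" was dropped from the returned dict,
# so read it defensively on older litellm versions.
print(info.get("supports_vision", None))  # expected: True after this change
```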

@@ -73,6 +73,7 @@ class ModelInfo(TypedDict, total=False):
     supported_openai_params: Required[Optional[List[str]]]
     supports_system_messages: Optional[bool]
     supports_response_schema: Optional[bool]
+    supports_vision: Optional[bool]
 
 
 class GenericStreamingChunk(TypedDict):
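Because ModelInfo is declared with total=False and the new key is Optional[bool], consumers can see the field absent entirely (entries built before this key existed) or explicitly None. A hedged sketch of a consumer-side helper; the helper itself is hypothetical, and the import path assumes the TypedDict above sits in litellm.types.utils, which is where litellm defines it:

```python
# Hypothetical helper: treat both "key absent" and None as unknown/False.
from litellm.types.utils import ModelInfo


def model_supports_vision(info: ModelInfo) -> bool:
    # TypedDict with total=False: the key may be missing or set to None.
    return info.get("supports_vision") is True
```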

@@ -4829,6 +4829,7 @@ def get_model_info(model: str, custom_llm_provider: Optional[str] = None) -> ModelInfo:
             supports_response_schema=_model_info.get(
                 "supports_response_schema", None
            ),
+            supports_vision=_model_info.get("supports_vision", None),
         )
     except Exception:
         raise Exception(
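The .get(..., None) pattern above is what keeps the lookup safe for models whose entry in the cost map never declared the capability; a tiny illustration with a hypothetical _model_info dict:

```python
# Illustration: dict.get with a None default surfaces "unknown" instead
# of raising KeyError when a model entry lacks the key.
_model_info = {"max_tokens": 8192}  # hypothetical entry without the key

assert _model_info.get("supports_vision", None) is None  # no KeyError
```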