mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 10:44:24 +00:00
Support discovering gemini, anthropic, xai models by calling their /v1/models
endpoint (#9530)
* fix: initial commit for adding provider model discovery to gemini * feat(gemini/): add model discovery for gemini/ route * docs(set_keys.md): update docs to show you can check available gemini models as well * feat(anthropic/): add model discovery for anthropic api key * feat(xai/): add model discovery for XAI enables checking what models an xai key can call * ci: bump ci config yml * fix(topaz/common_utils.py): fix linting error * fix: fix linting error for python38
This commit is contained in:
parent
340a63bace
commit
ccbac691e5
9 changed files with 223 additions and 6 deletions
|
@ -303,6 +303,24 @@ def test_aget_valid_models():
|
|||
os.environ = old_environ
|
||||
|
||||
|
||||
@pytest.mark.parametrize("custom_llm_provider", ["gemini", "anthropic", "xai"])
def test_get_valid_models_with_custom_llm_provider(custom_llm_provider):
    """Check provider-endpoint model discovery for providers exposing a models API.

    For each parametrized provider, the provider's model-info config must
    exist, and the model list it reports must match what
    ``get_valid_models(check_provider_endpoint=True)`` returns for that
    same provider.
    """
    from litellm.types.utils import LlmProviders
    from litellm.utils import ProviderConfigManager

    # Resolve the provider-level model-info config. ``model=None`` because
    # discovery is keyed on the provider, not on a specific model name.
    model_info_config = ProviderConfigManager.get_provider_model_info(
        model=None,
        provider=LlmProviders(custom_llm_provider),
    )
    assert model_info_config is not None

    # Ask litellm to query the provider's live endpoint for its model list.
    discovered_models = get_valid_models(
        check_provider_endpoint=True, custom_llm_provider=custom_llm_provider
    )
    print(discovered_models)

    # Discovery must return something, and must agree with the provider config.
    assert len(discovered_models) > 0
    assert model_info_config.get_models() == discovered_models


# test_get_valid_models()
|
Loading…
Add table
Add a link
Reference in a new issue