mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 18:54:30 +00:00
Merge branch 'main' into feature/bedrock-claude-3-7-pdf
This commit is contained in:
commit
581f7c24f4
74 changed files with 2762 additions and 529 deletions
|
@ -41,8 +41,10 @@ from unittest.mock import AsyncMock, MagicMock, patch
|
|||
|
||||
|
||||
# Assuming your trim_messages, shorten_message_to_fit_limit, and get_token_count functions are all in a module named 'message_utils'
|
||||
|
||||
|
||||
# Autouse fixture: start every test with an empty provider-model cache so
# cached model lists from one test cannot leak into another.
@pytest.fixture(autouse=True)
def reset_mock_cache():
    from litellm.utils import _model_cache
    _model_cache.flush_cache()
|
||||
# Test 1: Check trimming of normal message
|
||||
def test_basic_trimming():
|
||||
messages = [
|
||||
|
@ -1539,6 +1541,7 @@ def test_get_valid_models_fireworks_ai(monkeypatch):
|
|||
litellm.module_level_client, "get", return_value=mock_response
|
||||
) as mock_post:
|
||||
valid_models = get_valid_models(check_provider_endpoint=True)
|
||||
print("valid_models", valid_models)
|
||||
mock_post.assert_called_once()
|
||||
assert (
|
||||
"fireworks_ai/accounts/fireworks/models/llama-3.1-8b-instruct"
|
||||
|
@ -2136,3 +2139,58 @@ def test_claude_3_7_sonnet_supports_pdf_input(model, expected_bool):
|
|||
from litellm.utils import supports_pdf_input
|
||||
|
||||
assert supports_pdf_input(model) == expected_bool
|
||||
|
||||
|
||||
def test_get_valid_models_from_provider():
    """
    get_valid_models(custom_llm_provider=...) must return the provider's
    model list on every call, and mutating one returned list must not
    affect the result of a later call.
    """
    from litellm.utils import get_valid_models

    first_result = get_valid_models(custom_llm_provider="openai")
    assert len(first_result) > 0
    assert "gpt-4o-mini" in first_result

    print("Valid models: ", first_result)
    # Mutate the returned list in place; a later call should be unaffected.
    first_result.remove("gpt-4o-mini")
    assert "gpt-4o-mini" not in first_result

    second_result = get_valid_models(custom_llm_provider="openai")
    assert len(second_result) > 0
    assert "gpt-4o-mini" in second_result
|
||||
|
||||
|
||||
|
||||
def test_get_valid_models_from_provider_cache_invalidation(monkeypatch):
    """
    Test that a provider's cached model list is invalidated when the
    credentials it was cached under disappear: an entry stored while
    OPENAI_API_KEY is set must not be returned after the env var is removed.
    """
    from litellm.utils import _model_cache

    monkeypatch.setenv("OPENAI_API_KEY", "123")

    _model_cache.set_cached_model_info("openai", litellm_params=None, available_models=["gpt-4o-mini"])
    monkeypatch.delenv("OPENAI_API_KEY")

    # NOTE(review): presumably the cache keys on the env credentials, so the
    # lookup misses once the key is gone — confirm against _model_cache.
    assert _model_cache.get_cached_model_info("openai") is None
|
||||
|
||||
|
||||
|
||||
def test_get_valid_models_from_dynamic_api_key():
    """
    Test that get_valid_models honours an api_key supplied dynamically via
    litellm_params (CredentialLiteLLMParams) rather than the environment:
    a bogus key yields no models, a real ANTHROPIC_API_KEY yields models.

    NOTE(review): hits the live Anthropic endpoint and requires
    ANTHROPIC_API_KEY to be set in the test environment.
    """
    from litellm.utils import get_valid_models
    from litellm.types.router import CredentialLiteLLMParams

    # Invalid key: the provider-endpoint check should reject it.
    creds = CredentialLiteLLMParams(api_key="123")

    valid_models = get_valid_models(custom_llm_provider="anthropic", litellm_params=creds, check_provider_endpoint=True)
    assert len(valid_models) == 0

    # Valid key, still passed through litellm_params rather than read
    # implicitly from the environment by the provider call.
    creds = CredentialLiteLLMParams(api_key=os.getenv("ANTHROPIC_API_KEY"))
    valid_models = get_valid_models(custom_llm_provider="anthropic", litellm_params=creds, check_provider_endpoint=True)
    assert len(valid_models) > 0
    assert "anthropic/claude-3-7-sonnet-20250219" in valid_models
|
||||
|
||||
|
Loading…
Add table
Add a link
Reference in a new issue