QA: ensure all bedrock regional models have the same supported_* values as the base model + Anthropic nested pydantic object support (#7844)

* build: ensure all regional bedrock models have same supported values as base bedrock model

prevents drift

* test(base_llm_unit_tests.py): add testing for nested pydantic objects

* fix(test_utils.py): add test_get_potential_model_names

* fix(anthropic/chat/transformation.py): support nested pydantic objects

Fixes https://github.com/BerriAI/litellm/issues/7755
This commit is contained in:
Krish Dholakia 2025-01-17 19:49:12 -08:00 committed by GitHub
parent 37ed49fe72
commit 6eb2346fd6
12 changed files with 259 additions and 62 deletions

View file

@@ -307,3 +307,35 @@ def test_get_model_info_custom_model_router():
info = get_model_info("openai/meta-llama/Meta-Llama-3-8B-Instruct")
print("info", info)
assert info is not None
def test_get_model_info_bedrock_models():
    """
    Check for drift in base model info for bedrock models and regional model info for bedrock models.

    Every regional/commitment-prefixed bedrock entry in the model cost map must
    carry the exact same `supports_*` flags as its base model entry.
    """
    from litellm import AmazonConverseConfig

    os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True"
    litellm.model_cost = litellm.get_model_cost_map(url="")

    # Commitment prefixes that may wrap a base model name; hoisted out of the
    # loop since the list is constant.
    potential_commitments = [
        "1-month-commitment",
        "3-month-commitment",
        "6-month-commitment",
    ]

    for k, v in litellm.model_cost.items():
        # Only bedrock-provider entries are subject to the drift check.
        if v["litellm_provider"] != "bedrock":
            continue

        # Normalize the model name: drop wildcard region and commitment prefixes.
        k = k.replace("*/", "")
        if any(commitment in k for commitment in potential_commitments):
            for commitment in potential_commitments:
                k = k.replace(f"{commitment}/", "")

        base_model = AmazonConverseConfig()._get_base_model(k)
        base_model_info = litellm.model_cost[base_model]

        # Compare only the capability flags (`supports_*`) against the base entry.
        supports_flags = {
            base_model_key: base_model_value
            for base_model_key, base_model_value in base_model_info.items()
            if base_model_key.startswith("supports_")
        }
        for base_model_key, base_model_value in supports_flags.items():
            assert (
                base_model_key in v
            ), f"{base_model_key} is not in model cost map for {k}"
            assert (
                v[base_model_key] == base_model_value
            ), f"{base_model_key} is not equal to {base_model_value} for model {k}"

View file

@@ -1471,3 +1471,12 @@ def test_pick_cheapest_chat_model_from_llm_provider():
assert len(pick_cheapest_chat_models_from_llm_provider("openai", n=3)) == 3
assert len(pick_cheapest_chat_models_from_llm_provider("unknown", n=1)) == 0
def test_get_potential_model_names():
    """Smoke-test that _get_potential_model_names returns a truthy result for a
    region-prefixed bedrock model name."""
    from litellm.utils import _get_potential_model_names

    potential_names = _get_potential_model_names(
        model="bedrock/ap-northeast-1/anthropic.claude-instant-v1",
        custom_llm_provider="bedrock",
    )
    assert potential_names