Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 18:54:30 +00:00
QA: ensure all bedrock regional models have same supported_ as base + Anthropic nested pydantic object support (#7844)
* build: ensure all regional bedrock models have same supported values as base bedrock model - prevents drift
* test(base_llm_unit_tests.py): add testing for nested pydantic objects
* fix(test_utils.py): add test_get_potential_model_names
* fix(anthropic/chat/transformation.py): support nested pydantic objects

Fixes https://github.com/BerriAI/litellm/issues/7755
This commit is contained in:
parent 37ed49fe72
commit 6eb2346fd6
12 changed files with 259 additions and 62 deletions
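Before the diff itself, here is a minimal, hedged sketch of what the nested-pydantic support looks like from the caller's side, using litellm.completion with the same schema shape the new test exercises. The model string is only an example and is not taken from this commit; any model for which litellm.utils.supports_response_schema returns True should behave the same way.

# Minimal sketch: nested pydantic models passed as response_format in litellm.
# The model name below is an example, not part of this commit.
import litellm
from pydantic import BaseModel


class CalendarEvent(BaseModel):
    name: str
    date: str
    participants: list[str]


class EventsList(BaseModel):
    # nested pydantic object: a list of CalendarEvent models
    events: list[CalendarEvent]


response = litellm.completion(
    model="anthropic/claude-3-5-sonnet-20241022",  # example model string
    messages=[
        {"role": "user", "content": "List 5 important events in the XIX century"}
    ],
    response_format=EventsList,
)
# The message content should be JSON that validates against EventsList.
print(response.choices[0].message.content)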
base_llm_unit_tests.py
@@ -259,6 +259,59 @@ class BaseLLMChatTest(ABC):
         except litellm.InternalServerError:
             pytest.skip("Model is overloaded")
 
+    @pytest.mark.flaky(retries=6, delay=1)
+    def test_json_response_pydantic_obj_nested_obj(self):
+        litellm.set_verbose = True
+        from pydantic import BaseModel
+        from litellm.utils import supports_response_schema
+
+        os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True"
+        litellm.model_cost = litellm.get_model_cost_map(url="")
+
+    @pytest.mark.flaky(retries=6, delay=1)
+    def test_json_response_nested_pydantic_obj(self):
+        from pydantic import BaseModel
+        from litellm.utils import supports_response_schema
+
+        os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True"
+        litellm.model_cost = litellm.get_model_cost_map(url="")
+
+        class CalendarEvent(BaseModel):
+            name: str
+            date: str
+            participants: list[str]
+
+        class EventsList(BaseModel):
+            events: list[CalendarEvent]
+
+        messages = [
+            {"role": "user", "content": "List 5 important events in the XIX century"}
+        ]
+
+        base_completion_call_args = self.get_base_completion_call_args()
+        if not supports_response_schema(base_completion_call_args["model"], None):
+            pytest.skip(
+                f"Model={base_completion_call_args['model']} does not support response schema"
+            )
+
+        try:
+            res = self.completion_function(
+                **base_completion_call_args,
+                messages=messages,
+                response_format=EventsList,
+                timeout=60,
+            )
+            assert res is not None
+
+            print(res.choices[0].message)
+
+            assert res.choices[0].message.content is not None
+            assert res.choices[0].message.tool_calls is None
+        except litellm.Timeout:
+            pytest.skip("Model took too long to respond")
+        except litellm.InternalServerError:
+            pytest.skip("Model is overloaded")
+
     @pytest.mark.flaky(retries=6, delay=1)
     def test_json_response_format_stream(self):
         """
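The "build" bullet in the commit message is about keeping regional Bedrock entries in the model cost map from drifting away from their base entries. A rough sketch of that kind of drift check is below; the region prefixes and the choice to compare only the supports_* flags are illustrative assumptions, not the commit's actual implementation.

# Hedged sketch of a regional-vs-base drift check over litellm's model cost map.
# Assumptions: regional variants are keyed with a region prefix such as "us."
# or "eu.", and "supported values" means the boolean supports_* capability flags.
import os

import litellm

os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True"  # use the bundled map, as the tests do
litellm.model_cost = litellm.get_model_cost_map(url="")

REGION_PREFIXES = ("us.", "eu.", "apac.")  # assumed prefixes for regional entries


def find_drift() -> list[str]:
    drift = []
    for name, info in litellm.model_cost.items():
        prefix = next((p for p in REGION_PREFIXES if name.startswith(p)), None)
        if prefix is None:
            continue
        base_info = litellm.model_cost.get(name[len(prefix):])
        if base_info is None:
            continue
        # Compare every supports_* flag on the base entry against the regional entry.
        for key, value in base_info.items():
            if key.startswith("supports_") and info.get(key) != value:
                drift.append(f"{name}: {key} differs from base")
    return drift


if __name__ == "__main__":
    mismatches = find_drift()
    assert not mismatches, f"regional bedrock entries drifted from base: {mismatches}"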