mirror of https://github.com/BerriAI/litellm.git
Litellm dev 12 24 2024 p2 (#7400)
* fix(utils.py): default custom_llm_provider=None for 'supports_response_schema'. Closes https://github.com/BerriAI/litellm/issues/7397
* refactor(langfuse/): call the Langfuse logger inside a CustomLogger-compatible Langfuse class; refactor the Langfuse logger to use verbose_logger.debug instead of print_verbose
* refactor(litellm_pre_call_utils.py): move config-based team callbacks inside the dynamic team callback logic, enabling simpler unit testing for config-based team callbacks
* fix(proxy/_types.py): handle TeamCallbackMetadata none values - drop none values if present; if all are none, use a default dict to avoid downstream errors
* test(test_proxy_utils.py): add unit test preventing future issues - asserts team_id in config state is not popped off across calls. Fixes https://github.com/BerriAI/litellm/issues/6787
* fix(langfuse_prompt_management.py): add success + failure logging event support
* fix: fix linting error
* test: fix test
* test: fix test
* test: override o1 prompt caching - openai currently not working
* test: fix test
parent d790ba0897
commit c95351e70f
12 changed files with 227 additions and 62 deletions
litellm/utils.py

@@ -1658,7 +1658,9 @@ def supports_system_messages(model: str, custom_llm_provider: Optional[str]) ->
     )


-def supports_response_schema(model: str, custom_llm_provider: Optional[str]) -> bool:
+def supports_response_schema(
+    model: str, custom_llm_provider: Optional[str] = None
+) -> bool:
     """
     Check if the given model + provider supports 'response_schema' as a param.

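A minimal usage sketch of the changed helper, assuming litellm is installed and exports supports_response_schema at the package level; the model and provider names below are illustrative. The point of the change is that custom_llm_provider now defaults to None, so callers can omit it.

```python
# Minimal sketch, assuming litellm is installed and supports_response_schema
# is exported at the package level. Model/provider names are illustrative.
import litellm

# After this commit the provider argument defaults to None, so it can be omitted;
# previously, calling with only `model` raised a TypeError.
print(litellm.supports_response_schema(model="gemini/gemini-1.5-pro"))

# Passing the provider explicitly still works as before.
print(
    litellm.supports_response_schema(
        model="gemini-1.5-pro", custom_llm_provider="vertex_ai"
    )
)
```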