Litellm dev 04 05 2025 p2 (#9774)

* test: move test to just checking async

* fix(transformation.py): handle function call with no schema

* fix(utils.py): handle pydantic base model in message tool calls

Fix https://github.com/BerriAI/litellm/issues/9321

* fix(vertex_and_google_ai_studio.py): handle tools=[]

Fixes https://github.com/BerriAI/litellm/issues/9080

* test: remove max token restriction

* test: fix basic test

* fix(get_supported_openai_params.py): fix check

* fix(converse_transformation.py): support fake streaming for meta.llama3-3-70b-instruct-v1:0

* fix: fix test

* fix: parse out empty dictionary on dbrx streaming + tool calls

* fix(handle-'strict'-param-when-calling-fireworks-ai): fireworks ai does not support 'strict' param

* fix: fix ruff check

* fix: handle no strict in function

* fix: revert bedrock change - handle in separate PR
This commit was authored by Krish Dholakia on 2025-04-07 21:02:52 -07:00 and committed via GitHub.
parent d8f47fc9e5
commit fcf17d114f
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
10 changed files with 214 additions and 11 deletions

View file

@ -27,7 +27,7 @@ from litellm.litellm_core_utils.prompt_templates.common_utils import (
strip_name_from_messages,
)
from litellm.llms.base_llm.base_model_iterator import BaseModelResponseIterator
from litellm.types.llms.anthropic import AnthropicMessagesTool
from litellm.types.llms.anthropic import AllAnthropicToolsValues
from litellm.types.llms.databricks import (
AllDatabricksContentValues,
DatabricksChoice,
@ -160,7 +160,7 @@ class DatabricksConfig(DatabricksBase, OpenAILikeChatConfig, AnthropicConfig):
]
def convert_anthropic_tool_to_databricks_tool(
self, tool: Optional[AnthropicMessagesTool]
self, tool: Optional[AllAnthropicToolsValues]
) -> Optional[DatabricksTool]:
if tool is None:
return None
@ -173,6 +173,19 @@ class DatabricksConfig(DatabricksBase, OpenAILikeChatConfig, AnthropicConfig):
),
)
def _map_openai_to_dbrx_tool(self, model: str, tools: List) -> List[DatabricksTool]:
# if not claude, send as is
if "claude" not in model:
return tools
# if claude, convert to anthropic tool and then to databricks tool
anthropic_tools = self._map_tools(tools=tools)
databricks_tools = [
cast(DatabricksTool, self.convert_anthropic_tool_to_databricks_tool(tool))
for tool in anthropic_tools
]
return databricks_tools
def map_response_format_to_databricks_tool(
self,
model: str,
@ -202,6 +215,10 @@ class DatabricksConfig(DatabricksBase, OpenAILikeChatConfig, AnthropicConfig):
mapped_params = super().map_openai_params(
non_default_params, optional_params, model, drop_params
)
if "tools" in mapped_params:
mapped_params["tools"] = self._map_openai_to_dbrx_tool(
model=model, tools=mapped_params["tools"]
)
if (
"max_completion_tokens" in non_default_params
and replace_max_completion_tokens_with_max_tokens
@ -499,7 +516,10 @@ class DatabricksChatResponseIterator(BaseModelResponseIterator):
message.content = ""
choice["delta"]["content"] = message.content
choice["delta"]["tool_calls"] = None
elif tool_calls:
for _tc in tool_calls:
if _tc.get("function", {}).get("arguments") == "{}":
_tc["function"]["arguments"] = "" # avoid invalid json
# extract the content str
content_str = DatabricksConfig.extract_content_str(
choice["delta"].get("content")