Litellm dev 04 05 2025 p2 (#9774)

* test: move test to just checking async

* fix(transformation.py): handle function call with no schema

* fix(utils.py): handle pydantic base model in message tool calls

Fixes https://github.com/BerriAI/litellm/issues/9321

* fix(vertex_and_google_ai_studio.py): handle tools=[]

Fixes https://github.com/BerriAI/litellm/issues/9080

* test: remove max token restriction

* test: fix basic test

* fix(get_supported_openai_params.py): fix check

* fix(converse_transformation.py): support fake streaming for meta.llama3-3-70b-instruct-v1:0

* fix: fix test

* fix: parse out empty dictionary on dbrx streaming + tool calls

* fix(handle-'strict'-param-when-calling-fireworks-ai): Fireworks AI does not support the 'strict' param

* fix: fix ruff check

* fix: handle no strict in function

* fix: revert bedrock change - handle in separate PR
Commit fcf17d114f (parent d8f47fc9e5), authored by Krish Dholakia, 2025-04-07 21:02:52 -07:00, committed by GitHub.
10 changed files with 214 additions and 11 deletions

@@ -30,6 +30,7 @@ from litellm.types.llms.openai import (
     ChatCompletionToolParam,
     ChatCompletionToolParamFunctionChunk,
     ChatCompletionUserMessage,
+    OpenAIChatCompletionToolParam,
     OpenAIMessageContentListBlock,
 )
 from litellm.types.utils import ModelResponse, PromptTokensDetailsWrapper, Usage
@@ -211,6 +212,23 @@ class AmazonConverseConfig(BaseConfig):
             )
         return _tool
 
+    def _apply_tool_call_transformation(
+        self,
+        tools: List[OpenAIChatCompletionToolParam],
+        model: str,
+        non_default_params: dict,
+        optional_params: dict,
+    ):
+        optional_params = self._add_tools_to_optional_params(
+            optional_params=optional_params, tools=tools
+        )
+        if (
+            "meta.llama3-3-70b-instruct-v1:0" in model
+            and non_default_params.get("stream", False) is True
+        ):
+            optional_params["fake_stream"] = True
+
     def map_openai_params(
         self,
         non_default_params: dict,
@@ -286,8 +304,11 @@ class AmazonConverseConfig(BaseConfig):
             if param == "top_p":
                 optional_params["topP"] = value
             if param == "tools" and isinstance(value, list):
-                optional_params = self._add_tools_to_optional_params(
-                    optional_params=optional_params, tools=value
+                self._apply_tool_call_transformation(
+                    tools=cast(List[OpenAIChatCompletionToolParam], value),
+                    model=model,
+                    non_default_params=non_default_params,
+                    optional_params=optional_params,
                 )
             if param == "tool_choice":
                 _tool_choice_value = self.map_tool_choice_values(
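
With this change, a streaming tool-call request against Bedrock's llama 3.3 model is served via fake streaming: litellm makes a single non-streaming Converse call and replays the result as chunks. A minimal usage sketch (the tool definition and prompt are hypothetical, and AWS credentials are assumed to be configured):

import litellm

response = litellm.completion(
    model="bedrock/meta.llama3-3-70b-instruct-v1:0",
    messages=[{"role": "user", "content": "What's the weather in SF?"}],
    tools=[
        {
            "type": "function",
            "function": {
                "name": "get_weather",  # hypothetical tool
                "parameters": {
                    "type": "object",
                    "properties": {"location": {"type": "string"}},
                },
            },
        }
    ],
    stream=True,  # now sets optional_params["fake_stream"] = True for this model
)
for chunk in response:
    print(chunk)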

@@ -27,7 +27,7 @@ from litellm.litellm_core_utils.prompt_templates.common_utils import (
     strip_name_from_messages,
 )
 from litellm.llms.base_llm.base_model_iterator import BaseModelResponseIterator
-from litellm.types.llms.anthropic import AnthropicMessagesTool
+from litellm.types.llms.anthropic import AllAnthropicToolsValues
 from litellm.types.llms.databricks import (
     AllDatabricksContentValues,
     DatabricksChoice,
@@ -160,7 +160,7 @@ class DatabricksConfig(DatabricksBase, OpenAILikeChatConfig, AnthropicConfig):
     ]
 
     def convert_anthropic_tool_to_databricks_tool(
-        self, tool: Optional[AnthropicMessagesTool]
+        self, tool: Optional[AllAnthropicToolsValues]
     ) -> Optional[DatabricksTool]:
         if tool is None:
             return None
@@ -173,6 +173,19 @@ class DatabricksConfig(DatabricksBase, OpenAILikeChatConfig, AnthropicConfig):
             ),
         )
 
+    def _map_openai_to_dbrx_tool(self, model: str, tools: List) -> List[DatabricksTool]:
+        # if not claude, send as is
+        if "claude" not in model:
+            return tools
+
+        # if claude, convert to anthropic tool and then to databricks tool
+        anthropic_tools = self._map_tools(tools=tools)
+        databricks_tools = [
+            cast(DatabricksTool, self.convert_anthropic_tool_to_databricks_tool(tool))
+            for tool in anthropic_tools
+        ]
+        return databricks_tools
+
     def map_response_format_to_databricks_tool(
         self,
         model: str,
@@ -202,6 +215,10 @@ class DatabricksConfig(DatabricksBase, OpenAILikeChatConfig, AnthropicConfig):
         mapped_params = super().map_openai_params(
             non_default_params, optional_params, model, drop_params
         )
+        if "tools" in mapped_params:
+            mapped_params["tools"] = self._map_openai_to_dbrx_tool(
+                model=model, tools=mapped_params["tools"]
+            )
         if (
             "max_completion_tokens" in non_default_params
             and replace_max_completion_tokens_with_max_tokens
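
For Claude models served through Databricks, OpenAI-style tools are now round-tripped through litellm's Anthropic mapping before the request is built. An illustrative data flow (shapes only; the exact intermediate fields come from self._map_tools and convert_anthropic_tool_to_databricks_tool above, and the tool itself is hypothetical):

openai_tool = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "parameters": {
            "type": "object",
            "properties": {"location": {"type": "string"}},
        },
    },
}

# Roughly the Anthropic-style intermediate produced by self._map_tools(...),
# which convert_anthropic_tool_to_databricks_tool then re-wraps as a DatabricksTool:
anthropic_tool = {
    "name": "get_weather",
    "input_schema": {
        "type": "object",
        "properties": {"location": {"type": "string"}},
    },
}
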
@@ -499,7 +516,10 @@ class DatabricksChatResponseIterator(BaseModelResponseIterator):
                 message.content = ""
                 choice["delta"]["content"] = message.content
                 choice["delta"]["tool_calls"] = None
+            elif tool_calls:
+                for _tc in tool_calls:
+                    if _tc.get("function", {}).get("arguments") == "{}":
+                        _tc["function"]["arguments"] = ""  # avoid invalid json
 
             # extract the content str
             content_str = DatabricksConfig.extract_content_str(
                 choice["delta"].get("content")

@@ -2,7 +2,11 @@ from typing import List, Literal, Optional, Tuple, Union, cast
 
 import litellm
 from litellm.secret_managers.main import get_secret_str
-from litellm.types.llms.openai import AllMessageValues, ChatCompletionImageObject
+from litellm.types.llms.openai import (
+    AllMessageValues,
+    ChatCompletionImageObject,
+    OpenAIChatCompletionToolParam,
+)
 from litellm.types.utils import ProviderSpecificModelInfo
 
 from ...openai.chat.gpt_transformation import OpenAIGPTConfig
@@ -150,6 +154,14 @@ class FireworksAIConfig(OpenAIGPTConfig):
                 ] = f"{content['image_url']['url']}#transform=inline"
         return content
 
+    def _transform_tools(
+        self, tools: List[OpenAIChatCompletionToolParam]
+    ) -> List[OpenAIChatCompletionToolParam]:
+        for tool in tools:
+            if tool.get("type") == "function":
+                tool["function"].pop("strict", None)
+        return tools
+
     def _transform_messages_helper(
         self, messages: List[AllMessageValues], model: str, litellm_params: dict
     ) -> List[AllMessageValues]:
@@ -196,6 +208,9 @@ class FireworksAIConfig(OpenAIGPTConfig):
         messages = self._transform_messages_helper(
             messages=messages, model=model, litellm_params=litellm_params
         )
+        if "tools" in optional_params and optional_params["tools"] is not None:
+            tools = self._transform_tools(tools=optional_params["tools"])
+            optional_params["tools"] = tools
         return super().transform_request(
             model=model,
             messages=messages,
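
Because Fireworks AI rejects OpenAI's structured-output 'strict' flag, _transform_tools pops it from every function tool before transform_request builds the payload. A standalone re-implementation of the effect, using a hypothetical tool:

tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",  # hypothetical tool
            "strict": True,  # accepted by OpenAI, rejected by Fireworks AI
            "parameters": {"type": "object", "properties": {}},
        },
    }
]
for tool in tools:
    if tool.get("type") == "function":
        tool["function"].pop("strict", None)  # same logic as _transform_tools
assert "strict" not in tools[0]["function"]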

@@ -374,7 +374,11 @@ class VertexGeminiConfig(VertexAIBaseConfig, BaseConfig):
             optional_params["responseLogprobs"] = value
         elif param == "top_logprobs":
             optional_params["logprobs"] = value
-        elif (param == "tools" or param == "functions") and isinstance(value, list):
+        elif (
+            (param == "tools" or param == "functions")
+            and isinstance(value, list)
+            and value
+        ):
             optional_params["tools"] = self._map_function(value=value)
             optional_params["litellm_param_is_function_call"] = (
                 True if param == "functions" else False
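
The added "and value" truthiness check means an empty list never reaches _map_function, so no empty tools block is sent to Gemini (the failure mode in issue #9080). A hypothetical call that is now handled cleanly (model name illustrative; a Gemini API key is assumed):

import litellm

response = litellm.completion(
    model="gemini/gemini-1.5-pro",
    messages=[{"role": "user", "content": "hi"}],
    tools=[],  # previously mapped into an empty tools entry; now dropped
)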

@@ -695,6 +695,7 @@ class ChatCompletionToolParamFunctionChunk(TypedDict, total=False):
     name: Required[str]
     description: str
     parameters: dict
+    strict: bool
 
 
 class OpenAIChatCompletionToolParam(TypedDict):
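
With strict added to the TypedDict, tool definitions carrying OpenAI's strict flag now type-check. A minimal sketch using a hypothetical function:

from litellm.types.llms.openai import ChatCompletionToolParamFunctionChunk

fn: ChatCompletionToolParamFunctionChunk = {
    "name": "get_weather",
    "parameters": {"type": "object", "properties": {}},
    "strict": True,  # newly allowed key
}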

@@ -6112,6 +6112,8 @@ def validate_and_fix_openai_messages(messages: List):
     for message in messages:
         if not message.get("role"):
             message["role"] = "assistant"
+        if message.get("tool_calls"):
+            message["tool_calls"] = jsonify_tools(tools=message["tool_calls"])
 
     return validate_chat_completion_messages(messages=messages)
@@ -6705,3 +6707,20 @@ def return_raw_request(endpoint: CallTypes, kwargs: dict) -> RawRequestTypedDict:
         return RawRequestTypedDict(
             error=received_exception,
         )
+
+
+def jsonify_tools(tools: List[Any]) -> List[Dict]:
+    """
+    Fixes https://github.com/BerriAI/litellm/issues/9321
+
+    Where user passes in a pydantic base model
+    """
+    new_tools: List[Dict] = []
+    for tool in tools:
+        if isinstance(tool, BaseModel):
+            tool = tool.model_dump(exclude_none=True)
+        elif isinstance(tool, dict):
+            tool = tool.copy()
+        if isinstance(tool, dict):
+            new_tools.append(tool)
+    return new_tools
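
The failure in issue #9321 comes from replaying a previous response's tool calls, which are pydantic models, back into the next request. A sketch of the round trip jsonify_tools now handles (type names as defined in litellm.types.utils; the conversation itself is hypothetical):

from litellm.types.utils import ChatCompletionMessageToolCall, Function

tool_call = ChatCompletionMessageToolCall(  # e.g. response.choices[0].message.tool_calls[0]
    id="call_123",
    type="function",
    function=Function(name="get_weather", arguments='{"location": "SF"}'),
)
messages = [
    {"role": "user", "content": "What's the weather in SF?"},
    {"role": "assistant", "tool_calls": [tool_call]},  # BaseModel, not a plain dict
    {"role": "tool", "tool_call_id": "call_123", "content": "sunny"},
]
# validate_and_fix_openai_messages(messages) now converts the BaseModel entry via
# tool_call.model_dump(exclude_none=True) before message validation runs.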