add support for response_format=json anthropic

Ishaan Jaff 2024-11-12 14:37:55 -08:00
parent 4192d7ec6f
commit 7ef3b680a2
2 changed files with 49 additions and 17 deletions
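
This change maps OpenAI-style `response_format` JSON requests onto Anthropic's forced tool-use flow. A minimal usage sketch, assuming litellm is installed and ANTHROPIC_API_KEY is set; the model name and prompt are illustrative, not part of this commit:

    # Sketch only: model name and prompt are illustrative assumptions.
    import litellm

    response = litellm.completion(
        model="anthropic/claude-3-5-sonnet-20241022",  # illustrative model name
        messages=[{"role": "user", "content": "Return the three primary colors as JSON."}],
        response_format={"type": "json_object"},
    )
    print(response.choices[0].message.content)  # expected to be a JSON string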


@@ -7,6 +7,7 @@ from litellm.types.llms.anthropic import (
     AllAnthropicToolsValues,
     AnthropicComputerTool,
     AnthropicHostedTools,
+    AnthropicInputSchema,
     AnthropicMessageRequestBase,
     AnthropicMessagesRequest,
     AnthropicMessagesTool,
@@ -159,15 +160,19 @@ class AnthropicConfig:
         returned_tool: Optional[AllAnthropicToolsValues] = None
         if tool["type"] == "function" or tool["type"] == "custom":
+            _input_function_parameters: dict = (
+                tool["function"].get("parameters", None) or {}
+            )
+            _tool_input_schema: AnthropicInputSchema = AnthropicInputSchema(
+                type=_input_function_parameters.get("type", "object"),
+                properties=_input_function_parameters.get("properties", {}),
+                additionalProperties=_input_function_parameters.get(
+                    "additionalProperties", True
+                ),
+            )
             _tool = AnthropicMessagesTool(
                 name=tool["function"]["name"],
-                input_schema=tool["function"].get(
-                    "parameters",
-                    {
-                        "type": "object",
-                        "properties": {},
-                    },
-                ),
+                input_schema=_tool_input_schema,
             )
             _description = tool["function"].get("description")
@@ -304,17 +309,10 @@ class AnthropicConfig:
            - You should set tool_choice (see Forcing tool use) to instruct the model to explicitly use that tool
            - Remember that the model will pass the input to the tool, so the name of the tool and description should be from the models perspective.
            """
-            _tool_choice = None
             _tool_choice = {"name": "json_tool_call", "type": "tool"}
-            _tool = AnthropicMessagesTool(
-                name="json_tool_call",
-                input_schema={
-                    "type": "object",
-                    "properties": {"values": json_schema},  # type: ignore
-                },
+            _tool = self._create_json_tool_call_for_response_format(
+                json_schema=json_schema,
             )
             optional_params["tools"] = [_tool]
             optional_params["tool_choice"] = _tool_choice
             optional_params["json_mode"] = True
@@ -341,6 +339,34 @@ class AnthropicConfig:
         return optional_params
 
+    def _create_json_tool_call_for_response_format(
+        self,
+        json_schema: Optional[dict] = None,
+    ) -> AnthropicMessagesTool:
+        """
+        Handles creating a tool call for getting responses in JSON format.
+
+        Args:
+            json_schema (Optional[dict]): The JSON schema the response should be in
+
+        Returns:
+            AnthropicMessagesTool: The tool call to send to Anthropic API to get responses in JSON format
+        """
+        _input_schema: AnthropicInputSchema = AnthropicInputSchema(
+            type="object",
+        )
+
+        if json_schema is None:
+            # Anthropic raises a 400 BadRequest error if properties is passed as None
+            # see usage with additionalProperties (Example 5) https://github.com/anthropics/anthropic-cookbook/blob/main/tool_use/extracting_structured_json.ipynb
+            _input_schema["additionalProperties"] = True
+            _input_schema["properties"] = {}
+        else:
+            _input_schema["properties"] = json_schema
+
+        _tool = AnthropicMessagesTool(name="json_tool_call", input_schema=_input_schema)
+        return _tool
+
     def is_cache_control_set(self, messages: List[AllMessageValues]) -> bool:
         """
         Return if {"cache_control": ..} in message content block


@@ -12,10 +12,16 @@ class AnthropicMessagesToolChoice(TypedDict, total=False):
     disable_parallel_tool_use: bool  # default is false
 
 
+class AnthropicInputSchema(TypedDict, total=False):
+    type: Optional[str]
+    properties: Optional[dict]
+    additionalProperties: Optional[bool]
+
+
 class AnthropicMessagesTool(TypedDict, total=False):
     name: Required[str]
     description: str
-    input_schema: Required[dict]
+    input_schema: Optional[AnthropicInputSchema]
     type: Literal["custom"]
     cache_control: Optional[Union[dict, ChatCompletionCachedContent]]
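
A short sketch of constructing the new typed objects directly; only the types come from this commit, while the tool name and fields are illustrative:

    # Illustrative construction of the TypedDicts added above.
    from litellm.types.llms.anthropic import AnthropicInputSchema, AnthropicMessagesTool

    schema = AnthropicInputSchema(
        type="object",
        properties={"city": {"type": "string"}},
        additionalProperties=False,
    )
    tool = AnthropicMessagesTool(name="get_weather", input_schema=schema)  # hypothetical tool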