(fix) using Anthropic response_format={"type": "json_object"} (#6721)

* add support for response_format=json anthropic
* add test_json_response_format to baseLLM ChatTest
* fix test_litellm_anthropic_prompt_caching_tools
* fix test_anthropic_function_call_with_no_schema
* test test_create_json_tool_call_for_response_format
parent e7543378b8
commit 6d4cf2d908

4 changed files with 105 additions and 17 deletions
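For context, this is the behavior the commit enables, shown as a minimal usage sketch (the model name is an assumption, not part of the diff):

```python
import litellm

# Hypothetical call through litellm to an Anthropic chat model. With this
# change, response_format={"type": "json_object"} is translated into a forced
# "json_tool_call" tool call on the Anthropic Messages API.
response = litellm.completion(
    model="anthropic/claude-3-5-sonnet-20241022",  # assumed model name
    messages=[
        {"role": "user", "content": "Respond with this in json. city=San Francisco, state=CA"},
    ],
    response_format={"type": "json_object"},
)
print(response)
```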
@@ -7,6 +7,7 @@ from litellm.types.llms.anthropic import (
     AllAnthropicToolsValues,
     AnthropicComputerTool,
     AnthropicHostedTools,
+    AnthropicInputSchema,
     AnthropicMessageRequestBase,
     AnthropicMessagesRequest,
     AnthropicMessagesTool,
@@ -159,15 +160,17 @@ class AnthropicConfig:
         returned_tool: Optional[AllAnthropicToolsValues] = None

         if tool["type"] == "function" or tool["type"] == "custom":
+            _input_schema: dict = tool["function"].get(
+                "parameters",
+                {
+                    "type": "object",
+                    "properties": {},
+                },
+            )
+            input_schema: AnthropicInputSchema = AnthropicInputSchema(**_input_schema)
             _tool = AnthropicMessagesTool(
                 name=tool["function"]["name"],
-                input_schema=tool["function"].get(
-                    "parameters",
-                    {
-                        "type": "object",
-                        "properties": {},
-                    },
-                ),
+                input_schema=input_schema,
             )

             _description = tool["function"].get("description")
@@ -304,17 +307,10 @@ class AnthropicConfig:
         - You should set tool_choice (see Forcing tool use) to instruct the model to explicitly use that tool
         - Remember that the model will pass the input to the tool, so the name of the tool and description should be from the model’s perspective.
         """
-        _tool_choice = None
         _tool_choice = {"name": "json_tool_call", "type": "tool"}
-
-        _tool = AnthropicMessagesTool(
-            name="json_tool_call",
-            input_schema={
-                "type": "object",
-                "properties": {"values": json_schema},  # type: ignore
-            },
+        _tool = self._create_json_tool_call_for_response_format(
+            json_schema=json_schema,
         )
-
         optional_params["tools"] = [_tool]
         optional_params["tool_choice"] = _tool_choice
         optional_params["json_mode"] = True
@@ -341,6 +337,34 @@ class AnthropicConfig:

         return optional_params

+    def _create_json_tool_call_for_response_format(
+        self,
+        json_schema: Optional[dict] = None,
+    ) -> AnthropicMessagesTool:
+        """
+        Handles creating a tool call for getting responses in JSON format.
+
+        Args:
+            json_schema (Optional[dict]): The JSON schema the response should be in
+
+        Returns:
+            AnthropicMessagesTool: The tool call to send to Anthropic API to get responses in JSON format
+        """
+        _input_schema: AnthropicInputSchema = AnthropicInputSchema(
+            type="object",
+        )
+
+        if json_schema is None:
+            # Anthropic raises a 400 BadRequest error if properties is passed as None
+            # see usage with additionalProperties (Example 5) https://github.com/anthropics/anthropic-cookbook/blob/main/tool_use/extracting_structured_json.ipynb
+            _input_schema["additionalProperties"] = True
+            _input_schema["properties"] = {}
+        else:
+            _input_schema["properties"] = json_schema
+
+        _tool = AnthropicMessagesTool(name="json_tool_call", input_schema=_input_schema)
+        return _tool
+
     def is_cache_control_set(self, messages: List[AllMessageValues]) -> bool:
         """
         Return if {"cache_control": ..} in message content block
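Taken together, when no JSON schema is provided, the tools portion of the outgoing Anthropic request would look roughly like this (a sketch assembled from the code above, not a captured request):

```python
# Sketch of the tools/tool_choice fragment produced by
# _create_json_tool_call_for_response_format() when json_schema is None.
request_fragment = {
    "tools": [
        {
            "name": "json_tool_call",
            "input_schema": {
                "type": "object",
                # properties must not be None, or Anthropic returns a 400 BadRequest
                "properties": {},
                "additionalProperties": True,
            },
        }
    ],
    # Forces the model to respond by calling the JSON tool.
    "tool_choice": {"name": "json_tool_call", "type": "tool"},
}
```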
@@ -12,10 +12,16 @@ class AnthropicMessagesToolChoice(TypedDict, total=False):
     disable_parallel_tool_use: bool # default is false


+class AnthropicInputSchema(TypedDict, total=False):
+    type: Optional[str]
+    properties: Optional[dict]
+    additionalProperties: Optional[bool]
+
+
 class AnthropicMessagesTool(TypedDict, total=False):
     name: Required[str]
     description: str
-    input_schema: Required[dict]
+    input_schema: Optional[AnthropicInputSchema]
     type: Literal["custom"]
     cache_control: Optional[Union[dict, ChatCompletionCachedContent]]

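For illustration, the new typed dicts would be populated like this (a sketch; with total=False only the keys you actually set end up in the dict, while name stays required on the tool):

```python
from litellm.types.llms.anthropic import AnthropicInputSchema, AnthropicMessagesTool

# An input schema asking for a single string field.
schema: AnthropicInputSchema = AnthropicInputSchema(
    type="object",
    properties={"city": {"type": "string"}},
)

# The tool wrapper that gets sent to Anthropic.
tool: AnthropicMessagesTool = AnthropicMessagesTool(
    name="json_tool_call",
    input_schema=schema,
)
```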
@@ -53,6 +53,32 @@ class BaseLLMChatTest(ABC):
         response = litellm.completion(**base_completion_call_args, messages=messages)
         assert response is not None

+    def test_json_response_format(self):
+        """
+        Test that the JSON response format is supported by the LLM API
+        """
+        base_completion_call_args = self.get_base_completion_call_args()
+        litellm.set_verbose = True
+
+        messages = [
+            {
+                "role": "system",
+                "content": "Your output should be a JSON object with no additional properties. ",
+            },
+            {
+                "role": "user",
+                "content": "Respond with this in json. city=San Francisco, state=CA, weather=sunny, temp=60",
+            },
+        ]
+
+        response = litellm.completion(
+            **base_completion_call_args,
+            messages=messages,
+            response_format={"type": "json_object"},
+        )
+
+        print(response)
+
     @pytest.fixture
     def pdf_messages(self):
         import base64
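The new test_json_response_format runs for any provider whose test class implements the abstract hook; a hypothetical subclass would look like this (class name, import path, and model are assumptions):

```python
# Hypothetical provider-specific test class; the import path is an assumption
# about the repo layout.
from base_llm_unit_tests import BaseLLMChatTest


class TestAnthropicCompletion(BaseLLMChatTest):
    def get_base_completion_call_args(self) -> dict:
        # These kwargs are spread into every litellm.completion() call made by
        # the inherited tests, including test_json_response_format.
        return {"model": "anthropic/claude-3-5-sonnet-20241022"}
```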
@@ -627,6 +627,38 @@ def test_anthropic_tool_helper(cache_control_location):
     assert tool["cache_control"] == {"type": "ephemeral"}


+def test_create_json_tool_call_for_response_format():
+    """
+    tests using response_format=json with anthropic
+
+    A tool call to anthropic is made when response_format=json is used.
+
+    """
+    # Initialize AnthropicConfig
+    config = AnthropicConfig()
+
+    # Test case 1: No schema provided
+    # See Anthropics Example 5 on how to handle cases when no schema is provided https://github.com/anthropics/anthropic-cookbook/blob/main/tool_use/extracting_structured_json.ipynb
+    tool = config._create_json_tool_call_for_response_format()
+    assert tool["name"] == "json_tool_call"
+    _input_schema = tool.get("input_schema")
+    assert _input_schema is not None
+    assert _input_schema.get("type") == "object"
+    assert _input_schema.get("additionalProperties") is True
+    assert _input_schema.get("properties") == {}
+
+    # Test case 2: With custom schema
+    # reference: https://github.com/anthropics/anthropic-cookbook/blob/main/tool_use/extracting_structured_json.ipynb
+    custom_schema = {"name": {"type": "string"}, "age": {"type": "integer"}}
+    tool = config._create_json_tool_call_for_response_format(json_schema=custom_schema)
+    assert tool["name"] == "json_tool_call"
+    _input_schema = tool.get("input_schema")
+    assert _input_schema is not None
+    assert _input_schema.get("type") == "object"
+    assert _input_schema.get("properties") == custom_schema
+    assert "additionalProperties" not in _input_schema
+
+
 from litellm import completion

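For reference, the forced tool call comes back from Anthropic as a tool_use content block with the requested JSON in its input field; a rough illustration follows (all values made up), which litellm's json_mode handling is then expected to surface as the message content (an assumption about downstream handling, not shown in this diff):

```python
# Illustrative shape of an Anthropic /v1/messages response when the
# "json_tool_call" tool is forced via tool_choice; all values are made up.
example_anthropic_response = {
    "role": "assistant",
    "stop_reason": "tool_use",
    "content": [
        {
            "type": "tool_use",
            "id": "toolu_01ExampleOnly",  # illustrative id
            "name": "json_tool_call",
            "input": {"city": "San Francisco", "state": "CA", "weather": "sunny", "temp": 60},
        }
    ],
}
```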