Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 18:54:30 +00:00
fix(anthropic/chat/transformation.py): correctly update response_format to tool call transformation
Fixes https://github.com/BerriAI/litellm/issues/9411
parent b2852676f6
commit b3b93dbd14

3 changed files with 74 additions and 4 deletions
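For context on the fix described above, the sketch below shows the general idea of a response_format-to-tool-call transformation: an OpenAI-style "json_schema" response_format is mapped onto a forced Anthropic tool, so the model emits schema-conforming JSON as tool input. The helper name and exact payload shape are illustrative assumptions, not litellm's actual transformation code.

# Illustrative sketch only, not litellm's implementation: maps an OpenAI-style
# "json_schema" response_format onto a forced Anthropic tool call.
from typing import Any, Dict


def response_format_to_anthropic_tool(response_format: Dict[str, Any]) -> Dict[str, Any]:
    json_schema = response_format["json_schema"]
    tool_name = json_schema.get("name", "json_tool_call")
    return {
        "tools": [
            {
                "name": tool_name,
                "description": "Return the answer matching the requested JSON schema.",
                # Anthropic tools declare their parameters as a JSON schema under "input_schema".
                "input_schema": json_schema["schema"],
            }
        ],
        # Forcing this tool steers the model to emit the structured output as tool input,
        # for both streaming and non-streaming requests.
        "tool_choice": {"type": "tool", "name": tool_name},
    }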
@@ -20,6 +20,7 @@ from litellm.utils import (
     get_optional_params,
     ProviderConfigManager,
 )
+from litellm.main import stream_chunk_builder
 from typing import Union

 # test_example.py
@@ -338,7 +339,7 @@ class BaseLLMChatTest(ABC):

     @pytest.mark.flaky(retries=6, delay=1)
     def test_json_response_pydantic_obj(self):
-        litellm.set_verbose = True
+        litellm._turn_on_debug()
         from pydantic import BaseModel
         from litellm.utils import supports_response_schema

@@ -995,3 +996,72 @@ class BaseOSeriesModelsTest(ABC):  # test across azure/openai
             ), "temperature should not be in the request body"
         except Exception as e:
             pytest.fail(f"Error occurred: {e}")
+
+
+class BaseAnthropicChatTest(ABC):
+    """
+    Ensures consistent result across anthropic model usage
+    """
+
+    @abstractmethod
+    def get_base_completion_call_args(self) -> dict:
+        """Must return the base completion call args"""
+        pass
+
+    @property
+    def completion_function(self):
+        return litellm.completion
+
+    def test_anthropic_response_format_streaming_vs_non_streaming(self):
+        litellm.set_verbose = True
+        args = {
+            "messages": [
+                {
+                    "content": "Your goal is to summarize the previous agent's thinking process into short descriptions to let user better understand the research progress. If no information is available, just say generic phrase like 'Doing some research...' with the given output format. Make sure to adhere to the output format no matter what, even if you don't have any information or you are not allowed to respond to the given input information (then just say generic phrase like 'Doing some research...').",
+                    "role": "system",
+                },
+                {
+                    "role": "user",
+                    "content": "Here is the input data (previous agent's output): \n\n Let's try to refine our search further, focusing more on the technical aspects of home automation and home energy system management:",
+                },
+            ],
+            "response_format": {
+                "type": "json_schema",
+                "json_schema": {
+                    "name": "final_output",
+                    "strict": True,
+                    "schema": {
+                        "description": 'Progress report for the thinking process\n\nThis model represents a snapshot of the agent\'s current progress during\nthe thinking process, providing a brief description of the current activity.\n\nAttributes:\n agent_doing: Brief description of what the agent is currently doing.\n Should be kept under 10 words. Example: "Learning about home automation"',
+                        "properties": {
+                            "agent_doing": {"title": "Agent Doing", "type": "string"}
+                        },
+                        "required": ["agent_doing"],
+                        "title": "ThinkingStep",
+                        "type": "object",
+                        "additionalProperties": False,
+                    },
+                },
+            },
+        }
+
+        base_completion_call_args = self.get_base_completion_call_args()
+
+        response = self.completion_function(
+            **base_completion_call_args, **args, stream=True
+        )
+
+        chunks = []
+        for chunk in response:
+            print(f"chunk: {chunk}")
+            chunks.append(chunk)
+
+        print(f"chunks: {chunks}")
+        built_response = stream_chunk_builder(chunks=chunks)
+
+        non_stream_response = self.completion_function(
+            **base_completion_call_args, **args, stream=False
+        )
+
+        assert json.loads(built_response.choices[0].message.content) == json.loads(
+            non_stream_response.choices[0].message.content
+        ), f"Got={json.loads(built_response.choices[0].message.content)}, Expected={json.loads(non_stream_response.choices[0].message.content)}"