mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-26 03:04:13 +00:00
Fix bedrock passing response_format: {"type": "text"}
(#8900)
* fix(converse_transformation.py): ignore `type: text` value in response_format (no-op for bedrock)
* fix(converse_transformation.py): handle adding response_format value to tools
* fix(base_invoke_transformation.py): fix `get_bedrock_invoke_provider` to handle cross-region-inference models
* test(test_bedrock_completion.py): add unit tests for bedrock invoke provider logic
* test: update test
* fix(exception_mapping_utils.py): add context-window-exceeded error handling for the databricks provider route
* fix(fireworks_ai/): support passing tools + response_format together
* fix: cleanup
* fix(base_invoke_transformation.py): fix imports
This commit is contained in:
parent
c8dc4f3eec
commit
c84b489d58
8 changed files with 194 additions and 24 deletions
|
@ -254,6 +254,56 @@ class BaseLLMChatTest(ABC):
|
|||
# relevant issue: https://github.com/BerriAI/litellm/issues/6741
|
||||
assert response.choices[0].message.content is not None
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "response_format",
    [
        {"type": "text"},
    ],
)
@pytest.mark.flaky(retries=6, delay=1)
def test_response_format_type_text_with_tool_calls_no_tool_choice(
    self, response_format
):
    """Regression check: sending response_format={"type": "text"} together with
    tools (and no tool_choice) must not break the completion call.

    Skips instead of failing when the model reports a context-window overflow,
    since that is an environment condition rather than the behavior under test.
    """
    call_kwargs = self.get_base_completion_call_args()

    # Single function tool; schema content is what the regression exercises.
    weather_tool = {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    "unit": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"],
                    },
                },
                "required": ["location"],
            },
        },
    }

    try:
        response = self.completion_function(
            **call_kwargs,
            messages=[
                {"role": "user", "content": "What's the weather like in Boston today?"},
            ],
            response_format=response_format,
            tools=[weather_tool],
            drop_params=True,
        )
    except litellm.ContextWindowExceededError:
        pytest.skip("Model exceeded context window")

    # The call must succeed; content of the reply is provider-dependent.
    assert response is not None
|
||||
|
||||
def test_response_format_type_text(self):
|
||||
"""
|
||||
Test that the response format type text does not lead to tool calls
|
||||
|
@ -287,6 +337,7 @@ class BaseLLMChatTest(ABC):
|
|||
|
||||
print(f"translated_params={translated_params}")
|
||||
|
||||
|
||||
@pytest.mark.flaky(retries=6, delay=1)
|
||||
def test_json_response_pydantic_obj(self):
|
||||
litellm.set_verbose = True
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue