Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 18:54:30 +00:00
Litellm dev 02 10 2025 p1 (#8438)
* fix(azure/chat/gpt_transformation.py): fix str compare to use int - ensure correct api version check is done
  Resolves https://github.com/BerriAI/litellm/issues/8241#issuecomment-2647142891
* test(test_azure_openai.py): add better testing
This commit is contained in:
parent 6a4013a075
commit 47f46f92c8
3 changed files with 68 additions and 27 deletions
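The transformation fix referenced above concerns how the Azure api_version string is compared when deciding whether a parameter such as response_format can be forwarded to the target API version. Below is a minimal sketch of the pitfall and the int-based fix; the supports_response_format helper and the 2024-08 cutoff are illustrative assumptions, not litellm's actual implementation in azure/chat/gpt_transformation.py.

# Minimal sketch (not litellm's actual code): string comparison of version
# components is lexicographic, which can disagree with numeric ordering;
# casting to int compares them numerically.

def supports_response_format(api_version: str) -> bool:
    """Hypothetical check: assume structured response_format is supported
    from the 2024-08 API versions onward (illustrative cutoff)."""
    year, month = api_version.split("-")[:2]

    # Buggy pattern: lexicographic comparison of digit strings,
    # e.g. "9" >= "10" is True as strings, and unpadded components
    # ("2024-9-01") would sort incorrectly against padded ones.
    # return year >= "2024" and month >= "08"

    # Fixed pattern: compare as integers so ordering is numeric,
    # and use a tuple so the year dominates the month.
    return (int(year), int(month)) >= (2024, 8)


if __name__ == "__main__":
    assert supports_response_format("2024-10-21")               # newer GA version
    assert not supports_response_format("2024-02-15-preview")   # older preview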
@@ -285,12 +285,27 @@ def test_azure_openai_gpt_4o_naming(monkeypatch):
     assert "tool_calls" not in mock_post.call_args.kwargs
 
 
-def test_azure_gpt_4o_with_tool_call_and_response_format():
+@pytest.mark.parametrize(
+    "api_version",
+    [
+        "2024-10-21",
+        # "2024-02-15-preview",
+    ],
+)
+def test_azure_gpt_4o_with_tool_call_and_response_format(api_version):
     from litellm import completion
     from typing import Optional
     from pydantic import BaseModel
     import litellm
 
+    from openai import AzureOpenAI
+
+    client = AzureOpenAI(
+        api_key="fake-key",
+        base_url="https://fake-azure.openai.azure.com",
+        api_version=api_version,
+    )
+
     class InvestigationOutput(BaseModel):
         alert_explanation: Optional[str] = None
         investigation: Optional[str] = None
@@ -322,25 +337,34 @@ def test_azure_gpt_4o_with_tool_call_and_response_format():
             }
         ]
 
-    response = litellm.completion(
-        model="azure/gpt-4o",
-        messages=[
-            {
-                "role": "system",
-                "content": "You are a tool-calling AI assist provided with common devops and IT tools that you can use to troubleshoot problems or answer questions.\nWhenever possible you MUST first use tools to investigate then answer the question.",
-            },
-            {"role": "user", "content": "What is the current date and time in NYC?"},
-        ],
-        drop_params=True,
-        temperature=0.00000001,
-        tools=tools,
-        tool_choice="auto",
-        response_format=InvestigationOutput,  # commenting this line will cause the output to be correct
-    )
+    with patch.object(client.chat.completions.with_raw_response, "create") as mock_post:
+        response = litellm.completion(
+            model="azure/gpt-4o",
+            messages=[
+                {
+                    "role": "system",
+                    "content": "You are a tool-calling AI assist provided with common devops and IT tools that you can use to troubleshoot problems or answer questions.\nWhenever possible you MUST first use tools to investigate then answer the question.",
+                },
+                {
+                    "role": "user",
+                    "content": "What is the current date and time in NYC?",
+                },
+            ],
+            drop_params=True,
+            temperature=0.00000001,
+            tools=tools,
+            tool_choice="auto",
+            response_format=InvestigationOutput,  # commenting this line will cause the output to be correct
+            api_version=api_version,
+            client=client,
+        )
 
-    assert response.choices[0].finish_reason == "tool_calls"
+        assert response.choices[0].finish_reason == "tool_calls"
+        mock_post.assert_called_once()
 
-    print(response.to_json())
+        print(response.to_json())
+        if api_version == "2024-10-21":
+            assert "response_format" in mock_post.call_args.kwargs
+        else:
+            assert "response_format" not in mock_post.call_args.kwargs
 
 
 def test_map_openai_params():