feat(vertex_httpx.py): Support gemini 'response_schema' param

Fixes https://github.com/BerriAI/litellm/pull/3366#issuecomment-2269183338
This commit is contained in:
Krrish Dholakia 2024-08-05 07:56:44 -07:00
parent 1c747f3ad3
commit aab09f5b86
2 changed files with 10 additions and 4 deletions

View file

@@ -182,7 +182,12 @@ class GoogleAIStudioGeminiConfig: # key diff from VertexAI - 'frequency_penalty
if param == "max_tokens":
optional_params["max_output_tokens"] = value
if param == "response_format" and value["type"] == "json_object": # type: ignore
optional_params["response_mime_type"] = "application/json"
if value["type"] == "json_object": # type: ignore
optional_params["response_mime_type"] = "application/json"
elif value["type"] == "text": # type: ignore
optional_params["response_mime_type"] = "text/plain"
if "response_schema" in value: # type: ignore
optional_params["response_schema"] = value["response_schema"] # type: ignore
if param == "tools" and isinstance(value, list):
gtool_func_declarations = []
for tool in value:

View file

@@ -903,10 +903,10 @@ from litellm.tests.test_completion import response_format_tests
@pytest.mark.parametrize(
"model",
[
# "vertex_ai/mistral-large@2407",
# "vertex_ai/mistral-nemo@2407",
"vertex_ai/mistral-large@2407",
"vertex_ai/mistral-nemo@2407",
"vertex_ai/codestral@2405",
# "vertex_ai/meta/llama3-405b-instruct-maas",
"vertex_ai/meta/llama3-405b-instruct-maas",
], #
) # "vertex_ai",
@pytest.mark.parametrize(
@@ -1347,6 +1347,7 @@ def vertex_httpx_mock_post_invalid_schema_response_anthropic(*args, **kwargs):
"model, vertex_location, supports_response_schema",
[
("vertex_ai_beta/gemini-1.5-pro-001", "us-central1", True),
("gemini/gemini-1.5-pro", None, True),
("vertex_ai_beta/gemini-1.5-flash", "us-central1", False),
("vertex_ai/claude-3-5-sonnet@20240620", "us-east5", False),
],