forked from phoenix/litellm-mirror
* Fixed gemini-1.5-flash pricing
* (models): Added missing gemini experimental models + fixed pricing for gemini-1.5-pro-exp-0827
* Added gemini/gemini-1.5-flash-001 model
* Updated supports_response_schema to true for gemini flash 1.5 models
* feat(vertex): Use correct provider for response_schema support check

Co-authored-by: F1bos <44951186+F1bos@users.noreply.github.com>
This commit is contained in:
parent 2516360ceb
commit 4911979c61
2 changed files with 18 additions and 4 deletions
@@ -2,7 +2,7 @@ from typing import List, Literal, Tuple

 import httpx

-from litellm import supports_system_messages, verbose_logger
+from litellm import supports_system_messages, supports_response_schema, verbose_logger
 from litellm.types.llms.vertex_ai import PartType
@@ -40,6 +40,20 @@ def get_supports_system_message(
     return supports_system_message


+def get_supports_response_schema(
+    model: str, custom_llm_provider: Literal["vertex_ai", "vertex_ai_beta", "gemini"]
+) -> bool:
+    _custom_llm_provider = custom_llm_provider
+    if custom_llm_provider == "vertex_ai_beta":
+        _custom_llm_provider = "vertex_ai"
+
+    _supports_response_schema = supports_response_schema(
+        model=model, custom_llm_provider=_custom_llm_provider
+    )
+
+    return _supports_response_schema
+
+
 from typing import Literal, Optional

 all_gemini_url_modes = Literal["chat", "embedding", "batch_embedding"]
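The helper added above is a thin wrapper: it maps the vertex_ai_beta provider alias onto vertex_ai and then defers to litellm's supports_response_schema lookup. A minimal caller sketch, assuming get_supports_response_schema is imported from the module changed above; the model name is illustrative, not taken from this diff:

    if get_supports_response_schema(
        model="gemini-1.5-flash-001", custom_llm_provider="vertex_ai_beta"
    ):
        # provider/model pair supports structured output: pass response_schema through
        ...
    else:
        # unsupported: fall back to requesting the schema via the prompt
        # (see the call-site change in the second file below)
        ...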
@@ -22,7 +22,7 @@ from litellm.types.llms.vertex_ai import (
     Tools,
 )

-from ..common_utils import get_supports_system_message
+from ..common_utils import get_supports_system_message, get_supports_response_schema
 from ..vertex_ai_non_gemini import _gemini_convert_messages_with_history
@@ -46,8 +46,8 @@ def _transform_request_body(
     )
     # Checks for 'response_schema' support - if passed in
     if "response_schema" in optional_params:
-        supports_response_schema = litellm.supports_response_schema(
-            model=model, custom_llm_provider="vertex_ai"
+        supports_response_schema = get_supports_response_schema(
+            model=model, custom_llm_provider=custom_llm_provider
         )
         if supports_response_schema is False:
             user_response_schema_message = response_schema_prompt(
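The effect of this call-site change: the capability check now uses the caller's actual provider rather than a hard-coded "vertex_ai", with the vertex_ai_beta alias normalized inside the helper. A rough before/after sketch, using the variables as they appear in _transform_request_body:

    # before: every request was checked against the vertex_ai capability table,
    # even when the caller came through vertex_ai_beta or gemini (Google AI Studio)
    litellm.supports_response_schema(model=model, custom_llm_provider="vertex_ai")

    # after: the provider is passed through, so gemini models are checked against
    # their own entries and vertex_ai_beta is folded into vertex_ai
    get_supports_response_schema(model=model, custom_llm_provider=custom_llm_provider)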