From 20e39d6acca8274f9a7e5fdb1d2cb81d8fa9ca39 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Sat, 6 Jul 2024 17:27:14 -0700
Subject: [PATCH] fix(utils.py): cleanup 'additionalProperties=False' for tool
 calling with zod

Fixes issue with zod passing in additionalProperties=False, causing
vertex ai / gemini calls to fail
---
 litellm/litellm_core_utils/litellm_logging.py |  1 -
 litellm/proxy/_new_secret_config.yaml         |  5 +++--
 litellm/proxy/litellm_pre_call_utils.py       |  1 +
 litellm/proxy/proxy_server.py                 |  4 ++++
 litellm/proxy/utils.py                        |  2 +-
 litellm/utils.py                              | 18 ++++++++++++++++++
 6 files changed, 27 insertions(+), 4 deletions(-)

diff --git a/litellm/litellm_core_utils/litellm_logging.py b/litellm/litellm_core_utils/litellm_logging.py
index f9f32552d..8aae2afd4 100644
--- a/litellm/litellm_core_utils/litellm_logging.py
+++ b/litellm/litellm_core_utils/litellm_logging.py
@@ -1799,7 +1799,6 @@ def set_callbacks(callback_list, function_id=None):
 
     try:
         for callback in callback_list:
-            print_verbose(f"init callback list: {callback}")
             if callback == "sentry":
                 try:
                     import sentry_sdk
diff --git a/litellm/proxy/_new_secret_config.yaml b/litellm/proxy/_new_secret_config.yaml
index a7d46d506..eaeb7bbcb 100644
--- a/litellm/proxy/_new_secret_config.yaml
+++ b/litellm/proxy/_new_secret_config.yaml
@@ -2,8 +2,9 @@ model_list:
   - model_name: tts
     litellm_params:
       model: "openai/*"
-litellm_settings:
-  success_callback: ["langfuse"]
+  - model_name: gemini-1.5-flash
+    litellm_params:
+      model: gemini/gemini-1.5-flash
 
 general_settings:
   alerting: ["slack"]
diff --git a/litellm/proxy/litellm_pre_call_utils.py b/litellm/proxy/litellm_pre_call_utils.py
index aec6215ce..673b027ca 100644
--- a/litellm/proxy/litellm_pre_call_utils.py
+++ b/litellm/proxy/litellm_pre_call_utils.py
@@ -176,6 +176,7 @@ async def add_litellm_data_to_request(
 
 def _add_otel_traceparent_to_data(data: dict, request: Request):
     from litellm.proxy.proxy_server import open_telemetry_logger
+
     if data is None:
         return
     if open_telemetry_logger is None:
diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index b673b26ab..b05cd2505 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -2720,6 +2720,10 @@ async def chat_completion(
         except:
             data = json.loads(body_str)
 
+        verbose_proxy_logger.debug(
+            "Request received by LiteLLM:\n{}".format(json.dumps(data, indent=4)),
+        )
+
         data = await add_litellm_data_to_request(
             data=data,
             request=request,
diff --git a/litellm/proxy/utils.py b/litellm/proxy/utils.py
index 8d4eff99a..661b0ce04 100644
--- a/litellm/proxy/utils.py
+++ b/litellm/proxy/utils.py
@@ -353,7 +353,7 @@ class ProxyLogging:
                     raise HTTPException(
                         status_code=400, detail={"error": response}
                     )
-            print_verbose(f"final data being sent to {call_type} call: {data}")
+
             return data
         except Exception as e:
             raise e
diff --git a/litellm/utils.py b/litellm/utils.py
index 62386b1d2..871db0a32 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -2555,6 +2555,24 @@ def get_optional_params(
             message=f"Function calling is not supported by {custom_llm_provider}.",
         )
 
+    if "tools" in non_default_params:
+        tools = non_default_params["tools"]
+        for (
+            tool
+        ) in (
+            tools
+        ):  # clean out 'additionalProperties = False'. Causes vertexai/gemini OpenAI API Schema errors - https://github.com/langchain-ai/langchainjs/issues/5240
+            tool_function = tool.get("function", {})
+            parameters = tool_function.get("parameters", None)
+            if parameters is not None:
+                new_parameters = copy.deepcopy(parameters)
+                if (
+                    "additionalProperties" in new_parameters
+                    and new_parameters["additionalProperties"] is False
+                ):
+                    new_parameters.pop("additionalProperties", None)
+                tool_function["parameters"] = new_parameters
+
     def _check_valid_arg(supported_params):
         verbose_logger.debug(
             f"\nLiteLLM completion() model= {model}; provider = {custom_llm_provider}"
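
Note for reviewers (illustration only, not part of the patch): below is a minimal
standalone sketch of the cleanup added to get_optional_params() above. The helper
name strip_additional_properties and the get_weather tool are hypothetical, chosen
only to mimic the zod-generated payload described in the linked langchainjs issue.

    import copy

    def strip_additional_properties(tools: list) -> list:
        """Mirror of the cleanup added to get_optional_params(): drop a
        top-level 'additionalProperties: False' from each tool's parameter
        schema, which vertex_ai/gemini schema validation otherwise rejects."""
        for tool in tools:
            tool_function = tool.get("function", {})
            parameters = tool_function.get("parameters", None)
            if parameters is not None:
                new_parameters = copy.deepcopy(parameters)
                if new_parameters.get("additionalProperties") is False:
                    new_parameters.pop("additionalProperties", None)
                tool_function["parameters"] = new_parameters
        return tools

    # A zod-style tool definition: zod emits "additionalProperties": false
    # on every generated JSON schema.
    tools = [
        {
            "type": "function",
            "function": {
                "name": "get_weather",  # hypothetical example tool
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                    "additionalProperties": False,
                },
            },
        }
    ]

    strip_additional_properties(tools)
    assert "additionalProperties" not in tools[0]["function"]["parameters"]

As in the patch, the schema is deep-copied via copy.deepcopy before mutation, so
the caller's original tool definition is left untouched.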