From 5ee6b87f2ea93212f3f6e69e60572cd682ee3011 Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Sat, 16 Dec 2023 22:15:41 +0530
Subject: [PATCH] (fix) vertexai - gemini

---
 litellm/llms/vertex_ai.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/litellm/llms/vertex_ai.py b/litellm/llms/vertex_ai.py
index f85bcd0f1..5457ee40d 100644
--- a/litellm/llms/vertex_ai.py
+++ b/litellm/llms/vertex_ai.py
@@ -244,6 +244,12 @@ def completion(
                 model_response = chat.send_message(prompt, generation_config=GenerationConfig(**optional_params), stream=stream)
                 optional_params["stream"] = True
                 return model_response
+            request_str += f"chat.send_message({prompt}, generation_config=GenerationConfig(**{optional_params})).text\n"
+            ## LOGGING
+            logging_obj.pre_call(input=prompt, api_key=None, additional_args={"complete_input_dict": optional_params, "request_str": request_str})
+            response_obj = chat.send_message(prompt, generation_config=GenerationConfig(**optional_params))
+            completion_response = response_obj.text
+            response_obj = response_obj._raw_response
         elif mode == "vision":
             print_verbose("\nMaking VertexAI Gemini Pro Vision Call")
             print_verbose(f"\nProcessing input messages = {messages}")
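
Reviewer note (not part of the patch): a minimal sketch of how the non-streaming Gemini chat path touched by this change is typically exercised. The project id, region, and prompt below are placeholder assumptions; it presumes Vertex AI credentials are already configured for the environment.

# Minimal usage sketch -- assumptions: gcloud auth is set up, and the
# project/location values are placeholders, not values from this patch.
import litellm

litellm.vertex_project = "my-gcp-project"   # assumed placeholder GCP project id
litellm.vertex_location = "us-central1"     # assumed placeholder region

# A non-streaming call routes through the mode == "chat" branch patched above,
# which now records the request string via logging_obj.pre_call before calling
# chat.send_message() and reading .text from the response.
response = litellm.completion(
    model="vertex_ai/gemini-pro",
    messages=[{"role": "user", "content": "Say hello from Vertex AI"}],
)
print(response.choices[0].message.content)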