diff --git a/litellm/tests/test_amazing_vertex_completion.py b/litellm/tests/test_amazing_vertex_completion.py
index 35beb75fe6..d58d68507a 100644
--- a/litellm/tests/test_amazing_vertex_completion.py
+++ b/litellm/tests/test_amazing_vertex_completion.py
@@ -138,7 +138,7 @@ def test_vertex_ai():

 def test_vertex_ai_stream():
     load_vertex_ai_credentials()
-    litellm.set_verbose = False
+    litellm.set_verbose = True
     litellm.vertex_project = "reliablekeys"
     import random

@@ -159,6 +159,8 @@ def test_vertex_ai_stream():
             "code-gecko@latest",
             "code-bison@001",
             "text-bison@001",
+            "gemini-1.5-pro",
+            "gemini-1.5-pro-vision",
         ]:  # our account does not have access to this model
             continue

diff --git a/litellm/tests/test_streaming.py b/litellm/tests/test_streaming.py
index dc1e1a097a..8c3187bd4a 100644
--- a/litellm/tests/test_streaming.py
+++ b/litellm/tests/test_streaming.py
@@ -801,7 +801,6 @@ def test_completion_bedrock_claude_stream():
             raise Exception("finish reason not set for last chunk")
         if complete_response.strip() == "":
             raise Exception("Empty response received")
-        print(f"completion_response: {complete_response}")
     except RateLimitError:
         pass
     except Exception as e:
diff --git a/litellm/utils.py b/litellm/utils.py
index ad9b65998f..566c6f36df 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -8705,7 +8705,7 @@ class CustomStreamWrapper:
                model_response.model = self.model
                print_verbose(
-                    f"model_response finish reason 3: {model_response.choices[0].finish_reason}"
+                    f"model_response finish reason 3: {model_response.choices[0].finish_reason}; response_obj={response_obj}"
                )
                ## FUNCTION CALL PARSING
                if (
@@ -8773,11 +8773,11 @@
            ## RETURN ARG
            if (
-                response_obj.get("text", None) is not None
+                completion_obj["content"] is not None
                or response_obj.get("original_chunk", None) is not None
            ):
                hold = False
-                if response_obj.get("content", None) is not None:
+                if completion_obj["content"] is not None:
                    hold, model_response_str = self.check_special_tokens(
                        chunk=completion_obj["content"],
                        finish_reason=model_response.choices[0].finish_reason,
                    )