From ab954243e814c92a1807d22ca65930c9df44d6c0 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Mon, 29 Apr 2024 08:09:59 -0700
Subject: [PATCH] fix(utils.py): fix watson streaming

---
 litellm/utils.py | 15 ---------------
 1 file changed, 15 deletions(-)

diff --git a/litellm/utils.py b/litellm/utils.py
index 6e62b64c9..bb8df09b0 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -10154,21 +10154,6 @@ class CustomStreamWrapper:
             elif self.custom_llm_provider == "watsonx":
                 response_obj = self.handle_watsonx_stream(chunk)
                 completion_obj["content"] = response_obj["text"]
-                print_verbose(f"completion obj content: {completion_obj['content']}")
-                if response_obj.get("prompt_tokens") is not None:
-                    prompt_token_count = getattr(
-                        model_response.usage, "prompt_tokens", 0
-                    )
-                    model_response.usage.prompt_tokens = (
-                        prompt_token_count + response_obj["prompt_tokens"]
-                    )
-                if response_obj.get("completion_tokens") is not None:
-                    model_response.usage.completion_tokens = response_obj[
-                        "completion_tokens"
-                    ]
-                model_response.usage.total_tokens = getattr(
-                    model_response.usage, "prompt_tokens", 0
-                ) + getattr(model_response.usage, "completion_tokens", 0)
                 if response_obj["is_finished"]:
                     self.received_finish_reason = response_obj["finish_reason"]
             elif self.custom_llm_provider == "text-completion-openai":
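
Note (not part of the patch): the removed lines mutated model_response.usage on every watsonx chunk, folding per-chunk prompt/completion token counts into the running usage object. Below is a minimal, hypothetical Python sketch of the alternative pattern of deferring usage reporting until the stream signals it is finished; the StreamUsage, WatsonxChunk, and stream_with_deferred_usage names are assumptions for illustration only and are not litellm APIs.

    # Hypothetical sketch; names and shapes are assumptions, not litellm code.
    from dataclasses import dataclass
    from typing import Iterable, Iterator, Optional

    @dataclass
    class StreamUsage:
        prompt_tokens: int = 0
        completion_tokens: int = 0
        total_tokens: int = 0

    @dataclass
    class WatsonxChunk:
        text: str
        is_finished: bool = False
        finish_reason: Optional[str] = None
        prompt_tokens: Optional[int] = None       # provider may only report these
        completion_tokens: Optional[int] = None   # on (or near) the final chunk

    def stream_with_deferred_usage(
        chunks: Iterable[WatsonxChunk], usage: StreamUsage
    ) -> Iterator[str]:
        """Yield chunk text; set usage once, when the stream is finished,
        instead of mutating the usage object on every intermediate chunk."""
        for chunk in chunks:
            yield chunk.text
            if chunk.is_finished:
                if chunk.prompt_tokens is not None:
                    usage.prompt_tokens = chunk.prompt_tokens
                if chunk.completion_tokens is not None:
                    usage.completion_tokens = chunk.completion_tokens
                usage.total_tokens = usage.prompt_tokens + usage.completion_tokens

    if __name__ == "__main__":
        usage = StreamUsage()
        fake_stream = [
            WatsonxChunk(text="Hello"),
            WatsonxChunk(text=", world", is_finished=True, finish_reason="stop",
                         prompt_tokens=12, completion_tokens=4),
        ]
        print("".join(stream_with_deferred_usage(fake_stream, usage)))
        print(usage)  # StreamUsage(prompt_tokens=12, completion_tokens=4, total_tokens=16)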