diff --git a/litellm/tests/test_stream_chunk_builder.py b/litellm/tests/test_stream_chunk_builder.py
index ad72b22e2..08d89d1ab 100644
--- a/litellm/tests/test_stream_chunk_builder.py
+++ b/litellm/tests/test_stream_chunk_builder.py
@@ -164,8 +164,8 @@ def test_stream_chunk_builder_count_prompt_tokens():
         non_stream_prompt_tokens = response.usage.prompt_tokens
         non_stream_model = response.model
 
+        assert stream_model == non_stream_model
         assert stream_prompt_tokens == non_stream_prompt_tokens
-        assert stream_model != non_stream_model
 
     except Exception as e:
         pytest.fail(f"An exception occurred - {str(e)}")
diff --git a/litellm/utils.py b/litellm/utils.py
index 8aa97a3af..b1b8a3173 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -2505,7 +2505,7 @@ def token_counter(model="", text=None, messages: Optional[List] = None):
     int: The number of tokens in the text.
     """
     # use tiktoken, anthropic, cohere or llama2's tokenizer depending on the model
-    is_tool_call = True
+    is_tool_call = False
     if text == None:
         if messages is not None:
             print_verbose(f"token_counter messages received: {messages}")
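
A minimal sanity-check sketch of the fixed default (not part of the patch),
assuming token_counter is exported at the litellm package root: with
is_tool_call now defaulting to False, plain chat messages are tokenized
normally instead of being treated as tool calls.

    from litellm import token_counter  # assumed top-level export

    messages = [{"role": "user", "content": "Hey, how's it going?"}]
    # token_counter picks a tokenizer based on the model name (per the
    # comment in utils.py) and returns the prompt token count as an int
    print(token_counter(model="gpt-3.5-turbo", messages=messages))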