diff --git a/litellm/utils.py b/litellm/utils.py
index 82c0e94d1..264b28e06 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -1575,8 +1575,8 @@ def openai_token_counter(
     model="gpt-3.5-turbo-0613",
     text: Optional[str] = None,
     is_tool_call: Optional[bool] = False,
-    tools: List[ChatCompletionToolParam] | None = None,
-    tool_choice: ChatCompletionNamedToolChoiceParam | None = None,
+    tools: Optional[List[ChatCompletionToolParam]] = None,
+    tool_choice: Optional[ChatCompletionNamedToolChoiceParam] = None,
     count_response_tokens: Optional[
         bool
     ] = False,  # Flag passed from litellm.stream_chunk_builder, to indicate counting tokens for LLM Response. We need this because for LLM input we add +3 tokens per message - based on OpenAI's token counter