fix installing on python3.8

Ishaan Jaff 2024-07-16 16:56:15 -07:00
parent 06efe28132
commit 95af5c260e


@@ -60,6 +60,10 @@ from litellm.litellm_core_utils.redact_messages import (
 )
 from litellm.litellm_core_utils.token_counter import get_modified_max_tokens
 from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
+from litellm.types.llms.openai import (
+    ChatCompletionNamedToolChoiceParam,
+    ChatCompletionToolParam,
+)
 from litellm.types.utils import (
     CallTypes,
     ChatCompletionDeltaToolCall,
@@ -79,7 +83,6 @@ from litellm.types.utils import (
     TranscriptionResponse,
     Usage,
 )
-from litellm.types.llms.openai import ChatCompletionToolParam, ChatCompletionNamedToolChoiceParam
 
 oidc_cache = DualCache()
@@ -1572,7 +1575,7 @@ def openai_token_counter(
     model="gpt-3.5-turbo-0613",
     text: Optional[str] = None,
     is_tool_call: Optional[bool] = False,
-    tools: list[ChatCompletionToolParam] | None = None,
+    tools: List[ChatCompletionToolParam] | None = None,
     tool_choice: ChatCompletionNamedToolChoiceParam | None = None,
     count_response_tokens: Optional[
         bool
@@ -1617,7 +1620,7 @@ def openai_token_counter(
         for message in messages:
             num_tokens += tokens_per_message
             if message.get("role", None) == "system":
-                includes_system_message = True
+                includes_system_message = True
             for key, value in message.items():
                 if isinstance(value, str):
                     num_tokens += len(encoding.encode(value, disallowed_special=()))
@@ -1868,6 +1871,7 @@ def _format_type(props, indent):
     # This is a guess, as an empty string doesn't yield the expected token count
     return "any"
 
+
 def token_counter(
     model="",
     custom_tokenizer: Optional[dict] = None,
@@ -1955,7 +1959,7 @@ def token_counter(
                 is_tool_call=is_tool_call,
                 count_response_tokens=count_response_tokens,
                 tools=tools,
-                tool_choice=tool_choice
+                tool_choice=tool_choice,
             )
         else:
             print_verbose(
@@ -1968,7 +1972,7 @@ def token_counter(
                 is_tool_call=is_tool_call,
                 count_response_tokens=count_response_tokens,
                 tools=tools,
-                tool_choice=tool_choice
+                tool_choice=tool_choice,
            )
     else:
        num_tokens = len(encoding.encode(text, disallowed_special=()))  # type: ignore
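
Background on the list -> List change in openai_token_counter (an illustrative note, not part of the commit): built-in generics such as list[ChatCompletionToolParam] are only subscriptable from Python 3.9 onward (PEP 585). Function annotations are evaluated when the module is imported unless evaluation is postponed, so the old signature raises TypeError on Python 3.8 as soon as the module loads, which is presumably why installation broke. typing.List stays subscriptable on 3.8. A minimal sketch of the difference; the file and function names below are hypothetical:

# compat_sketch.py - hypothetical, for illustration only
from typing import List, Optional

# Python 3.9+ spelling (PEP 585). On Python 3.8, evaluating list[dict]
# raises "TypeError: 'type' object is not subscriptable", so a module that
# uses it in a signature fails at import time. The annotation is quoted
# here only so this sketch itself still loads on 3.8.
def count_tools_new_style(tools: "list[dict] | None" = None) -> int:
    return 0 if tools is None else len(tools)

# Python 3.8-compatible spelling using typing.List, as in the commit.
def count_tools_old_style(tools: Optional[List[dict]] = None) -> int:
    return 0 if tools is None else len(tools)

print(count_tools_new_style([{"type": "function"}]))  # 1
print(count_tools_old_style([{"type": "function"}]))  # 1

One caveat: the List[ChatCompletionToolParam] | None form kept in the diff still uses the X | Y union syntax, which typing objects only support at runtime from Python 3.10, so it loads on 3.8 only if annotation evaluation is postponed (for example via from __future__ import annotations) or never triggered. Whether the surrounding module relies on that is not visible in these hunks.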