forked from phoenix/litellm-mirror
fix(utils.py): expand openai_token_counter selection
This commit is contained in:
parent ff4457e2d2
commit aa36bd2784

1 changed file with 4 additions and 13 deletions
@@ -1572,21 +1572,12 @@ def openai_token_counter(messages: Optional[list]=None, model="gpt-3.5-turbo-061
     except KeyError:
         print_verbose("Warning: model not found. Using cl100k_base encoding.")
         encoding = tiktoken.get_encoding("cl100k_base")
-    if model in {
-        "gpt-3.5-turbo",
-        "gpt-3.5-turbo-0613",
-        "gpt-3.5-turbo-16k-0613",
-        "gpt-4",
-        "gpt-4-0314",
-        "gpt-4-32k-0314",
-        "gpt-4-0613",
-        "gpt-4-32k-0613",
-    }:
-        tokens_per_message = 3
-        tokens_per_name = 1
-    elif model == "gpt-3.5-turbo-0301":
+    if model == "gpt-3.5-turbo-0301":
         tokens_per_message = 4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
         tokens_per_name = -1  # if there's a name, the role is omitted
+    elif model in litellm.open_ai_chat_completion_models:
+        tokens_per_message = 3
+        tokens_per_name = 1
     else:
         raise NotImplementedError(
             f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
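For context, the tokens_per_message / tokens_per_name constants selected above feed the per-message counting loop documented in OpenAI's cookbook (linked in the NotImplementedError message). Below is a minimal sketch of that loop, assuming tiktoken is installed; count_tokens is a hypothetical stand-in for openai_token_counter, and it collapses the litellm.open_ai_chat_completion_models membership check into a plain else branch:

import tiktoken

def count_tokens(messages: list, model: str = "gpt-3.5-turbo-0613") -> int:
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        # Same fallback as the patched code: unknown models use cl100k_base.
        encoding = tiktoken.get_encoding("cl100k_base")

    if model == "gpt-3.5-turbo-0301":
        tokens_per_message = 4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
        tokens_per_name = -1    # if there's a name, the role is omitted
    else:
        # The patch routes all other OpenAI chat models here via
        # litellm.open_ai_chat_completion_models; this else is a
        # simplified stand-in for that membership check.
        tokens_per_message = 3
        tokens_per_name = 1

    num_tokens = 0
    for message in messages:
        num_tokens += tokens_per_message
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":
                num_tokens += tokens_per_name
    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
    return num_tokens

print(count_tokens([{"role": "user", "content": "Hello!"}]))  # prints the prompt token estimate

Replacing the hardcoded model set with a membership check against litellm.open_ai_chat_completion_models means newly added OpenAI chat models pick up the standard 3/1 token overhead without this function needing further edits.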