forked from phoenix/litellm-mirror
fix(utils.py): handle count response tokens false case token counting
This commit is contained in:
parent 109ccf4cef
commit 1bdb332454
1 changed file with 2 additions and 0 deletions
@@ -3042,6 +3042,8 @@ def openai_token_counter(
         # This is the case where we need to count tokens for a streamed response. We should NOT add +3 tokens per message in this branch
         num_tokens = len(encoding.encode(text, disallowed_special=()))
         return num_tokens
+    elif text is not None:
+        num_tokens = len(encoding.encode(text, disallowed_special=()))
     num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
     return num_tokens
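For reference, a minimal standalone sketch of the patched branch, assuming tiktoken's cl100k_base encoding. The helper name count_text_tokens is hypothetical and elides the rest of litellm's openai_token_counter (message counting, model lookup); it only mirrors the count_response_tokens split shown in the diff above.

import tiktoken

def count_text_tokens(text: str, count_response_tokens: bool = False) -> int:
    # Hypothetical helper mirroring the patched branch; not litellm's actual API.
    encoding = tiktoken.get_encoding("cl100k_base")
    if count_response_tokens:
        # Streamed response: count the raw text only; do NOT add the +3
        # reply-priming tokens in this branch.
        return len(encoding.encode(text, disallowed_special=()))
    # count_response_tokens is False: count the text, then add the priming tokens.
    num_tokens = len(encoding.encode(text, disallowed_special=()))
    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
    return num_tokens

print(count_text_tokens("hello world"))                              # prompt-style count, +3 priming
print(count_text_tokens("hello world", count_response_tokens=True))  # streamed count, no +3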