mirror of https://github.com/BerriAI/litellm.git
(fix) counting response tokens+streaming
commit f3b8d9c3ef (parent 5055aeb254)
2 changed files with 16 additions and 3 deletions
@@ -3268,7 +3268,9 @@ def stream_chunk_builder(chunks: list, messages: Optional[list] = None):
         print_verbose(f"token_counter failed, assuming prompt tokens is 0")
         response["usage"]["prompt_tokens"] = 0
     response["usage"]["completion_tokens"] = token_counter(
-        model=model, text=completion_output
+        model=model,
+        text=combined_content,
+        count_response_tokens=True,  # count_response_tokens is a Flag to tell token counter this is a response, No need to add extra tokens we do for input messages
     )
     response["usage"]["total_tokens"] = (
         response["usage"]["prompt_tokens"] + response["usage"]["completion_tokens"]
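For context, a minimal sketch of the call this hunk changes. It assumes litellm exports token_counter at the top level and uses made-up message/content values; the count_response_tokens=True flag is the one this commit introduces, telling the counter the text is model output so it skips the per-message overhead tokens applied when counting chat inputs:

    # Sketch only: count tokens for a streamed reply the way the patched
    # stream_chunk_builder does. The strings below are illustrative.
    from litellm import token_counter

    model = "gpt-3.5-turbo"

    # Pretend the streaming deltas have already been concatenated:
    combined_content = "Hello! Here is the answer you asked for."

    # Treat the text as a *response*: no extra per-message input tokens added.
    completion_tokens = token_counter(
        model=model,
        text=combined_content,
        count_response_tokens=True,  # flag added by this commit
    )

    # Prompt tokens are still counted from the request messages.
    prompt_tokens = token_counter(
        model=model,
        messages=[{"role": "user", "content": "Say hello and answer me."}],
    )

    print(prompt_tokens + completion_tokens)  # mirrors usage["total_tokens"]

Inside stream_chunk_builder itself, the same arithmetic applies: prompt_tokens come from the original request messages (falling back to 0 if counting fails), completion_tokens from the combined streamed content, and total_tokens is their sum, as the hunk shows.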