forked from phoenix/litellm-mirror
fix(main.py): fix streaming completion token counting error
parent 3080f27b54
commit 61f2fe5837

2 changed files with 16 additions and 48 deletions
@@ -3357,7 +3357,7 @@ def stream_chunk_builder(chunks: list, messages: Optional[list] = None):
     response["usage"]["prompt_tokens"] = 0
     response["usage"]["completion_tokens"] = token_counter(
         model=model,
-        text=combined_content,
+        text=completion_output,
         count_response_tokens=True,  # flag telling the token counter this is a response, so it skips the extra tokens added for input messages
     )
     response["usage"]["total_tokens"] = (
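The shape of the fix: when a streamed response is stitched back together, the usage block has to be recomputed, and the completion-token count must come from the assembled completion output, counted as a response rather than as an input message. Below is a minimal, self-contained sketch of that pattern, assuming a tiktoken-based counter; the names `count_tokens` and `build_usage` and the per-message overhead constant are illustrative, not litellm's actual API.

```python
# Minimal sketch (not litellm's implementation) of recomputing usage
# after reassembling a streamed completion. Assumes tiktoken is installed;
# count_tokens/build_usage are hypothetical helper names.
import tiktoken

_ENC = tiktoken.get_encoding("cl100k_base")

def count_tokens(text: str, count_response_tokens: bool = False) -> int:
    # Responses are counted as raw text. Input messages typically carry
    # extra formatting tokens (role markers etc.), modeled here as a
    # purely illustrative +3 per message.
    tokens = len(_ENC.encode(text))
    if not count_response_tokens:
        tokens += 3  # assumed per-message overhead for inputs
    return tokens

def build_usage(prompt: str, completion_output: str) -> dict:
    prompt_tokens = count_tokens(prompt)
    # The point of this commit: count the assembled completion output,
    # flagged as a response so no input-message overhead is added.
    completion_tokens = count_tokens(completion_output, count_response_tokens=True)
    return {
        "prompt_tokens": prompt_tokens,
        "completion_tokens": completion_tokens,
        "total_tokens": prompt_tokens + completion_tokens,
    }

# Usage: join the streamed deltas, then recompute usage from the result.
chunks = ["Hello", ", ", "world", "!"]
print(build_usage("Say hello.", "".join(chunks)))
```

The `count_response_tokens=True` flag matters because chat-completion token accounting normally adds per-message formatting overhead on the input side; counting a response with that overhead included would overstate `completion_tokens`.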