mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 02:34:29 +00:00
add token usage
This commit is contained in:
parent
39efc57d84
commit
a9186dc40c
5 changed files with 70 additions and 5 deletions
|
@ -133,9 +133,8 @@ def client(original_function):
|
|||
|
||||
####### USAGE CALCULATOR ################
|
||||
|
||||
def prompt_token_calculator(model, messages):
|
||||
def token_counter(model, text):
|
||||
# use tiktoken or anthropic's tokenizer depending on the model
|
||||
text = " ".join(message["content"] for message in messages)
|
||||
num_tokens = 0
|
||||
if "claude" in model:
|
||||
install_and_import('anthropic')
|
||||
|
@ -168,9 +167,15 @@ def cost_per_token(model="gpt-3.5-turbo", prompt_tokens = 0, completion_tokens =
|
|||
avg_output_cost = output_cost_sum / len(model_cost_ref.keys())
|
||||
prompt_tokens_cost_usd_dollar = avg_input_cost * prompt_tokens
|
||||
completion_tokens_cost_usd_dollar = avg_output_cost * completion_tokens
|
||||
return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
|
||||
return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
|
||||
|
||||
|
||||
def completion_cost(model="gpt-3.5-turbo", prompt="", completion=""):
    """Return the total USD cost of one prompt/completion exchange on `model`.

    Counts tokens on each side, prices them via `cost_per_token`, and sums
    the two dollar amounts.

    NOTE(review): relies on `tokenizer(...)`, which is not visible in this
    chunk — the file defines `token_counter(model, text)`; confirm that
    `tokenizer` actually exists in the full module.
    """
    input_token_count = tokenizer(model=model, text=prompt)
    output_token_count = tokenizer(model=model, text=completion)
    input_cost_usd, output_cost_usd = cost_per_token(
        model=model,
        prompt_tokens=input_token_count,
        completion_tokens=output_token_count,
    )
    return input_cost_usd + output_cost_usd
|
||||
|
||||
####### HELPER FUNCTIONS ################
|
||||
def get_optional_params(
|
||||
# 12 optional params
|
||||
|
@ -466,6 +471,19 @@ def handle_success(args, kwargs, result, start_time, end_time):
|
|||
print_verbose(f"[Non-Blocking] Success Callback Error - {traceback.format_exc()}")
|
||||
pass
|
||||
|
||||
def prompt_token_calculator(model, messages):
    """Count the tokens in a list of chat messages for `model`.

    Routes on the model name: claude models use Anthropic's own tokenizer,
    everything else falls back to the module-level tiktoken `encoding`
    (per the original comment: "use tiktoken or anthropic's tokenizer").

    Args:
        model: model name string; any name containing "claude" takes the
            Anthropic path.
        messages: iterable of message dicts, each with a "content" string.

    Returns:
        The token count (result of `anthropic.count_tokens` or an int from
        tiktoken).
    """
    # Flatten the chat history into a single string before counting.
    text = " ".join(message["content"] for message in messages)
    if "claude" in model:
        # Ensure the anthropic package is importable; helper defined
        # elsewhere in this module.
        install_and_import('anthropic')
        # Fix: original also imported HUMAN_PROMPT and AI_PROMPT, which
        # were never used in this function.
        from anthropic import Anthropic
        anthropic = Anthropic()
        num_tokens = anthropic.count_tokens(text)
    else:
        # `encoding` is the module-level tiktoken encoding — TODO confirm,
        # its definition is outside this chunk.
        num_tokens = len(encoding.encode(text))
    # Note: the original's `num_tokens = 0` initializer was dead code —
    # both branches assign unconditionally — and has been removed.
    return num_tokens
|
||||
|
||||
# integration helper function
|
||||
def modify_integration(integration_name, integration_params):
|
||||
global supabaseClient
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue