forked from phoenix/litellm-mirror
track vertex ai usage
This commit is contained in:
parent e1d0c6096c
commit cd91312252
1 changed file with 26 additions and 0 deletions
@@ -784,6 +784,19 @@ def completion(
             model_response["choices"][0]["message"]["content"] = str(completion_response)
             model_response["created"] = time.time()
             model_response["model"] = model
+            ## CALCULATING USAGE
+            prompt_tokens = len(
+                encoding.encode(prompt)
+            )
+            completion_tokens = len(
+                encoding.encode(model_response["choices"][0]["message"]["content"])
+            )
+
+            model_response["usage"] = {
+                "prompt_tokens": prompt_tokens,
+                "completion_tokens": completion_tokens,
+                "total_tokens": prompt_tokens + completion_tokens,
+            }
             response = model_response
         elif model in litellm.vertex_text_models or model in litellm.vertex_code_text_models:
             try:
@@ -823,6 +836,19 @@ def completion(
             model_response["choices"][0]["message"]["content"] = str(completion_response)
             model_response["created"] = time.time()
             model_response["model"] = model
+            ## CALCULATING USAGE
+            prompt_tokens = len(
+                encoding.encode(prompt)
+            )
+            completion_tokens = len(
+                encoding.encode(model_response["choices"][0]["message"]["content"])
+            )
+
+            model_response["usage"] = {
+                "prompt_tokens": prompt_tokens,
+                "completion_tokens": completion_tokens,
+                "total_tokens": prompt_tokens + completion_tokens,
+            }
             response = model_response
         elif model in litellm.ai21_models:
             custom_llm_provider = "ai21"
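Both hunks add the same bookkeeping: count the tokens on each side of the exchange with the shared `encoding` tokenizer and attach an OpenAI-style `usage` dict to the response. Below is a minimal standalone sketch of that logic. It assumes tiktoken's `cl100k_base` encoding as a stand-in for litellm's module-level `encoding` object, and the `attach_usage` helper, model name, and strings are illustrative, not part of the commit.

import time

import tiktoken

# Stand-in for litellm's module-level `encoding`; which encoding litellm
# actually binds here is an assumption for this sketch.
encoding = tiktoken.get_encoding("cl100k_base")


def attach_usage(model_response: dict, prompt: str) -> dict:
    """Mirror the diff: tokenize prompt and completion, record usage."""
    completion_text = model_response["choices"][0]["message"]["content"]
    prompt_tokens = len(encoding.encode(prompt))
    completion_tokens = len(encoding.encode(completion_text))
    model_response["usage"] = {
        "prompt_tokens": prompt_tokens,
        "completion_tokens": completion_tokens,
        "total_tokens": prompt_tokens + completion_tokens,
    }
    return model_response


# Illustrative call with a fake Vertex AI response shape.
response = {
    "choices": [{"message": {"content": "Hello from Vertex AI!"}}],
    "created": time.time(),
    "model": "chat-bison",
}
print(attach_usage(response, "Say hello.")["usage"])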