fix(together_ai.py): additional logging for together ai encoding prompt
parent cab870f73a
commit a09a6f24a4
1 changed file with 1 addition and 0 deletions
@@ -177,6 +177,7 @@ def completion(
         model_response["choices"][0]["message"]["content"] = completion_response["output"]["choices"][0]["text"]
 
         ## CALCULATING USAGE
+        print_verbose(f"CALCULATING TOGETHERAI TOKEN USAGE. Model Response: {model_response}; model_response['choices'][0]['message'].get('content', ''): {model_response['choices'][0]['message'].get('content', None)}")
         prompt_tokens = len(encoding.encode(prompt))
         completion_tokens = len(
             encoding.encode(model_response["choices"][0]["message"].get("content", ""))
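For context, this hunk sits in the TogetherAI usage-counting path: litellm tokenizes both the prompt and the returned completion text locally, and the added print_verbose call dumps the raw model response immediately before that happens, so a missing or malformed "content" field shows up in the debug output instead of surfacing as an opaque error from encoding.encode. Below is a minimal runnable sketch of that pattern; the tiktoken tokenizer, the print_verbose stub, and the count_usage wrapper are illustrative assumptions standing in for litellm's actual objects, not its code.

```python
# Minimal sketch of the usage-calculation pattern shown in the diff.
# ASSUMPTIONS: tiktoken, this print_verbose stub, and count_usage are
# stand-ins; litellm's real `encoding` object and `print_verbose`
# helper are not reproduced here.
import tiktoken

VERBOSE = True
encoding = tiktoken.get_encoding("cl100k_base")  # stand-in tokenizer


def print_verbose(msg: str) -> None:
    # Gate debug output behind a verbosity flag, similar in spirit
    # to litellm's set_verbose switch.
    if VERBOSE:
        print(msg)


def count_usage(prompt: str, model_response: dict) -> dict:
    # Log the raw response *before* encoding, so a missing or
    # malformed "content" field is visible in the debug output
    # rather than hidden inside a tokenizer traceback.
    print_verbose(
        f"CALCULATING TOGETHERAI TOKEN USAGE. Model Response: {model_response}; "
        f"content: {model_response['choices'][0]['message'].get('content', None)}"
    )
    prompt_tokens = len(encoding.encode(prompt))
    completion_tokens = len(
        encoding.encode(model_response["choices"][0]["message"].get("content", ""))
    )
    return {
        "prompt_tokens": prompt_tokens,
        "completion_tokens": completion_tokens,
        "total_tokens": prompt_tokens + completion_tokens,
    }


# Example: a minimal TogetherAI-style response dict.
response = {"choices": [{"message": {"content": "Hello there!"}}]}
print(count_usage("Say hello.", response))
```

Run against a well-formed response, this prints the three token counts; run against a response whose "content" is absent, the verbose line still shows the full payload first, which is what the extra logging in this commit buys during debugging.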