diff --git a/litellm/llms/together_ai.py b/litellm/llms/together_ai.py
index 210ed497e..b4964cef4 100644
--- a/litellm/llms/together_ai.py
+++ b/litellm/llms/together_ai.py
@@ -177,6 +177,7 @@ def completion(
             model_response["choices"][0]["message"]["content"] = completion_response["output"]["choices"][0]["text"]
         ## CALCULATING USAGE
+        print_verbose(f"CALCULATING TOGETHERAI TOKEN USAGE. Model Response: {model_response}; model_response['choices'][0]['message'].get('content', ''): {model_response['choices'][0]['message'].get('content', None)}")
         prompt_tokens = len(encoding.encode(prompt))
         completion_tokens = len(
             encoding.encode(model_response["choices"][0]["message"].get("content", ""))
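
For context, the code around this hunk derives usage by token-counting the prompt and the generated text with litellm's shared `encoding` object, then (further down, outside this hunk) stores the counts on the response. Below is a minimal standalone sketch of that calculation, assuming tiktoken's `cl100k_base` encoding as a stand-in for litellm's `encoding`; the actual tokenizer for a given Together AI model may differ, so these counts are approximate, not authoritative.

```python
# Sketch of the prompt/completion token counting done in this function.
# Assumption: tiktoken's cl100k_base encoding stands in for litellm's `encoding`.
import tiktoken

encoding = tiktoken.get_encoding("cl100k_base")

prompt = "Why is the sky blue?"
completion_text = "Because shorter wavelengths of sunlight scatter more strongly."

# Mirrors the diff: token count = length of the encoded string.
prompt_tokens = len(encoding.encode(prompt))
completion_tokens = len(encoding.encode(completion_text))

print({
    "prompt_tokens": prompt_tokens,
    "completion_tokens": completion_tokens,
    "total_tokens": prompt_tokens + completion_tokens,
})
```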