diff --git a/litellm/llms/ollama_chat.py b/litellm/llms/ollama_chat.py
index 95ff8dfaa3..3628ae2903 100644
--- a/litellm/llms/ollama_chat.py
+++ b/litellm/llms/ollama_chat.py
@@ -320,11 +320,15 @@ async def ollama_acompletion(url, data, model_response, encoding, logging_obj):
                 model_response["choices"][0]["message"] = message
             else:
                 model_response["choices"][0]["message"] = response_json["message"]
+            model_response["created"] = int(time.time())
-            model_response["model"] = "ollama/" + data["model"]
+            model_response["model"] = "ollama_chat/" + data["model"]
             prompt_tokens = response_json.get("prompt_eval_count", litellm.token_counter(messages=data["messages"]))  # type: ignore
             completion_tokens = response_json.get(
-                "eval_count", litellm.token_counter(text=response_json["message"])
+                "eval_count",
+                litellm.token_counter(
+                    text=response_json["message"]["content"], count_response_tokens=True
+                ),
             )
             model_response["usage"] = litellm.Usage(
                 prompt_tokens=prompt_tokens,
                 completion_tokens=completion_tokens,
diff --git a/litellm/utils.py b/litellm/utils.py
index 1e83a319f4..8491a1d5e1 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -983,9 +983,6 @@ class Logging:
             verbose_logger.debug(
                 f"RAW RESPONSE:\n{self.model_call_details.get('original_response', self.model_call_details)}\n\n"
             )
-            verbose_logger.debug(
-                f"Logging Details Post-API Call: LiteLLM Params: {self.model_call_details}"
-            )
             if self.logger_fn and callable(self.logger_fn):
                 try:
                     self.logger_fn(
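
For context on the first hunk: litellm.token_counter was previously handed the whole response_json["message"] dict, and the patch counts the message's content string instead, passing count_response_tokens=True. A minimal sketch of the patched fallback logic, assuming an Ollama /api/chat-style response (the data and response_json literals below are illustrative, not part of this patch):

import litellm

# Illustrative Ollama /api/chat-style payloads; values are made up.
data = {"model": "llama2", "messages": [{"role": "user", "content": "Hi there"}]}
response_json = {
    "model": "llama2",
    "message": {"role": "assistant", "content": "Hello! How can I help?"},
    # "prompt_eval_count" and "eval_count" may be absent, hence the fallbacks.
}

# Prompt side: prefer Ollama's own count, else count the request messages.
prompt_tokens = response_json.get(
    "prompt_eval_count", litellm.token_counter(messages=data["messages"])
)

# Completion side (the fix): count the content *string*, not the message dict,
# flagging it as response text.
completion_tokens = response_json.get(
    "eval_count",
    litellm.token_counter(
        text=response_json["message"]["content"], count_response_tokens=True
    ),
)

print(prompt_tokens, completion_tokens)

With count_response_tokens=True the counter encodes the text directly rather than applying per-message chat-formatting overhead, which is the right treatment for a bare completion string.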