From c45085b728ab2994db7cf0412a9aa076d1c11b77 Mon Sep 17 00:00:00 2001 From: Rajan Paneru Date: Fri, 10 May 2024 10:06:50 +0930 Subject: [PATCH 1/3] Based on the data-type using json The value of response_obj["choices"][0]["message"] is Message object and dict Added a conditional to use .json only if it is a Message object --- litellm/integrations/langfuse.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/litellm/integrations/langfuse.py b/litellm/integrations/langfuse.py index caf5437b2..140dced9d 100644 --- a/litellm/integrations/langfuse.py +++ b/litellm/integrations/langfuse.py @@ -135,7 +135,9 @@ class LangFuseLogger: response_obj, litellm.ModelResponse ): input = prompt - output = response_obj["choices"][0]["message"].json() + output = response_obj["choices"][0]["message"] + if isinstance(output, litellm.Message): + output = output.json() elif response_obj is not None and isinstance( response_obj, litellm.TextCompletionResponse ): From 8eb842dcf584c9cf6ff9d7675899fb84f39e35e7 Mon Sep 17 00:00:00 2001 From: Rajan Paneru Date: Fri, 10 May 2024 22:04:44 +0930 Subject: [PATCH 2/3] reverted the patch so that the fix can be applied in the main place --- litellm/integrations/langfuse.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/litellm/integrations/langfuse.py b/litellm/integrations/langfuse.py index 140dced9d..caf5437b2 100644 --- a/litellm/integrations/langfuse.py +++ b/litellm/integrations/langfuse.py @@ -135,9 +135,7 @@ class LangFuseLogger: response_obj, litellm.ModelResponse ): input = prompt - output = response_obj["choices"][0]["message"] - if isinstance(output, litellm.Message): - output = output.json() + output = response_obj["choices"][0]["message"].json() elif response_obj is not None and isinstance( response_obj, litellm.TextCompletionResponse ): From 65b07bcb8c737152f26afa6b51cb3919a05007c3 Mon Sep 17 00:00:00 2001 From: Rajan Paneru Date: Fri, 10 May 2024 22:12:32 +0930 Subject: [PATCH 3/3] Preserving the 
Pydantic Message Object The following statement replaces the Pydantic Message Object and initializes it with the dict model_response["choices"][0]["message"] = response_json["message"] We need to make sure message is always a litellm.Message object As a fix, based on the code of the ollama.py file, I am updating just the content instead of the entire object for both sync and async functions --- litellm/llms/ollama_chat.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/litellm/llms/ollama_chat.py b/litellm/llms/ollama_chat.py index 866761905..d1ff4953f 100644 --- a/litellm/llms/ollama_chat.py +++ b/litellm/llms/ollama_chat.py @@ -300,7 +300,7 @@ def get_ollama_response( model_response["choices"][0]["message"] = message model_response["choices"][0]["finish_reason"] = "tool_calls" else: - model_response["choices"][0]["message"] = response_json["message"] + model_response["choices"][0]["message"]["content"] = response_json["message"]["content"] model_response["created"] = int(time.time()) model_response["model"] = "ollama/" + model prompt_tokens = response_json.get("prompt_eval_count", litellm.token_counter(messages=messages)) # type: ignore @@ -484,7 +484,7 @@ async def ollama_acompletion( model_response["choices"][0]["message"] = message model_response["choices"][0]["finish_reason"] = "tool_calls" else: - model_response["choices"][0]["message"] = response_json["message"] + model_response["choices"][0]["message"]["content"] = response_json["message"]["content"] model_response["created"] = int(time.time()) model_response["model"] = "ollama_chat/" + data["model"]