From 3f6e6e7f55486ce9b907b62cedb37d4a151b3e44 Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Tue, 26 Dec 2023 20:07:55 +0530
Subject: [PATCH] (fix) ollama_chat - support function calling + fix for comp

---
 litellm/llms/ollama_chat.py | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/litellm/llms/ollama_chat.py b/litellm/llms/ollama_chat.py
index be5a20cea..1ff93649f 100644
--- a/litellm/llms/ollama_chat.py
+++ b/litellm/llms/ollama_chat.py
@@ -201,20 +201,23 @@ def get_ollama_response(
 
     ## RESPONSE OBJECT
     model_response["choices"][0]["finish_reason"] = "stop"
-    if optional_params.get("format", "") == "json":
+    if data.get("format", "") == "json":
         message = litellm.Message(
             content=None,
             tool_calls=[
                 {
                     "id": f"call_{str(uuid.uuid4())}",
-                    "function": {"arguments": response_json["response"], "name": ""},
+                    "function": {
+                        "arguments": response_json["message"]["content"],
+                        "name": "",
+                    },
                     "type": "function",
                 }
             ],
         )
         model_response["choices"][0]["message"] = message
     else:
-        model_response["choices"][0]["message"]["content"] = response_json["response"]
+        model_response["choices"][0]["message"] = response_json["message"]
     model_response["created"] = int(time.time())
     model_response["model"] = "ollama/" + model
     prompt_tokens = response_json["prompt_eval_count"]  # type: ignore
@@ -305,7 +308,7 @@ async def ollama_acompletion(url, data, model_response, encoding, logging_obj):
                         {
                             "id": f"call_{str(uuid.uuid4())}",
                             "function": {
-                                "arguments": response_json["response"],
+                                "arguments": response_json["message"]["content"],
                                 "name": "",
                             },
                             "type": "function",
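
Note: below is a minimal sketch of the two code paths this patch touches, not part of
the patch itself. The ollama_chat provider calls Ollama's /api/chat endpoint, whose
replies nest the generated text under response_json["message"]["content"]; the old
code read response_json["response"], which only exists in /api/generate replies. The
sketch assumes a local Ollama server on the default port with a pulled "mistral"
model, and assumes "format" passes through litellm.completion into the request
payload ("data"), matching the data.get("format") check above.

import litellm

# Plain completion: the patched else-branch now assigns the whole
# "message" dict from the /api/chat response to the response choice.
resp = litellm.completion(
    model="ollama_chat/mistral",  # assumed model name; any pulled model works
    messages=[{"role": "user", "content": "Say hi in one word."}],
)
print(resp["choices"][0]["message"]["content"])

# JSON mode: with format="json" forwarded into the request payload, the
# patched branch wraps message["content"] in a synthetic tool call so
# function-calling callers can read arguments from tool_calls.
resp = litellm.completion(
    model="ollama_chat/mistral",
    messages=[{"role": "user", "content": "Reply with a JSON object holding a 'city' key."}],
    format="json",
)
print(resp["choices"][0]["message"].tool_calls)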