From b2c1a3ad791df72c4bbf5160a5b621f219dc12e4 Mon Sep 17 00:00:00 2001
From: corrm
Date: Mon, 24 Jun 2024 05:54:58 +0300
Subject: [PATCH 1/6] chore: Improved prompt generation in ollama_pt function

---
 litellm/llms/prompt_templates/factory.py | 25 +++++++++++++++---------
 1 file changed, 16 insertions(+), 9 deletions(-)

diff --git a/litellm/llms/prompt_templates/factory.py b/litellm/llms/prompt_templates/factory.py
index 398e96af7..02ed93fae 100644
--- a/litellm/llms/prompt_templates/factory.py
+++ b/litellm/llms/prompt_templates/factory.py
@@ -172,14 +172,21 @@ def ollama_pt(
             images.append(base64_image)
         return {"prompt": prompt, "images": images}
     else:
-        prompt = "".join(
-            (
-                m["content"]
-                if isinstance(m["content"], str) is str
-                else "".join(m["content"])
-            )
-            for m in messages
-        )
+        prompt = ""
+        for message in messages:
+            role = message["role"]
+            content = message.get("content", "")
+
+            if "tool_calls" in message:
+                for call in message["tool_calls"]:
+                    function_name = call["function"]["name"]
+                    arguments = json.loads(call["function"]["arguments"])
+                    prompt += f"### Tool Call ({call['id']}):\nFunction: {function_name}\nArguments: {json.dumps(arguments)}\n\n"
+            elif "tool_call_id" in message:
+                prompt += f"### Tool Call Result ({message['tool_call_id']}):\n{message['content']}\n\n"
+            elif content:
+                prompt += f"### {role.capitalize()}:\n{content}\n\n"
+
     return prompt


@@ -710,7 +717,7 @@ def convert_to_anthropic_tool_result_xml(message: dict) -> str:
     """
     Anthropic tool_results look like:
-
+
     [Successful results]

From 423a60c8bcfc1e7b4fd81ccbcba7757e772b85d8 Mon Sep 17 00:00:00 2001
From: corrm
Date: Mon, 24 Jun 2024 05:55:22 +0300
Subject: [PATCH 2/6] chore: Improved OllamaConfig get_required_params and
 ollama_acompletion and ollama_async_streaming functions

---
 litellm/llms/ollama.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/litellm/llms/ollama.py b/litellm/llms/ollama.py
index e7dd1d5f5..1939715b3 100644
--- a/litellm/llms/ollama.py
+++ b/litellm/llms/ollama.py
@@ -126,7 +126,7 @@ class OllamaConfig:
             )
             and v is not None
         }
-
+
     def get_required_params(self) -> List[ProviderField]:
         """For a given provider, return it's required fields with a description"""
         return [
@@ -451,7 +451,7 @@ async def ollama_acompletion(url, data, model_response, encoding, logging_obj):
                         {
                             "id": f"call_{str(uuid.uuid4())}",
                             "function": {
-                                "name": function_call["name"],
+                                "name": function_call.get("name", function_call.get("function", None)),
                                 "arguments": json.dumps(function_call["arguments"]),
                             },
                             "type": "function",

From b8a8b0847c9e68726e56674add4aaa2951482a99 Mon Sep 17 00:00:00 2001
From: corrm
Date: Mon, 24 Jun 2024 05:56:56 +0300
Subject: [PATCH 3/6] Added improved function name handling in
 ollama_async_streaming

---
 litellm/llms/ollama_chat.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/litellm/llms/ollama_chat.py b/litellm/llms/ollama_chat.py
index a7439bbcc..af6fd5b80 100644
--- a/litellm/llms/ollama_chat.py
+++ b/litellm/llms/ollama_chat.py
@@ -434,7 +434,7 @@ async def ollama_async_streaming(
                     {
                         "id": f"call_{str(uuid.uuid4())}",
                         "function": {
-                            "name": function_call["name"],
+                            "name": function_call.get("name", function_call.get("function", None)),
                            "arguments": json.dumps(function_call["arguments"]),
                        },
                        "type": "function",

From e2af13550a29db7ea0757a313c3590105c925a74 Mon Sep 17 00:00:00 2001
From: Islam Nofl
Date: Mon, 24 Jun 2024 08:01:15 +0300
Subject: [PATCH 4/6] Rename ollama prompt 'Function' word to 'Name'

---
 litellm/llms/prompt_templates/factory.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/litellm/llms/prompt_templates/factory.py b/litellm/llms/prompt_templates/factory.py
index 02ed93fae..109c5b8d8 100644
--- a/litellm/llms/prompt_templates/factory.py
+++ b/litellm/llms/prompt_templates/factory.py
@@ -181,7 +181,7 @@ def ollama_pt(
                 for call in message["tool_calls"]:
                     function_name = call["function"]["name"]
                     arguments = json.loads(call["function"]["arguments"])
-                    prompt += f"### Tool Call ({call['id']}):\nFunction: {function_name}\nArguments: {json.dumps(arguments)}\n\n"
+                    prompt += f"### Tool Call ({call['id']}):\nName: {function_name}\nArguments: {json.dumps(arguments)}\n\n"
             elif "tool_call_id" in message:
                 prompt += f"### Tool Call Result ({message['tool_call_id']}):\n{message['content']}\n\n"
             elif content:

From b69a092f56c713ac9dcaf9dceb59c29faec2d315 Mon Sep 17 00:00:00 2001
From: corrm
Date: Tue, 25 Jun 2024 12:40:07 +0300
Subject: [PATCH 5/6] Rename ollama prompt:

- 'Function' word to 'FunctionName'
- 'Tool Call' to 'FunctionCall'
- 'Tool Call Result' to 'FunctionCall Result'

_I found that these changes make some models better_
---
 litellm/llms/prompt_templates/factory.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/litellm/llms/prompt_templates/factory.py b/litellm/llms/prompt_templates/factory.py
index 109c5b8d8..7864d5ebc 100644
--- a/litellm/llms/prompt_templates/factory.py
+++ b/litellm/llms/prompt_templates/factory.py
@@ -181,9 +181,9 @@ def ollama_pt(
                 for call in message["tool_calls"]:
                     function_name = call["function"]["name"]
                     arguments = json.loads(call["function"]["arguments"])
-                    prompt += f"### Tool Call ({call['id']}):\nName: {function_name}\nArguments: {json.dumps(arguments)}\n\n"
+                    prompt += f"### FunctionCall ({call['id']}):\nFunctionName: {function_name}\nArguments: {json.dumps(arguments)}\n\n"
             elif "tool_call_id" in message:
-                prompt += f"### Tool Call Result ({message['tool_call_id']}):\n{message['content']}\n\n"
+                prompt += f"### FunctionCall Result ({message['tool_call_id']}):\n{message['content']}\n\n"
             elif content:
                 prompt += f"### {role.capitalize()}:\n{content}\n\n"

From 5b720137fdb2e65c9584bc1dbdb7a7e50120eb7e Mon Sep 17 00:00:00 2001
From: corrm
Date: Tue, 25 Jun 2024 13:53:27 +0300
Subject: [PATCH 6/6] Improve ollama prompt: this formula gives good results
 with AutoGen

---
 litellm/llms/prompt_templates/factory.py | 23 +++++++++++++++++++----
 1 file changed, 19 insertions(+), 4 deletions(-)

diff --git a/litellm/llms/prompt_templates/factory.py b/litellm/llms/prompt_templates/factory.py
index 7864d5ebc..e359d36f4 100644
--- a/litellm/llms/prompt_templates/factory.py
+++ b/litellm/llms/prompt_templates/factory.py
@@ -135,7 +135,7 @@ def convert_to_ollama_image(openai_image_url: str):
 
 
 def ollama_pt(
-    model, messages
+    model, messages
 ):  # https://github.com/ollama/ollama/blob/af4cf55884ac54b9e637cd71dadfe9b7a5685877/docs/modelfile.md#template
     if "instruct" in model:
         prompt = custom_prompt(
@@ -178,12 +178,27 @@ def ollama_pt(
             content = message.get("content", "")
 
             if "tool_calls" in message:
+                tool_calls = []
+
                 for call in message["tool_calls"]:
-                    function_name = call["function"]["name"]
+                    call_id: str = call["id"]
+                    function_name: str = call["function"]["name"]
                     arguments = json.loads(call["function"]["arguments"])
-                    prompt += f"### FunctionCall ({call['id']}):\nFunctionName: {function_name}\nArguments: {json.dumps(arguments)}\n\n"
+
+                    tool_calls.append({
+                        "id": call_id,
+                        "type": "function",
+                        "function": {
+                            "name": function_name,
+                            "arguments": arguments
+                        }
+                    })
+
+                prompt += f"### Assistant:\nTool Calls: {json.dumps(tool_calls, indent=2)}\n\n"
+
             elif "tool_call_id" in message:
-                prompt += f"### FunctionCall Result ({message['tool_call_id']}):\n{message['content']}\n\n"
+                prompt += f"### User:\n{message['content']}\n\n"
+
             elif content:
                 prompt += f"### {role.capitalize()}:\n{content}\n\n"
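
For reference, a minimal standalone sketch of the prompt format the fully patched loop produces for a tool-call conversation. The loop body mirrors the final state of `ollama_pt` after PATCH 6/6; the `messages` list (tool name, call ID, and weather values) is invented sample data, not part of the patch series.

```python
import json

# Invented sample conversation in OpenAI chat format (not from the patches).
messages = [
    {"role": "user", "content": "What is the weather in Paris?"},
    {
        "role": "assistant",
        "tool_calls": [
            {
                "id": "call_abc123",
                "type": "function",
                "function": {
                    "name": "get_current_weather",
                    "arguments": '{"location": "Paris"}',
                },
            }
        ],
    },
    {"role": "tool", "tool_call_id": "call_abc123", "content": "22 C, sunny"},
]

# Mirrors the loop in the final patched ollama_pt (PATCH 6/6).
prompt = ""
for message in messages:
    role = message["role"]
    content = message.get("content", "")

    if "tool_calls" in message:
        tool_calls = []
        for call in message["tool_calls"]:
            tool_calls.append(
                {
                    "id": call["id"],
                    "type": "function",
                    "function": {
                        "name": call["function"]["name"],
                        # Decoded so the prompt shows JSON, not an escaped string.
                        "arguments": json.loads(call["function"]["arguments"]),
                    },
                }
            )
        prompt += f"### Assistant:\nTool Calls: {json.dumps(tool_calls, indent=2)}\n\n"
    elif "tool_call_id" in message:
        # Tool results are rendered as a user turn.
        prompt += f"### User:\n{message['content']}\n\n"
    elif content:
        prompt += f"### {role.capitalize()}:\n{content}\n\n"

print(prompt)
# ### User:
# What is the weather in Paris?
#
# ### Assistant:
# Tool Calls: [
#   {
#     "id": "call_abc123",
#     "type": "function",
#     ...
#
# ### User:
# 22 C, sunny
```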