diff --git a/litellm/llms/ollama.py b/litellm/llms/ollama.py
index e7dd1d5f5..1939715b3 100644
--- a/litellm/llms/ollama.py
+++ b/litellm/llms/ollama.py
@@ -126,7 +126,7 @@ class OllamaConfig:
             )
             and v is not None
         }
-
+
     def get_required_params(self) -> List[ProviderField]:
        """For a given provider, return it's required fields with a description"""
        return [
@@ -451,7 +451,7 @@ async def ollama_acompletion(url, data, model_response, encoding, logging_obj):
                     {
                         "id": f"call_{str(uuid.uuid4())}",
                         "function": {
-                            "name": function_call["name"],
+                            "name": function_call.get("name", function_call.get("function", None)),
                             "arguments": json.dumps(function_call["arguments"]),
                         },
                         "type": "function",
diff --git a/litellm/llms/ollama_chat.py b/litellm/llms/ollama_chat.py
index a7439bbcc..af6fd5b80 100644
--- a/litellm/llms/ollama_chat.py
+++ b/litellm/llms/ollama_chat.py
@@ -434,7 +434,7 @@ async def ollama_async_streaming(
                     {
                         "id": f"call_{str(uuid.uuid4())}",
                         "function": {
-                            "name": function_call["name"],
+                            "name": function_call.get("name", function_call.get("function", None)),
                             "arguments": json.dumps(function_call["arguments"]),
                         },
                         "type": "function",
diff --git a/litellm/llms/prompt_templates/factory.py b/litellm/llms/prompt_templates/factory.py
index 398e96af7..e359d36f4 100644
--- a/litellm/llms/prompt_templates/factory.py
+++ b/litellm/llms/prompt_templates/factory.py
@@ -135,7 +135,7 @@ def convert_to_ollama_image(openai_image_url: str):
 
 
 def ollama_pt(
-    model, messages
+    model, messages
 ):  # https://github.com/ollama/ollama/blob/af4cf55884ac54b9e637cd71dadfe9b7a5685877/docs/modelfile.md#template
     if "instruct" in model:
         prompt = custom_prompt(
@@ -172,14 +172,36 @@ def ollama_pt(
                 images.append(base64_image)
         return {"prompt": prompt, "images": images}
     else:
-        prompt = "".join(
-            (
-                m["content"]
-                if isinstance(m["content"], str) is str
-                else "".join(m["content"])
-            )
-            for m in messages
-        )
+        prompt = ""
+        for message in messages:
+            role = message["role"]
+            content = message.get("content", "")
+
+            if "tool_calls" in message:
+                tool_calls = []
+
+                for call in message["tool_calls"]:
+                    call_id: str = call["id"]
+                    function_name: str = call["function"]["name"]
+                    arguments = json.loads(call["function"]["arguments"])
+
+                    tool_calls.append({
+                        "id": call_id,
+                        "type": "function",
+                        "function": {
+                            "name": function_name,
+                            "arguments": arguments
+                        }
+                    })
+
+                prompt += f"### Assistant:\nTool Calls: {json.dumps(tool_calls, indent=2)}\n\n"
+
+            elif "tool_call_id" in message:
+                prompt += f"### User:\n{content}\n\n"
+
+            elif content:
+                prompt += f"### {role.capitalize()}:\n{content}\n\n"
+
     return prompt
 
 
@@ -710,7 +732,7 @@ def convert_to_anthropic_tool_result_xml(message: dict) -> str:
     """
     Anthropic tool_results look like:
-
+
     [Successful results]
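
For reference, a minimal standalone sketch (not part of the patch) of what the rewritten ollama_pt else-branch produces for a tool-call conversation. The render_prompt helper and the sample messages below are hypothetical, written to mirror the "+" lines of the factory.py hunk above:

    import json

    def render_prompt(messages: list) -> str:
        # Mirrors the new else-branch of ollama_pt: each message becomes a
        # "### Role:" block; assistant tool calls are serialized as JSON.
        prompt = ""
        for message in messages:
            role = message["role"]
            content = message.get("content", "")

            if "tool_calls" in message:
                tool_calls = []
                for call in message["tool_calls"]:
                    tool_calls.append({
                        "id": call["id"],
                        "type": "function",
                        "function": {
                            "name": call["function"]["name"],
                            # OpenAI-style arguments arrive as a JSON string;
                            # decode so the re-dump below stays readable.
                            "arguments": json.loads(call["function"]["arguments"]),
                        },
                    })
                prompt += f"### Assistant:\nTool Calls: {json.dumps(tool_calls, indent=2)}\n\n"
            elif "tool_call_id" in message:
                # Tool results are surfaced back to the model as a user turn.
                prompt += f"### User:\n{content}\n\n"
            elif content:
                prompt += f"### {role.capitalize()}:\n{content}\n\n"
        return prompt

    # Hypothetical conversation exercising all three branches.
    messages = [
        {"role": "user", "content": "What is the weather in Boston?"},
        {
            "role": "assistant",
            "tool_calls": [{
                "id": "call_abc123",
                "function": {"name": "get_weather", "arguments": '{"city": "Boston"}'},
            }],
        },
        {"role": "tool", "tool_call_id": "call_abc123", "content": "72F and sunny"},
    ]
    print(render_prompt(messages))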