Litellm dev 03 08 2025 p3 (#9089)

* feat(ollama_chat.py): pass down http client to ollama_chat

enables easier testing

* fix(factory.py): fix passing images to ollama's `/api/generate` endpoint

Fixes https://github.com/BerriAI/litellm/issues/6683

* fix(factory.py): fix ollama pt to handle templating correctly
This commit is contained in:
Krish Dholakia 2025-03-09 18:20:56 -07:00 committed by GitHub
parent 93273723cd
commit e00d4fb18c
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 165 additions and 52 deletions

View file

@ -1,4 +1,5 @@
import asyncio
import json
import os
import sys
import traceback
@ -76,6 +77,45 @@ def test_ollama_json_mode():
# test_ollama_json_mode()
def test_ollama_vision_model():
    """Check the request payload litellm builds for an ollama vision model.

    The HTTP layer is mocked out, so ``litellm.completion`` is expected to
    blow up after the request is issued (the MagicMock response cannot be
    parsed) — that failure is swallowed on purpose. What matters is the
    JSON body captured by the mock: model name, an ``images`` field, and a
    ``prompt`` rendered with the ``### User:`` chat template.
    """
    from unittest.mock import patch

    from litellm.llms.custom_httpx.http_handler import HTTPHandler

    http_client = HTTPHandler()
    with patch.object(http_client, "post") as post_mock:
        try:
            litellm.completion(
                model="ollama/llama3.2-vision:11b",
                messages=[
                    {
                        "role": "user",
                        "content": [
                            {"type": "text", "text": "Whats in this image?"},
                            {
                                "type": "image_url",
                                "image_url": {
                                    "url": "https://dummyimage.com/100/100/fff&text=Test+image"
                                },
                            },
                        ],
                    }
                ],
                client=http_client,
            )
        except Exception as e:
            # Expected: the mocked post() response is not a real HTTP response.
            print(e)
        post_mock.assert_called()
        print(post_mock.call_args.kwargs)
        payload = json.loads(post_mock.call_args.kwargs["data"])
        assert payload["model"] == "llama3.2-vision:11b"
        assert "images" in payload
        assert "prompt" in payload
        assert payload["prompt"].startswith("### User:\n")
mock_ollama_embedding_response = EmbeddingResponse(model="ollama/nomic-embed-text")