Litellm dev 03 08 2025 p3 (#9089)

* feat(ollama_chat.py): pass down http client to ollama_chat

enables easier testing

* fix(factory.py): fix passing images to ollama's `/api/generate` endpoint

Fixes https://github.com/BerriAI/litellm/issues/6683

* fix(factory.py): fix ollama pt to handle templating correctly
This commit is contained in:
Krish Dholakia 2025-03-09 18:20:56 -07:00 committed by GitHub
parent 93273723cd
commit e00d4fb18c
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 165 additions and 52 deletions

View file

@@ -1,7 +1,7 @@
import json
import time
import uuid
from typing import Any, List, Optional
from typing import Any, List, Optional, Union
import aiohttp
import httpx
@@ -9,7 +9,11 @@ from pydantic import BaseModel
import litellm
from litellm import verbose_logger
from litellm.llms.custom_httpx.http_handler import get_async_httpx_client
from litellm.llms.custom_httpx.http_handler import (
AsyncHTTPHandler,
HTTPHandler,
get_async_httpx_client,
)
from litellm.llms.openai.chat.gpt_transformation import OpenAIGPTConfig
from litellm.types.llms.ollama import OllamaToolCall, OllamaToolCallFunction
from litellm.types.llms.openai import ChatCompletionAssistantToolCall
@@ -205,6 +209,7 @@ def get_ollama_response( # noqa: PLR0915
api_key: Optional[str] = None,
acompletion: bool = False,
encoding=None,
client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
):
if api_base.endswith("/api/chat"):
url = api_base
@@ -301,7 +306,11 @@
headers: Optional[dict] = None
if api_key is not None:
headers = {"Authorization": "Bearer {}".format(api_key)}
response = litellm.module_level_client.post(
sync_client = litellm.module_level_client
if client is not None and isinstance(client, HTTPHandler):
sync_client = client
response = sync_client.post(
url=url,
json=data,
headers=headers,