From 57607f111acf973981f84570cbfa7bc74ae1113c Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Fri, 22 Dec 2023 11:22:24 +0530
Subject: [PATCH] fix(ollama.py): use litellm.request_timeout for async call
 timeout

---
 litellm/llms/ollama.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/litellm/llms/ollama.py b/litellm/llms/ollama.py
index ceab2c7d3..b0d06ef62 100644
--- a/litellm/llms/ollama.py
+++ b/litellm/llms/ollama.py
@@ -216,7 +216,7 @@ async def ollama_async_streaming(url, data, model_response, encoding, logging_ob
 async def ollama_acompletion(url, data, model_response, encoding, logging_obj):
     data["stream"] = False
     try:
-        timeout = aiohttp.ClientTimeout(total=600)  # 10 minutes
+        timeout = aiohttp.ClientTimeout(total=litellm.request_timeout)  # honor the litellm-wide request timeout
         async with aiohttp.ClientSession(timeout=timeout) as session:
            resp = await session.post(url, json=data)
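
Note (not part of the patch): a minimal usage sketch showing how the patched async
Ollama path now picks up the module-level setting. The timeout value, model name,
and api_base below are placeholders chosen for illustration.

    import asyncio

    import litellm

    async def main():
        # After this patch, ollama_acompletion builds its aiohttp.ClientTimeout
        # from litellm.request_timeout instead of a hard-coded 600 seconds, so
        # callers can bound async Ollama requests with a single setting.
        litellm.request_timeout = 120  # seconds; placeholder value

        response = await litellm.acompletion(
            model="ollama/llama2",              # assumes a locally pulled Ollama model
            messages=[{"role": "user", "content": "Hello"}],
            api_base="http://localhost:11434",  # default local Ollama endpoint
        )
        print(response)

    asyncio.run(main())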