From b985d996b246ab4483f7de1f7c349bbbc50b459f Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Mon, 25 Dec 2023 23:38:01 +0530
Subject: [PATCH] (feat) ollama_chat - add streaming support

---
 litellm/llms/ollama_chat.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/litellm/llms/ollama_chat.py b/litellm/llms/ollama_chat.py
index b844e28585..cfcc883fa8 100644
--- a/litellm/llms/ollama_chat.py
+++ b/litellm/llms/ollama_chat.py
@@ -234,13 +234,13 @@ def ollama_completion_stream(url, data, logging_obj):
         try:
             if response.status_code != 200:
                 raise OllamaError(
-                    status_code=response.status_code, message=response.text
+                    status_code=response.status_code, message=response.iter_lines()
                 )
             streamwrapper = litellm.CustomStreamWrapper(
                 completion_stream=response.iter_lines(),
                 model=data["model"],
-                custom_llm_provider="ollama",
+                custom_llm_provider="ollama_chat",
                 logging_obj=logging_obj,
             )
             for transformed_chunk in streamwrapper:
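
For context, a minimal sketch of how the streaming path touched by this patch might be exercised from the caller side, assuming litellm's completion() entry point routes "ollama_chat/" models into ollama_completion_stream; the model name and api_base below are illustrative placeholders, not part of the patch.

import litellm

# Hypothetical usage sketch: stream a chat completion through the
# ollama_chat provider. Model name and api_base are placeholders.
response = litellm.completion(
    model="ollama_chat/llama2",
    messages=[{"role": "user", "content": "Hello, how are you?"}],
    api_base="http://localhost:11434",
    stream=True,
)

# Chunks are yielded by litellm.CustomStreamWrapper, which this patch
# now constructs with custom_llm_provider="ollama_chat".
for chunk in response:
    print(chunk)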