From d3baab3bbbb7c2da47da4e2ef6b5e8e1d544ac03 Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Mon, 25 Dec 2023 23:03:47 +0530
Subject: [PATCH] utils - convert ollama_chat params

---
 litellm/utils.py | 25 ++++++++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/litellm/utils.py b/litellm/utils.py
index b6afdeb2d..33673d4e4 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -2890,7 +2890,7 @@ def get_optional_params(  # use the openai defaults
         and custom_llm_provider != "text-completion-openai"
         and custom_llm_provider != "azure"
     ):
-        if custom_llm_provider == "ollama":
+        if custom_llm_provider == "ollama" or custom_llm_provider == "ollama_chat":
             # ollama actually supports json output
             optional_params["format"] = "json"
             litellm.add_function_to_prompt = (
@@ -3334,6 +3334,29 @@ def get_optional_params(  # use the openai defaults
         ]
         _check_valid_arg(supported_params=supported_params)
 
+        if max_tokens is not None:
+            optional_params["num_predict"] = max_tokens
+        if stream:
+            optional_params["stream"] = stream
+        if temperature is not None:
+            optional_params["temperature"] = temperature
+        if top_p is not None:
+            optional_params["top_p"] = top_p
+        if frequency_penalty is not None:
+            optional_params["repeat_penalty"] = frequency_penalty
+        if stop is not None:
+            optional_params["stop_sequences"] = stop
+    elif custom_llm_provider == "ollama_chat":
+        supported_params = [
+            "max_tokens",
+            "stream",
+            "top_p",
+            "temperature",
+            "frequency_penalty",
+            "stop",
+        ]
+        _check_valid_arg(supported_params=supported_params)
+
         if max_tokens is not None:
             optional_params["num_predict"] = max_tokens
         if stream:
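
Note on the resulting behavior (illustrative only): the second hunk duplicates the
existing "ollama" parameter translation for the new "ollama_chat" provider, so
OpenAI-style arguments are mapped to Ollama-native option names
(max_tokens -> num_predict, frequency_penalty -> repeat_penalty,
stop -> stop_sequences). The standalone helper below is a hypothetical sketch that
mirrors that mapping for clarity; the name map_openai_params_to_ollama is not part
of litellm's API.

# Hypothetical sketch (not litellm code): mirrors the param translation the
# patch adds for custom_llm_provider == "ollama_chat".
from typing import Any, Dict, List, Optional, Union


def map_openai_params_to_ollama(
    max_tokens: Optional[int] = None,
    stream: bool = False,
    temperature: Optional[float] = None,
    top_p: Optional[float] = None,
    frequency_penalty: Optional[float] = None,
    stop: Optional[Union[str, List[str]]] = None,
) -> Dict[str, Any]:
    optional_params: Dict[str, Any] = {}
    if max_tokens is not None:
        # OpenAI's max_tokens becomes Ollama's num_predict
        optional_params["num_predict"] = max_tokens
    if stream:
        optional_params["stream"] = stream
    if temperature is not None:
        optional_params["temperature"] = temperature
    if top_p is not None:
        optional_params["top_p"] = top_p
    if frequency_penalty is not None:
        # frequency_penalty is passed through as Ollama's repeat_penalty
        optional_params["repeat_penalty"] = frequency_penalty
    if stop is not None:
        optional_params["stop_sequences"] = stop
    return optional_params


# Example: OpenAI-style kwargs in, Ollama-native options out.
print(map_openai_params_to_ollama(max_tokens=256, temperature=0.2, stop=["###"]))
# {'num_predict': 256, 'temperature': 0.2, 'stop_sequences': ['###']}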