(fix) acompletion for ollama non streaming

ishaan-jaff 2023-10-09 13:42:56 -07:00
parent 5cff54a79e
commit 9d6088f65c


@@ -1000,9 +1000,11 @@ def completion(
         logging.pre_call(
             input=prompt, api_key=None, additional_args={"api_base": api_base, "custom_prompt_dict": litellm.custom_prompt_dict}
         )
         if kwargs.get('acompletion', False) == True:
-            async_generator = ollama.async_get_ollama_response_stream(api_base, model, prompt, optional_params)
-            return async_generator
+            if optional_params.get("stream", False) == True:
+                # assume all ollama responses are streamed
+                async_generator = ollama.async_get_ollama_response_stream(api_base, model, prompt, optional_params)
+                return async_generator
         generator = ollama.get_ollama_response_stream(api_base, model, prompt, optional_params)
         if optional_params.get("stream", False) == True:
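
Before this change, every acompletion call for an ollama model returned the async streaming generator, so non-streaming async calls never got a regular completion response. The hunk guards that early return behind a stream check, letting non-streaming async calls fall through to the synchronous path below. A minimal sketch of the resulting routing, with a hypothetical route_ollama helper standing in for the surrounding completion() code:

def route_ollama(optional_params: dict, acompletion: bool) -> str:
    # Return which generator the caller would receive after this fix.
    streaming = optional_params.get("stream", False)
    if acompletion and streaming:
        # Async callers that asked to stream get the async generator.
        return "async_generator"
    # Sync calls, and async non-streaming calls, fall through to the
    # synchronous generator, whose output can be collected into a full
    # (non-streamed) response.
    return "sync_generator"

assert route_ollama({"stream": True}, acompletion=True) == "async_generator"
assert route_ollama({"stream": False}, acompletion=True) == "sync_generator"
assert route_ollama({}, acompletion=False) == "sync_generator"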