forked from phoenix/litellm-mirror
(fix) acompletion for ollama non streaming
This commit is contained in:
parent 74c5e6f415
commit bf4ce08640
1 changed file with 5 additions and 3 deletions
@@ -1000,9 +1000,11 @@ def completion(
         logging.pre_call(
             input=prompt, api_key=None, additional_args={"api_base": api_base, "custom_prompt_dict": litellm.custom_prompt_dict}
         )
         if kwargs.get('acompletion', False) == True:
-            async_generator = ollama.async_get_ollama_response_stream(api_base, model, prompt, optional_params)
-            return async_generator
-
+            if optional_params.get("stream", False) == True:
+                # assume all ollama responses are streamed
+                async_generator = ollama.async_get_ollama_response_stream(api_base, model, prompt, optional_params)
+                return async_generator
+
         generator = ollama.get_ollama_response_stream(api_base, model, prompt, optional_params)
         if optional_params.get("stream", False) == True:
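In effect, a non-streaming async call now falls through to the regular response path instead of always getting the streaming generator back. A minimal sketch of both call patterns from the caller's side, assuming litellm's public acompletion entry point and a locally running Ollama server; the model name, prompt, and api_base are placeholders, not taken from this commit:

    import asyncio
    import litellm

    async def main():
        # streaming async call: after this fix, this is the only case that
        # returns the generator from ollama.async_get_ollama_response_stream
        stream = await litellm.acompletion(
            model="ollama/llama2",  # placeholder model name
            messages=[{"role": "user", "content": "hello"}],
            api_base="http://localhost:11434",  # assumed local Ollama endpoint
            stream=True,
        )
        async for chunk in stream:
            print(chunk)

        # non-streaming async call: before this commit it also hit the async
        # generator branch; now it falls through to the non-streaming handling
        # after the `if kwargs.get('acompletion', False)` block
        response = await litellm.acompletion(
            model="ollama/llama2",
            messages=[{"role": "user", "content": "hello"}],
            api_base="http://localhost:11434",
        )
        print(response)

    asyncio.run(main())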