fix(main.py): support async streaming for text completions endpoint

This commit is contained in:
Krrish Dholakia 2023-12-14 13:56:32 -08:00
parent ba23151d92
commit bb5b883316
7 changed files with 175 additions and 68 deletions

View file

@@ -521,12 +521,14 @@ class OpenAITextCompletion(BaseLLM):
else:
prompt = " ".join([message["content"] for message in messages]) # type: ignore
# don't send max retries to the api, if set
optional_params.pop("max_retries", None)
data = {
"model": model,
"prompt": prompt,
**optional_params
}
## LOGGING
logging_obj.pre_call(
input=messages,