Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-27 11:43:54 +00:00)
fix(main.py): support async streaming for text completions endpoint
commit bb5b883316 (parent ba23151d92)

7 changed files with 175 additions and 68 deletions
@@ -521,12 +521,14 @@ class OpenAITextCompletion(BaseLLM):
        else:
            prompt = " ".join([message["content"] for message in messages])  # type: ignore

        # don't send max retries to the api, if set
        optional_params.pop("max_retries", None)

        data = {
            "model": model,
            "prompt": prompt,
            **optional_params
        }

        ## LOGGING
        logging_obj.pre_call(
            input=messages,
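For context, the fix lets callers consume the text-completions endpoint as an async stream. A minimal usage sketch, assuming litellm's `atext_completion` entry point in main.py accepts `stream=True` and returns an async iterator of chunks (the model name and prompt here are just illustrative examples, not taken from this diff):

import asyncio
import litellm

async def main():
    # Request an async streaming text completion; `stream=True` is assumed
    # to make the call yield partial-completion chunks asynchronously.
    response = await litellm.atext_completion(
        model="gpt-3.5-turbo-instruct",
        prompt="Say this is a test.",
        stream=True,
    )
    # Consume chunks as they arrive instead of waiting for the full response.
    async for chunk in response:
        print(chunk)

asyncio.run(main())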