mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 10:44:24 +00:00
fix(main.py): handle translating text completion openai to chat completion for async requests
also adds testing for this, to prevent future regressions
This commit is contained in:
parent
ba57434374
commit
855e7ed9d2
2 changed files with 21 additions and 0 deletions
@@ -317,6 +317,14 @@ async def acompletion(
             response = await init_response
         else:
             response = init_response  # type: ignore
+
+        if custom_llm_provider == "text-completion-openai" and isinstance(
+            response, TextCompletionResponse
+        ):
+            response = litellm.OpenAITextCompletionConfig().convert_to_chat_model_response_object(
+                response_object=response,
+                model_response_object=litellm.ModelResponse(),
+            )
     else:
         # Call the synchronous function using run_in_executor
         response = await loop.run_in_executor(None, func_with_context)  # type: ignore
|
Loading…
Add table
Add a link
Reference in a new issue