fix(main.py): translate OpenAI text-completion responses to chat-completion format for async requests

Also adds a test for this to prevent future regressions.
This commit is contained in:
Krrish Dholakia 2024-04-09 16:47:49 -07:00
parent ba57434374
commit 855e7ed9d2
2 changed files with 21 additions and 0 deletions

View file

@ -317,6 +317,14 @@ async def acompletion(
response = await init_response
else:
response = init_response # type: ignore
if custom_llm_provider == "text-completion-openai" and isinstance(
response, TextCompletionResponse
):
response = litellm.OpenAITextCompletionConfig().convert_to_chat_model_response_object(
response_object=response,
model_response_object=litellm.ModelResponse(),
)
else:
# Call the synchronous function using run_in_executor
response = await loop.run_in_executor(None, func_with_context) # type: ignore