diff --git a/litellm/main.py b/litellm/main.py
index 1ee16f36ff..57a2352970 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -317,6 +317,14 @@ async def acompletion(
                 response = await init_response
             else:
                 response = init_response  # type: ignore
+
+            if custom_llm_provider == "text-completion-openai" and isinstance(
+                response, TextCompletionResponse
+            ):
+                response = litellm.OpenAITextCompletionConfig().convert_to_chat_model_response_object(
+                    response_object=response,
+                    model_response_object=litellm.ModelResponse(),
+                )
         else:
             # Call the synchronous function using run_in_executor
             response = await loop.run_in_executor(None, func_with_context)  # type: ignore
diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py
index 22e1fa70de..8ea9ca7603 100644
--- a/litellm/tests/test_completion.py
+++ b/litellm/tests/test_completion.py
@@ -975,6 +975,19 @@ def test_completion_text_openai():
         pytest.fail(f"Error occurred: {e}")
 
 
+@pytest.mark.asyncio
+async def test_completion_text_openai_async():
+    try:
+        # litellm.set_verbose =True
+        response = await litellm.acompletion(
+            model="gpt-3.5-turbo-instruct", messages=messages
+        )
+        print(response["choices"][0]["message"]["content"])
+    except Exception as e:
+        print(e)
+        pytest.fail(f"Error occurred: {e}")
+
+
 def custom_callback(
     kwargs,  # kwargs to completion
     completion_response,  # response from completion