From 855e7ed9d234ea9dbe63be5a29b3453372db8638 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Tue, 9 Apr 2024 16:47:49 -0700
Subject: [PATCH] fix(main.py): handle translating text completion openai to
 chat completion for async requests

also adds testing for this, to prevent future regressions
---
 litellm/main.py                  |  8 ++++++++
 litellm/tests/test_completion.py | 13 +++++++++++++
 2 files changed, 21 insertions(+)

diff --git a/litellm/main.py b/litellm/main.py
index 1ee16f36f..57a235297 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -317,6 +317,14 @@ async def acompletion(
             response = await init_response
         else:
             response = init_response  # type: ignore
+
+        if custom_llm_provider == "text-completion-openai" and isinstance(
+            response, TextCompletionResponse
+        ):
+            response = litellm.OpenAITextCompletionConfig().convert_to_chat_model_response_object(
+                response_object=response,
+                model_response_object=litellm.ModelResponse(),
+            )
     else:
         # Call the synchronous function using run_in_executor
         response = await loop.run_in_executor(None, func_with_context)  # type: ignore
diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py
index 22e1fa70d..8ea9ca760 100644
--- a/litellm/tests/test_completion.py
+++ b/litellm/tests/test_completion.py
@@ -975,6 +975,19 @@ def test_completion_text_openai():
         pytest.fail(f"Error occurred: {e}")
 
 
+@pytest.mark.asyncio
+async def test_completion_text_openai_async():
+    try:
+        # litellm.set_verbose =True
+        response = await litellm.acompletion(
+            model="gpt-3.5-turbo-instruct", messages=messages
+        )
+        print(response["choices"][0]["message"]["content"])
+    except Exception as e:
+        print(e)
+        pytest.fail(f"Error occurred: {e}")
+
+
 def custom_callback(
     kwargs,  # kwargs to completion
     completion_response,  # response from completion