fix(main.py): handle translating text completion openai to chat completion for async requests

also adds testing for this, to prevent future regressions
This commit is contained in:
Krrish Dholakia 2024-04-09 16:47:49 -07:00
parent ba57434374
commit 855e7ed9d2
2 changed files with 21 additions and 0 deletions

View file

@@ -317,6 +317,14 @@ async def acompletion(
            response = await init_response
        else:
            response = init_response  # type: ignore
if custom_llm_provider == "text-completion-openai" and isinstance(
response, TextCompletionResponse
):
response = litellm.OpenAITextCompletionConfig().convert_to_chat_model_response_object(
response_object=response,
model_response_object=litellm.ModelResponse(),
)
    else:
        # Call the synchronous function using run_in_executor
        response = await loop.run_in_executor(None, func_with_context)  # type: ignore

View file

@@ -975,6 +975,19 @@ def test_completion_text_openai():
        pytest.fail(f"Error occurred: {e}")
@pytest.mark.asyncio
async def test_completion_text_openai_async():
    """Regression test for async text-completion -> chat-completion translation.

    ``gpt-3.5-turbo-instruct`` is a text-completion model; ``acompletion``
    must convert its ``TextCompletionResponse`` into a chat-style
    ``ModelResponse`` so callers can read ``choices[0].message.content``.
    """
    try:
        response = await litellm.acompletion(
            model="gpt-3.5-turbo-instruct", messages=messages
        )
        # The bug being guarded against returned a raw text-completion
        # object with no "message" key — assert the chat shape explicitly
        # instead of only printing it, so the regression actually fails.
        content = response["choices"][0]["message"]["content"]
        assert isinstance(content, str) and content
        print(content)
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")
def custom_callback(
    kwargs,  # kwargs to completion
    completion_response,  # response from completion