mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-26 03:04:13 +00:00
fix(main.py): handle translating text completion openai to chat completion for async requests
This change also adds tests for the async path to prevent future regressions.
This commit is contained in:
parent
ba57434374
commit
855e7ed9d2
2 changed files with 21 additions and 0 deletions
|
@ -317,6 +317,14 @@ async def acompletion(
|
|||
response = await init_response
|
||||
else:
|
||||
response = init_response # type: ignore
|
||||
|
||||
if custom_llm_provider == "text-completion-openai" and isinstance(
|
||||
response, TextCompletionResponse
|
||||
):
|
||||
response = litellm.OpenAITextCompletionConfig().convert_to_chat_model_response_object(
|
||||
response_object=response,
|
||||
model_response_object=litellm.ModelResponse(),
|
||||
)
|
||||
else:
|
||||
# Call the synchronous function using run_in_executor
|
||||
response = await loop.run_in_executor(None, func_with_context) # type: ignore
|
||||
|
|
|
@ -975,6 +975,19 @@ def test_completion_text_openai():
|
|||
pytest.fail(f"Error occurred: {e}")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_completion_text_openai_async():
    """Async regression test for text-completion models via acompletion.

    Ensures that calling ``litellm.acompletion`` with a text-completion
    model (``gpt-3.5-turbo-instruct``) and chat-style ``messages`` yields a
    chat-shaped response whose first choice exposes ``message.content``.
    Any exception is treated as a test failure.
    """
    try:
        response = await litellm.acompletion(
            model="gpt-3.5-turbo-instruct", messages=messages
        )
        # Accessing the chat-shaped fields verifies the text-completion
        # response was converted to a chat-completion response object.
        first_choice = response["choices"][0]
        print(first_choice["message"]["content"])
    except Exception as e:
        print(e)
        pytest.fail(f"Error occurred: {e}")
|
||||
|
||||
|
||||
def custom_callback(
|
||||
kwargs, # kwargs to completion
|
||||
completion_response, # response from completion
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue