forked from phoenix/litellm-mirror
fix(main.py): handle translating OpenAI text completion to chat completion for async requests
also adds testing for this, to prevent future regressions
This commit is contained in:
parent
ba57434374
commit
855e7ed9d2
2 changed files with 21 additions and 0 deletions
|
@ -975,6 +975,19 @@ def test_completion_text_openai():
|
|||
pytest.fail(f"Error occurred: {e}")
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_completion_text_openai_async():
    """Async regression test: `acompletion` with a text-completion model
    (gpt-3.5-turbo-instruct) must translate the chat-style `messages` input
    and return a chat-completion-shaped response without raising."""
    try:
        response = await litellm.acompletion(
            model="gpt-3.5-turbo-instruct", messages=messages
        )
        # Index into the chat-completion response shape; a translation bug
        # would surface here as a KeyError and fail the test below.
        content = response["choices"][0]["message"]["content"]
        print(content)
    except Exception as e:
        print(e)
        pytest.fail(f"Error occurred: {e}")
|
||||
|
||||
|
||||
def custom_callback(
|
||||
kwargs, # kwargs to completion
|
||||
completion_response, # response from completion
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue