diff --git a/litellm/main.py b/litellm/main.py
index f8dbfa1c1e..ef8a61758c 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -158,7 +158,7 @@ def completion(
                 model=model,
                 prompt = prompt
             )
-            completion_response = response["choices"]["text"]
+            completion_response = response["choices"][0]["text"]
             ## LOGGING
             logging(model=model, input=prompt, custom_llm_provider=custom_llm_provider, additional_args={"max_tokens": max_tokens, "original_response": completion_response}, logger_fn=logger_fn)
             ## RESPONSE OBJECT
diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py
index 066a6a4f2e..b4dddba35e 100644
--- a/litellm/tests/test_completion.py
+++ b/litellm/tests/test_completion.py
@@ -90,6 +90,14 @@ def test_completion_openai():
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
 
+def test_completion_text_openai():
+    try:
+        response = completion(model="text-davinci-003", messages=messages)
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
 def test_completion_openai_with_optional_params():
     try:
         response = completion(model="gpt-3.5-turbo", messages=messages, temperature=0.5, top_p=0.1, user="ishaan_dev@berri.ai")
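
For reference, a minimal sketch of why the indexing fix in litellm/main.py is needed: the OpenAI text-completion response stores its candidates in a list under "choices", so the generated text has to be read from the first element. The literal response dictionary below is an assumed illustration of that shape, not data taken from the patch.

# Assumed, minimal shape of an OpenAI text-completion response
# (illustrative only; values are made up).
response = {
    "id": "cmpl-123",
    "object": "text_completion",
    "model": "text-davinci-003",
    "choices": [
        {"text": "Hello there!", "index": 0, "finish_reason": "stop"},
    ],
}

# Old access, response["choices"]["text"], fails with
# "TypeError: list indices must be integers or slices, not str"
# because "choices" is a list, not a dict.

# Access used by the fix:
completion_response = response["choices"][0]["text"]
print(completion_response)  # -> "Hello there!"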