From de47058e3206b16e96afabdcdd40b5384ba726fb Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Tue, 31 Oct 2023 15:31:24 -0700
Subject: [PATCH] (feat) text_completion return raw openai response for
 text_completion requests

---
 litellm/main.py | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/litellm/main.py b/litellm/main.py
index ba6ad978f5..b091e236fd 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -562,6 +562,7 @@ def completion(
             },
         )
         ## RESPONSE OBJECT
+        model_response._hidden_params["original_response"] = response # track original response, if users make a litellm.text_completion() request, we can return the original response
         choices_list = []
         for idx, item in enumerate(response["choices"]):
             if len(item["text"]) > 0:
@@ -1776,11 +1777,18 @@ def text_completion(*args, **kwargs):
     """
     This maps to the Openai.Completion.create format, which has a different I/O (accepts prompt, returning ["choices"]["text"].
     """
+    if "engine" in kwargs:
+        kwargs["model"] = kwargs["engine"]
+        kwargs.pop("engine")
     if "prompt" in kwargs:
         messages = [{"role": "system", "content": kwargs["prompt"]}]
         kwargs["messages"] = messages
        kwargs.pop("prompt")
     response = completion(*args, **kwargs) # assume the response is the openai response object
+
+    # if the model is text-davinci-003, return raw response from openai
+    if kwargs["model"] in litellm.open_ai_text_completion_models and response._hidden_params.get("original_response", None) != None:
+        return response._hidden_params.get("original_response", None)
     formatted_response_obj = {
         "id": response["id"],
         "object": "text_completion",
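
---
Usage note (not part of the commit): a minimal sketch of what this patch enables, assuming litellm is installed, OPENAI_API_KEY is set, and "text-davinci-003" is present in litellm.open_ai_text_completion_models. The model name and prompt here are illustrative.

    import litellm

    # The patched text_completion() remaps the legacy "engine" kwarg to
    # "model", and wraps "prompt" into a single system message before
    # delegating to completion().
    response = litellm.text_completion(
        engine="text-davinci-003",
        prompt="Say hello",
    )

    # For OpenAI text-completion models, the patch short-circuits and returns
    # the raw OpenAI response that completion() stashed in
    # model_response._hidden_params["original_response"], so the shape matches
    # the legacy Openai.Completion.create output.
    print(response["choices"][0]["text"])

Note that the new check reads kwargs["model"] directly, so it assumes the model was passed as a keyword argument (or as "engine"); a positional model argument would raise a KeyError on this path.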