Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 03:04:13 +00:00)
(feat) text completion response now OpenAI Object
commit d4430fc51e
parent 4c895d2e91
1 changed file with 24 additions and 25 deletions
@@ -60,6 +60,8 @@ from litellm.utils import (
     get_secret,
     CustomStreamWrapper,
     ModelResponse,
+    TextCompletionResponse,
+    TextChoices,
     EmbeddingResponse,
     read_config_args,
     Choices,
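The two added imports are the dict-style response types that the rewritten return paths below populate. As a rough sketch of the shape this diff assumes (hypothetical stand-ins for illustration only; the real classes live in litellm.utils):

    # Hypothetical minimal stand-ins: the diff below only relies on
    # these types supporting dict-style item assignment and lookup.
    class TextChoices(dict):
        # one completion choice: "text", "index", "logprobs", "finish_reason"
        pass

    class TextCompletionResponse(dict):
        # OpenAI-style text_completion envelope: "id", "object",
        # "created", "model", "choices", "usage"
        pass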
@@ -1873,6 +1875,7 @@ def text_completion(*args, **kwargs):
     if "prompt" not in kwargs:
         raise ValueError("please pass prompt into the `text_completion` endpoint - `text_completion(model, prompt='hello world')`")
 
+    text_completion_response = TextCompletionResponse()
     model = kwargs["model"]
     prompt = kwargs["prompt"]
     # get custom_llm_provider
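The design choice worth noting here: text_completion_response is instantiated once, before any provider-specific branching, so both return paths in the hunks below (the batched-prompt path and the single-prompt path) fill in the same pre-built object rather than each assembling its own dict literal.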
@@ -1906,15 +1909,15 @@ def text_completion(*args, **kwargs):
                 new_kwargs["prompt"] = decoded_prompt
                 response = text_completion(**new_kwargs)
                 responses[i] = response["choices"][0]
-            formatted_response_obj = {
-                "id": response["id"],
-                "object": "text_completion",
-                "created": response["created"],
-                "model": response["model"],
-                "choices": responses,
-                "usage": response["usage"]
-            }
-            return formatted_response_obj
+
+            text_completion_response["id"] = response["id"]
+            text_completion_response["object"] = "text_completion"
+            text_completion_response["created"] = response["created"]
+            text_completion_response["model"] = response["model"]
+            text_completion_response["choices"] = responses
+            text_completion_response["usage"] = response["usage"]
+
+            return text_completion_response
         else:
             messages = [{"role": "system", "content": kwargs["prompt"]}]
             kwargs["messages"] = messages
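The assignments above are behavior-preserving on the data: they produce the same key/value pairs the deleted dict literal did, only wrapped in a TextCompletionResponse. A quick sketch of that equivalence, reusing the hypothetical dict-based stand-in from earlier:

    # hypothetical stand-in from the sketch above, repeated for self-containment
    class TextCompletionResponse(dict): ...

    # same fields either way; only the container type changes
    old_style = {"id": "cmpl-123", "object": "text_completion"}

    new_style = TextCompletionResponse()
    new_style["id"] = "cmpl-123"
    new_style["object"] = "text_completion"

    assert dict(new_style) == old_style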
@@ -1928,22 +1931,18 @@ def text_completion(*args, **kwargs):
         transformed_logprobs = litellm.utils.transform_logprobs(raw_response)
     except Exception as e:
         print("LiteLLM non blocking exception", e)
-    formatted_response_obj = {
-        "id": response["id"],
-        "object": "text_completion",
-        "created": response["created"],
-        "model": response["model"],
-        "choices": [
-            {
-                "text": response["choices"][0]["message"]["content"],
-                "index": response["choices"][0]["index"],
-                "logprobs": transformed_logprobs,
-                "finish_reason": response["choices"][0]["finish_reason"]
-            }
-        ],
-        "usage": response["usage"]
-    }
-    return formatted_response_obj
+    text_completion_response["id"] = response["id"]
+    text_completion_response["object"] = "text_completion"
+    text_completion_response["created"] = response["created"]
+    text_completion_response["model"] = response["model"]
+    text_choices = TextChoices()
+    text_choices["text"] = response["choices"][0]["message"]["content"]
+    text_choices["index"] = response["choices"][0]["index"]
+    text_choices["logprobs"] = transformed_logprobs
+    text_choices["finish_reason"] = response["choices"][0]["finish_reason"]
+    text_completion_response["choices"] = [text_choices]
+    text_completion_response["usage"] = response["usage"]
+    return text_completion_response
 
 ##### Moderation #######################
 def moderation(input: str, api_key: Optional[str]=None):
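After this change a caller gets an OpenAI-style object back from text_completion and can read the standard completion fields off it directly. A minimal usage sketch (the model name is a placeholder; the field access mirrors the keys set in the diff):

    import litellm

    # placeholder model; assumes OPENAI_API_KEY is set in the environment
    response = litellm.text_completion(model="text-davinci-003", prompt="hello world")

    print(response["id"], response["model"])
    for choice in response["choices"]:
        print(choice["text"], choice["finish_reason"])
    print(response["usage"])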