(fix) text_completion fixes

Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-27 11:43:54 +00:00)
parent c166b14a3b
commit 2a15da509f

1 changed file with 9 additions and 8 deletions
@@ -475,6 +475,7 @@ def completion(
     ## COMPLETION CALL
     try:
         if custom_llm_provider == "custom_openai":
+            print("making call using openai custom chat completion")
             response = openai_proxy_chat_completions.completion(
                 model=model,
                 messages=messages,
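This first hunk only adds a debug print before the request is forwarded to the custom OpenAI-compatible proxy handler. As a hedged illustration of how a caller might reach this branch, the sketch below assumes the provider is selected by passing custom_llm_provider="custom_openai" along with an api_base for the proxy; the actual selection logic is outside this diff, and the model/api_base values are placeholders:

import litellm

# Hypothetical call exercising the "custom_openai" branch shown above;
# model and api_base are placeholders, not taken from this commit.
response = litellm.completion(
    model="my-model",
    messages=[{"role": "user", "content": "Hello"}],
    custom_llm_provider="custom_openai",
    api_base="http://localhost:8000/v1",
)
print(response["choices"][0]["message"]["content"])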
@@ -2002,12 +2003,12 @@ def text_completion(
             )
             responses[i] = response["choices"][0]

-        text_completion_response["id"] = response["id"]
+        text_completion_response["id"] = response.get("id", None)
         text_completion_response["object"] = "text_completion"
-        text_completion_response["created"] = response["created"]
-        text_completion_response["model"] = response["model"]
+        text_completion_response["created"] = response.get("created", None)
+        text_completion_response["model"] = response.get("model", None)
         text_completion_response["choices"] = responses
-        text_completion_response["usage"] = response["usage"]
+        text_completion_response["usage"] = response.get("usage", None)

         return text_completion_response
     else:
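The switch from bracket indexing to .get(...) above is the core of the fix: if the upstream response is missing one of these keys, dict-style indexing raises a KeyError and the whole text_completion call fails, while .get(key, None) simply leaves the field empty. A minimal standalone sketch of the difference, using plain dicts rather than litellm's response objects:

# Minimal sketch: why .get(key, None) is safer than response[key]
# when a provider omits optional fields such as "usage".
response = {
    "id": "cmpl-123",
    "choices": [{"message": {"content": "hi"}}],
    # note: no "usage" key in this (hypothetical) provider response
}

try:
    usage = response["usage"]        # raises KeyError
except KeyError as e:
    print(f"bracket indexing failed: {e}")

usage = response.get("usage", None)  # returns None instead of raising
print(usage)                         # -> None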
@@ -2032,17 +2033,17 @@ def text_completion(
             transformed_logprobs = litellm.utils.transform_logprobs(raw_response)
         except Exception as e:
             print_verbose(f"LiteLLM non blocking exception: {e}")
-        text_completion_response["id"] = response["id"]
+        text_completion_response["id"] = response.get("id", None)
         text_completion_response["object"] = "text_completion"
-        text_completion_response["created"] = response["created"]
-        text_completion_response["model"] = response["model"]
+        text_completion_response["created"] = response.get("created", None)
+        text_completion_response["model"] = response.get("model", None)
         text_choices = TextChoices()
         text_choices["text"] = response["choices"][0]["message"]["content"]
         text_choices["index"] = response["choices"][0]["index"]
         text_choices["logprobs"] = transformed_logprobs
         text_choices["finish_reason"] = response["choices"][0]["finish_reason"]
         text_completion_response["choices"] = [text_choices]
-        text_completion_response["usage"] = response["usage"]
+        text_completion_response["usage"] = response.get("usage", None)
         return text_completion_response

 ##### Moderation #######################
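The second hunk applies the same .get(...) hardening to the non-batched path, where a single chat-style response is repackaged into a text_completion-style response via TextChoices. As a rough, self-contained sketch of that repackaging, using plain dicts in place of litellm's TextChoices and text_completion_response objects and omitting logprobs transformation:

def chat_to_text_completion(response: dict) -> dict:
    """Repackage a chat-completion-style response dict into a
    text_completion-style dict, mirroring the field mapping in the diff.
    Simplified sketch: plain dicts instead of litellm's response classes."""
    choice = response["choices"][0]
    text_choice = {
        "text": choice["message"]["content"],
        "index": choice["index"],
        "logprobs": None,  # litellm transforms provider logprobs here; omitted
        "finish_reason": choice["finish_reason"],
    }
    return {
        "id": response.get("id", None),
        "object": "text_completion",
        "created": response.get("created", None),
        "model": response.get("model", None),
        "choices": [text_choice],
        "usage": response.get("usage", None),
    }

# Example usage with a made-up chat response:
chat_response = {
    "id": "chatcmpl-1",
    "created": 1699999999,
    "model": "gpt-3.5-turbo",
    "choices": [{"index": 0,
                 "message": {"role": "assistant", "content": "Hello!"},
                 "finish_reason": "stop"}],
    "usage": {"prompt_tokens": 5, "completion_tokens": 2, "total_tokens": 7},
}
print(chat_to_text_completion(chat_response)["choices"][0]["text"])  # -> Hello!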