fix(text_completion.py): fix routing logic

commit 54b4130d54
parent 11d1651d36
Author: Krrish Dholakia
Date:   2023-11-10 15:46:37 -08:00

@@ -1929,23 +1929,7 @@ def text_completion(
     # get custom_llm_provider
     _, custom_llm_provider, dynamic_api_key, api_base = get_llm_provider(model=model, custom_llm_provider=custom_llm_provider, api_base=api_base)
-    if custom_llm_provider == "text-completion-openai":
-        # text-davinci-003 and openai text completion models
-        messages = [{"role": "system", "content": prompt}]
-        kwargs.pop("prompt", None)
-        response = completion(
-            model = model,
-            messages=messages,
-            *args,
-            **kwargs,
-            **optional_params
-        )
-        # assume the response is the openai response object
-        # return raw response from openai
-        return response._hidden_params.get("original_response", None)
-    elif custom_llm_provider == "huggingface":
+    if custom_llm_provider == "huggingface":
         # if echo == True, for TGI llms we need to set top_n_tokens to 3
         if echo == True:
             # for tgi llms
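
Reviewer note: the huggingface branch above emulates the echo option for TGI (text-generation-inference) models by requesting extra per-token detail. Below is a minimal sketch of that parameter mapping; the helper name _tgi_echo_params is hypothetical, and only the top_n_tokens = 3 value comes from the diff's own comment:

    def _tgi_echo_params(optional_params: dict, echo: bool) -> dict:
        # Sketch only: map the echo flag onto a TGI-style request param.
        if echo:
            # the diff comments that for TGI llms top_n_tokens must be set to 3
            optional_params["top_n_tokens"] = 3
        return optional_params
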
@@ -1982,7 +1966,7 @@ def text_completion(
         text_completion_response["choices"] = responses
         return text_completion_response
-    else:
+    # else:
     # check if non default values passed in for best_of, echo, logprobs, suffix
     # these are the params supported by Completion() but not ChatCompletion
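
Reviewer note: taken together, the two hunks change text_completion's routing. The text-completion-openai short-circuit (which returned the raw provider response) is removed, and commenting out the else: means the Completion-vs-ChatCompletion parameter check now runs for every provider rather than only non-huggingface ones. A minimal sketch of the resulting dispatch, under assumed simplified names (route_text_completion and its return shape are illustrative, not litellm's API):

    def route_text_completion(custom_llm_provider: str, echo: bool = False) -> dict:
        # Sketch only: dispatch order after this commit.
        optional_params = {}
        if custom_llm_provider == "huggingface" and echo:
            # special case kept from hunk 1: TGI echo support
            optional_params["top_n_tokens"] = 3
        # with the else: commented out (hunk 2), every provider, including
        # text-completion-openai, falls through to the shared param check
        # and the common completion() call path
        return {"provider": custom_llm_provider, **optional_params}
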