Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-26 03:04:13 +00:00
(fix) bug fix: completion, text_completion, check if optional params are not None and pass to LLM
parent f591d79376
commit b4797bec3b

2 changed files with 11 additions and 10 deletions
@@ -1923,6 +1923,7 @@ def text_completion(
     text_completion_response = TextCompletionResponse()
 
     optional_params: Dict[str, Any] = {}
+    # default values for all optional params are none, litellm only passes them to the llm when they are set to non None values
     if best_of is not None:
         optional_params["best_of"] = best_of
     if echo is not None:
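The added comment makes the convention explicit: every optional parameter defaults to None, and only values the caller actually set are copied into optional_params. A minimal standalone sketch of that filtering pattern, using a hypothetical filter_none helper (an assumption for illustration, not part of litellm):

from typing import Any, Dict


def filter_none(**kwargs: Any) -> Dict[str, Any]:
    # Keep only the keyword arguments the caller explicitly set (not None).
    return {key: value for key, value in kwargs.items() if value is not None}


# echo=None counts as "unset" and is dropped; temperature=0.0 is falsy
# but explicitly set, so it must survive the filter.
params = filter_none(best_of=2, echo=None, temperature=0.0)
assert params == {"best_of": 2, "temperature": 0.0}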
@@ -1458,23 +1458,23 @@ def get_optional_params(  # use the openai defaults
         ## check if unsupported param passed in
         supported_params = ["stream", "temperature", "max_tokens", "top_p", "stop", "n"]
         _check_valid_arg(supported_params=supported_params)
-        if temperature:
+        # temperature, top_p, n, stream, stop, max_tokens, n, presence_penalty default to None
+        if temperature is not None:
             optional_params["temperature"] = temperature
-        if top_p:
+        if top_p is not None:
             optional_params["top_p"] = top_p
-        if n:
+        if n is not None:
             optional_params["best_of"] = n
-            optional_params["do_sample"] = True  # need to sample if you want best of for hf inference endpoints
-        if stream:
+            optional_params["do_sample"] = True  # Need to sample if you want best of for hf inference endpoints
+        if stream is not None:
             optional_params["stream"] = stream
-        if stop:
+        if stop is not None:
             optional_params["stop"] = stop
-        if max_tokens:
+        if max_tokens is not None:
             optional_params["max_new_tokens"] = max_tokens
-        if n:
+        if n is not None:
             optional_params["best_of"] = n
-        if presence_penalty:
+        if presence_penalty is not None:
             optional_params["repetition_penalty"] = presence_penalty
         if "echo" in special_params:
             # https://huggingface.co/docs/huggingface_hub/main/en/package_reference/inference_client#huggingface_hub.InferenceClient.text_generation.decoder_input_details
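The move from truthiness checks to "is not None" is the substance of this fix: parameters such as temperature=0.0, stream=False, or presence_penalty=0.0 are valid, explicitly chosen settings that a bare "if temperature:" silently discards. A standalone illustration of the pitfall (generic Python, not litellm code):

optional_params = {}

temperature = 0.0  # valid: fully deterministic sampling
stream = False     # valid: caller explicitly disabled streaming

# Pre-fix pattern: 0.0 is falsy, so the caller's setting never reaches the LLM.
if temperature:
    optional_params["temperature"] = temperature
assert "temperature" not in optional_params

# Post-fix pattern: only None means "not set".
if temperature is not None:
    optional_params["temperature"] = temperature
if stream is not None:
    optional_params["stream"] = stream
assert optional_params == {"temperature": 0.0, "stream": False}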