fix(utils.py): check for none params

Krrish Dholakia 2023-11-15 13:39:02 -08:00
parent 8cced8eabc
commit 60d2dff74a

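Why the change matters: several of these parameters have legitimate falsy values (e.g. temperature=0 or presence_penalty=0), so a bare truthiness check like `if temperature:` silently drops a value the caller set explicitly, while `is not None` only skips parameters that were never passed. Checks such as `if stream:` and `if logit_bias != {}:` are intentionally left unchanged in the diff below. A minimal sketch of the difference (illustrative only, not taken from the commit):

    optional_params = {}
    temperature = 0  # caller explicitly requests deterministic sampling

    # old check: 0 is falsy, so the param is silently dropped
    if temperature:
        optional_params["temperature"] = temperature
    print(optional_params)  # {}

    # new check: only skips params that were left unset (None)
    if temperature is not None:
        optional_params["temperature"] = temperature
    print(optional_params)  # {'temperature': 0}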

@@ -1749,15 +1749,15 @@ def get_optional_params( # use the openai defaults
         # handle anthropic params
         if stream:
             optional_params["stream"] = stream
-        if stop:
+        if stop is not None:
             if type(stop) == str:
                 stop = [stop] # openai can accept str/list for stop
             optional_params["stop_sequences"] = stop
-        if temperature:
+        if temperature is not None:
             optional_params["temperature"] = temperature
-        if top_p:
+        if top_p is not None:
             optional_params["top_p"] = top_p
-        if max_tokens:
+        if max_tokens is not None:
             optional_params["max_tokens_to_sample"] = max_tokens
     elif custom_llm_provider == "cohere":
         ## check if unsupported param passed in
@@ -1766,21 +1766,21 @@ def get_optional_params( # use the openai defaults
         # handle cohere params
         if stream:
             optional_params["stream"] = stream
-        if temperature:
+        if temperature is not None:
             optional_params["temperature"] = temperature
-        if max_tokens:
+        if max_tokens is not None:
             optional_params["max_tokens"] = max_tokens
-        if n:
+        if n is not None:
             optional_params["num_generations"] = n
         if logit_bias != {}:
             optional_params["logit_bias"] = logit_bias
-        if top_p:
+        if top_p is not None:
             optional_params["p"] = top_p
-        if frequency_penalty:
+        if frequency_penalty is not None:
             optional_params["frequency_penalty"] = frequency_penalty
-        if presence_penalty:
+        if presence_penalty is not None:
             optional_params["presence_penalty"] = presence_penalty
-        if stop:
+        if stop is not None:
             optional_params["stop_sequences"] = stop
     elif custom_llm_provider == "maritalk":
         ## check if unsupported param passed in
@@ -1789,17 +1789,17 @@ def get_optional_params( # use the openai defaults
         # handle cohere params
         if stream:
             optional_params["stream"] = stream
-        if temperature:
+        if temperature is not None:
             optional_params["temperature"] = temperature
-        if max_tokens:
+        if max_tokens is not None:
             optional_params["max_tokens"] = max_tokens
         if logit_bias != {}:
             optional_params["logit_bias"] = logit_bias
-        if top_p:
+        if top_p is not None:
             optional_params["p"] = top_p
-        if presence_penalty:
+        if presence_penalty is not None:
             optional_params["repetition_penalty"] = presence_penalty
-        if stop:
+        if stop is not None:
             optional_params["stopping_tokens"] = stop
     elif custom_llm_provider == "replicate":
         ## check if unsupported param passed in
@@ -1809,18 +1809,18 @@ def get_optional_params( # use the openai defaults
         if stream:
             optional_params["stream"] = stream
             return optional_params
-        if max_tokens:
+        if max_tokens is not None:
             if "vicuna" in model or "flan" in model:
                 optional_params["max_length"] = max_tokens
             elif "meta/codellama-13b" in model:
                 optional_params["max_tokens"] = max_tokens
             else:
                 optional_params["max_new_tokens"] = max_tokens
-        if temperature:
+        if temperature is not None:
             optional_params["temperature"] = temperature
-        if top_p:
+        if top_p is not None:
             optional_params["top_p"] = top_p
-        if stop:
+        if stop is not None:
             optional_params["stop_sequences"] = stop
     elif custom_llm_provider == "huggingface":
         ## check if unsupported param passed in
@@ -1864,15 +1864,15 @@ def get_optional_params( # use the openai defaults
         if stream:
             optional_params["stream_tokens"] = stream
-        if temperature:
+        if temperature is not None:
             optional_params["temperature"] = temperature
-        if top_p:
+        if top_p is not None:
             optional_params["top_p"] = top_p
-        if max_tokens:
+        if max_tokens is not None:
             optional_params["max_tokens"] = max_tokens
-        if frequency_penalty:
+        if frequency_penalty is not None:
             optional_params["repetition_penalty"] = frequency_penalty # https://docs.together.ai/reference/inference
-        if stop:
+        if stop is not None:
             optional_params["stop"] = stop
     elif custom_llm_provider == "ai21":
         ## check if unsupported param passed in
@@ -1881,36 +1881,36 @@ def get_optional_params( # use the openai defaults
         if stream:
             optional_params["stream"] = stream
-        if n:
+        if n is not None:
             optional_params["numResults"] = n
-        if max_tokens:
+        if max_tokens is not None:
             optional_params["maxTokens"] = max_tokens
-        if temperature:
+        if temperature is not None:
             optional_params["temperature"] = temperature
-        if top_p:
+        if top_p is not None:
             optional_params["topP"] = top_p
-        if stop:
+        if stop is not None:
             optional_params["stopSequences"] = stop
-        if frequency_penalty:
+        if frequency_penalty is not None:
             optional_params["frequencyPenalty"] = {"scale": frequency_penalty}
-        if presence_penalty:
+        if presence_penalty is not None:
             optional_params["presencePenalty"] = {"scale": presence_penalty}
     elif custom_llm_provider == "palm": # https://developers.generativeai.google/tutorials/curl_quickstart
         ## check if unsupported param passed in
         supported_params = ["temperature", "top_p", "stream", "n", "stop", "max_tokens"]
         _check_valid_arg(supported_params=supported_params)
-        if temperature:
+        if temperature is not None:
             optional_params["temperature"] = temperature
-        if top_p:
+        if top_p is not None:
             optional_params["top_p"] = top_p
         if stream:
             optional_params["stream"] = stream
-        if n:
+        if n is not None:
             optional_params["candidate_count"] = n
-        if stop:
+        if stop is not None:
             optional_params["stop_sequences"] = stop
-        if max_tokens:
+        if max_tokens is not None:
             optional_params["max_output_tokens"] = max_tokens
     elif (
         custom_llm_provider == "vertex_ai"
@@ -1919,13 +1919,13 @@ def get_optional_params( # use the openai defaults
         supported_params = ["temperature", "top_p", "max_tokens", "stream"]
         _check_valid_arg(supported_params=supported_params)
-        if temperature:
+        if temperature is not None:
             optional_params["temperature"] = temperature
-        if top_p:
+        if top_p is not None:
             optional_params["top_p"] = top_p
         if stream:
             optional_params["stream"] = stream
-        if max_tokens:
+        if max_tokens is not None:
             optional_params["max_output_tokens"] = max_tokens
     elif custom_llm_provider == "sagemaker":
         if "llama-2" in model:
@@ -1940,11 +1940,11 @@ def get_optional_params( # use the openai defaults
             supported_params = ["temperature", "max_tokens", "stream"]
             _check_valid_arg(supported_params=supported_params)
-            if max_tokens:
+            if max_tokens is not None:
                 optional_params["max_new_tokens"] = max_tokens
-            if temperature:
+            if temperature is not None:
                 optional_params["temperature"] = temperature
-            if top_p:
+            if top_p is not None:
                 optional_params["top_p"] = top_p
             if stream:
                 optional_params["stream"] = stream
@@ -1958,13 +1958,13 @@ def get_optional_params( # use the openai defaults
             _check_valid_arg(supported_params=supported_params)
             # params "maxTokens":200,"temperature":0,"topP":250,"stop_sequences":[],
             # https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=j2-ultra
-            if max_tokens:
+            if max_tokens is not None:
                 optional_params["maxTokens"] = max_tokens
-            if temperature:
+            if temperature is not None:
                 optional_params["temperature"] = temperature
-            if stop:
+            if stop is not None:
                 optional_params["stop_sequences"] = stop
-            if top_p:
+            if top_p is not None:
                 optional_params["topP"] = top_p
             if stream:
                 optional_params["stream"] = stream
@@ -1973,13 +1973,13 @@ def get_optional_params( # use the openai defaults
             _check_valid_arg(supported_params=supported_params)
             # anthropic params on bedrock
             # \"max_tokens_to_sample\":300,\"temperature\":0.5,\"top_p\":1,\"stop_sequences\":[\"\\\\n\\\\nHuman:\"]}"
-            if max_tokens:
+            if max_tokens is not None:
                 optional_params["max_tokens_to_sample"] = max_tokens
-            if temperature:
+            if temperature is not None:
                 optional_params["temperature"] = temperature
-            if top_p:
+            if top_p is not None:
                 optional_params["top_p"] = top_p
-            if stop:
+            if stop is not None:
                 optional_params["stop_sequences"] = stop
             if stream:
                 optional_params["stream"] = stream
@@ -1987,13 +1987,13 @@ def get_optional_params( # use the openai defaults
             supported_params = ["max_tokens", "temperature", "stop", "top_p", "stream"]
             _check_valid_arg(supported_params=supported_params)
             # see https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=titan-large
-            if max_tokens:
+            if max_tokens is not None:
                 optional_params["maxTokenCount"] = max_tokens
-            if temperature:
+            if temperature is not None:
                 optional_params["temperature"] = temperature
-            if stop:
+            if stop is not None:
                 optional_params["stopSequences"] = stop
-            if top_p:
+            if top_p is not None:
                 optional_params["topP"] = top_p
             if stream:
                 optional_params["stream"] = stream
@@ -2003,86 +2003,86 @@ def get_optional_params( # use the openai defaults
             # handle cohere params
             if stream:
                 optional_params["stream"] = stream
-            if temperature:
+            if temperature is not None:
                 optional_params["temperature"] = temperature
-            if max_tokens:
+            if max_tokens is not None:
                 optional_params["max_tokens"] = max_tokens
-            if n:
+            if n is not None:
                 optional_params["num_generations"] = n
             if logit_bias != {}:
                 optional_params["logit_bias"] = logit_bias
-            if top_p:
+            if top_p is not None:
                 optional_params["p"] = top_p
-            if frequency_penalty:
+            if frequency_penalty is not None:
                 optional_params["frequency_penalty"] = frequency_penalty
-            if presence_penalty:
+            if presence_penalty is not None:
                 optional_params["presence_penalty"] = presence_penalty
-            if stop:
+            if stop is not None:
                 optional_params["stop_sequences"] = stop
     elif model in litellm.aleph_alpha_models:
         supported_params = ["max_tokens", "stream", "top_p", "temperature", "presence_penalty", "frequency_penalty", "n", "stop"]
         _check_valid_arg(supported_params=supported_params)
-        if max_tokens:
+        if max_tokens is not None:
             optional_params["maximum_tokens"] = max_tokens
         if stream:
             optional_params["stream"] = stream
-        if temperature:
+        if temperature is not None:
             optional_params["temperature"] = temperature
-        if top_p:
+        if top_p is not None:
             optional_params["top_p"] = top_p
-        if presence_penalty:
+        if presence_penalty is not None:
             optional_params["presence_penalty"] = presence_penalty
-        if frequency_penalty:
+        if frequency_penalty is not None:
             optional_params["frequency_penalty"] = frequency_penalty
-        if n:
+        if n is not None:
             optional_params["n"] = n
-        if stop:
+        if stop is not None:
             optional_params["stop_sequences"] = stop
     elif custom_llm_provider == "ollama":
         supported_params = ["max_tokens", "stream", "top_p", "temperature", "frequency_penalty", "stop"]
         _check_valid_arg(supported_params=supported_params)
-        if max_tokens:
+        if max_tokens is not None:
            optional_params["num_predict"] = max_tokens
         if stream:
            optional_params["stream"] = stream
-        if temperature:
+        if temperature is not None:
            optional_params["temperature"] = temperature
-        if top_p:
+        if top_p is not None:
            optional_params["top_p"] = top_p
-        if frequency_penalty:
+        if frequency_penalty is not None:
            optional_params["repeat_penalty"] = frequency_penalty
-        if stop:
+        if stop is not None:
            optional_params["stop_sequences"] = stop
     elif model in litellm.nlp_cloud_models or custom_llm_provider == "nlp_cloud":
         supported_params = ["max_tokens", "stream", "temperature", "top_p", "presence_penalty", "frequency_penalty", "n", "stop"]
         _check_valid_arg(supported_params=supported_params)
-        if max_tokens:
+        if max_tokens is not None:
             optional_params["max_length"] = max_tokens
         if stream:
             optional_params["stream"] = stream
-        if temperature:
+        if temperature is not None:
             optional_params["temperature"] = temperature
-        if top_p:
+        if top_p is not None:
             optional_params["top_p"] = top_p
-        if presence_penalty:
+        if presence_penalty is not None:
             optional_params["presence_penalty"] = presence_penalty
-        if frequency_penalty:
+        if frequency_penalty is not None:
             optional_params["frequency_penalty"] = frequency_penalty
-        if n:
+        if n is not None:
             optional_params["num_return_sequences"] = n
-        if stop:
+        if stop is not None:
             optional_params["stop_sequences"] = stop
     elif model in litellm.petals_models or custom_llm_provider == "petals":
         supported_params = ["max_tokens", "temperature", "top_p", "stream"]
         _check_valid_arg(supported_params=supported_params)
         # max_new_tokens=1,temperature=0.9, top_p=0.6
-        if max_tokens:
+        if max_tokens is not None:
             optional_params["max_new_tokens"] = max_tokens
-        if temperature:
+        if temperature is not None:
             optional_params["temperature"] = temperature
-        if top_p:
+        if top_p is not None:
             optional_params["top_p"] = top_p
         if stream:
             optional_params["stream"] = stream
@@ -2090,7 +2090,7 @@ def get_optional_params( # use the openai defaults
         supported_params = ["temperature", "top_p", "n", "stream", "stop", "max_tokens", "presence_penalty", "frequency_penalty", "logit_bias", "user"]
         _check_valid_arg(supported_params=supported_params)
         optional_params = non_default_params
-        if temperature != None:
+        if temperature is not None:
             if temperature == 0 and model == "mistralai/Mistral-7B-Instruct-v0.1": # this model does no support temperature == 0
                 temperature = 0.0001 # close to 0
             optional_params["temperature"] = temperature