forked from phoenix/litellm-mirror
remove aws_sagemaker_allow_zero_temp from the parameters passed to inference
commit 97cf32630d
parent b321f2988b
1 changed file with 1 addition and 0 deletions
```diff
@@ -3134,6 +3134,7 @@ def get_optional_params(
             if max_tokens == 0:
                 max_tokens = 1
             optional_params["max_new_tokens"] = max_tokens
+        passed_params.pop("aws_sagemaker_allow_zero_temp", None)
     elif custom_llm_provider == "bedrock":
         supported_params = get_supported_openai_params(
             model=model, custom_llm_provider=custom_llm_provider
```
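For context, here is a minimal sketch of why the flag is popped. Judging from its name and the surrounding code, `aws_sagemaker_allow_zero_temp` is a litellm-side switch that lets callers keep `temperature == 0` for SageMaker-hosted models; it is not a parameter the hosted model server understands, so it must be consumed before the request body is built. The helper name below is hypothetical, not litellm's actual code path:

```python
# A minimal sketch, assuming the flag's semantics from its name and the
# surrounding diff; build_inference_payload is a hypothetical helper, not
# litellm's actual code path.

def build_inference_payload(passed_params: dict) -> dict:
    """Copy caller params, consume litellm-only flags, and return the
    dict that would actually be sent to the SageMaker endpoint."""
    params = dict(passed_params)

    temperature = params.get("temperature")
    if temperature == 0 and not params.get("aws_sagemaker_allow_zero_temp", False):
        # HF TGI containers reject temperature == 0, so nudge it up
        # unless the caller explicitly opted in via the flag.
        params["temperature"] = 0.01

    # Consume the control flag so it never reaches the model server;
    # pop(key, None) is a no-op when the key was never passed.
    params.pop("aws_sagemaker_allow_zero_temp", None)
    return params


payload = build_inference_payload(
    {"temperature": 0, "aws_sagemaker_allow_zero_temp": True, "max_new_tokens": 64}
)
assert "aws_sagemaker_allow_zero_temp" not in payload
print(payload)  # -> {'temperature': 0, 'max_new_tokens': 64}
```

Using `pop(key, None)` rather than `del` keeps the cleanup safe whether or not the caller ever passed the flag.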