Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 10:44:24 +00:00)
[Feat] Add max_completion_tokens param (#5691)
* add max_completion_tokens
* add max_completion_tokens
* add max_completion_tokens support for OpenAI models
* add max_completion_tokens param
* add max_completion_tokens for bedrock converse models
* add test for converse maxTokens
* fix openai o1 param mapping test
* move test optional params
* add max_completion_tokens for anthropic api
* fix conftest
* add max_completion_tokens for vertex ai partner models
* add max_completion_tokens for fireworks ai
* add max_completion_tokens for hf rest api
* add test for param mapping
* add param mapping for vertex, gemini + testing
* predibase is the most unstable and unusable llm api in prod, can't handle our ci/cd
* add max_completion_tokens to openai supported params
* fix fireworks ai param mapping
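For context, the effect of this change is that callers can pass the OpenAI-style `max_completion_tokens` parameter through LiteLLM and have it translated to whatever the target provider expects (for example, a plain `max_tokens` or Bedrock Converse `maxTokens`). Below is a minimal usage sketch, not taken from the PR itself; the model name is a placeholder and assumes the relevant API key is configured.

```python
import litellm

# Sketch: pass the OpenAI-style max_completion_tokens parameter and let
# LiteLLM map it to the provider's own limit parameter under the hood.
response = litellm.completion(
    model="gpt-4o-mini",  # placeholder model name
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    max_completion_tokens=64,  # parameter added by this PR
)
print(response.choices[0].message.content)
```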
This commit is contained in:
Parent: 415a3ede9e
Commit: 85acdb9193
31 changed files with 591 additions and 35 deletions
```diff
@@ -60,6 +60,7 @@ class VolcEngineConfig:
             "logit_bias",
             "logprobs",
             "top_logprobs",
+            "max_completion_tokens",
             "max_tokens",
             "n",
             "presence_penalty",
@@ -82,6 +83,8 @@ class VolcEngineConfig:
     ) -> dict:
         supported_openai_params = self.get_supported_openai_params(model)
         for param, value in non_default_params.items():
-            if param in supported_openai_params:
+            if param == "max_completion_tokens":
+                optional_params["max_tokens"] = value
+            elif param in supported_openai_params:
                 optional_params[param] = value
         return optional_params
```
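To make the mapping concrete, here is a small self-contained sketch of the same pattern the hunk above introduces; it is an illustration, not litellm's own API, and the names are local to this example. `max_completion_tokens` is rewritten to `max_tokens`, while every other supported OpenAI param passes through unchanged.

```python
# Illustrative re-implementation of the param-mapping pattern from the diff
# above; SUPPORTED_OPENAI_PARAMS and map_openai_params are local to this sketch.
SUPPORTED_OPENAI_PARAMS = [
    "logit_bias",
    "logprobs",
    "top_logprobs",
    "max_completion_tokens",
    "max_tokens",
    "n",
    "presence_penalty",
]

def map_openai_params(non_default_params: dict, optional_params: dict) -> dict:
    for param, value in non_default_params.items():
        if param == "max_completion_tokens":
            # OpenAI-style name is translated to the provider's max_tokens.
            optional_params["max_tokens"] = value
        elif param in SUPPORTED_OPENAI_PARAMS:
            optional_params[param] = value
    return optional_params

# Example: max_completion_tokens is translated, n passes through.
print(map_openai_params({"max_completion_tokens": 128, "n": 2}, {}))
# -> {'max_tokens': 128, 'n': 2}
```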