mirror of https://github.com/BerriAI/litellm.git
fix(utils.py): move to using litellm.modify_params to enable max output token trimming fix
This commit is contained in:
parent bdd2004691
commit a634424fb2
2 changed files with 2 additions and 1 deletion
@@ -36,6 +36,7 @@ token: Optional[str] = (
 telemetry = True
 max_tokens = 256  # OpenAI Defaults
 drop_params = False
+modify_params = False
 retry = True
 api_key: Optional[str] = None
 openai_key: Optional[str] = None
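The new flag lives on the top-level litellm module and defaults to False. A minimal usage sketch follows; the model name, prompt, and max_tokens value are illustrative placeholders, not taken from this commit:

import litellm

# Opt in to letting LiteLLM modify request params (e.g. trim an
# over-large max_tokens down to the model's output-token limit).
# Per this commit, the flag defaults to False.
litellm.modify_params = True

# Illustrative call: if max_tokens exceeds what the model can emit,
# LiteLLM may now trim it rather than letting the provider reject
# the request outright.
response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    max_tokens=100000,  # deliberately larger than the output limit
)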
@@ -2589,7 +2589,7 @@ def client(original_function):
         if (
             kwargs.get("max_tokens", None) is not None
             and model is not None
-            and litellm.drop_params
+            and litellm.modify_params
             == True  # user is okay with params being modified
             and (
                 call_type == CallTypes.acompletion.value
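The effect of this hunk is that output-token trimming is now gated by modify_params (an explicit opt-in to rewriting request values) rather than by drop_params (which governs dropping unsupported params entirely). A minimal sketch of that guard, as a hypothetical standalone helper rather than LiteLLM's actual code:

# Hypothetical helper, for illustration only: mirrors the guarded
# trim performed inside client()'s wrapper.
def _maybe_trim_max_tokens(requested_max_tokens: int,
                           model_output_limit: int,
                           modify_params: bool) -> int:
    # Only rewrite the user's value when they have opted in.
    if modify_params and requested_max_tokens > model_output_limit:
        return model_output_limit  # trim to what the model can emit
    return requested_max_tokens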