Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-24 18:24:20 +00:00)
make default max tokens a controllable param
This commit is contained in: commit a964e326f1 (parent 6cd1960b82)
2 changed files with 2 additions and 2 deletions
@@ -2,7 +2,7 @@ success_callback = []
 failure_callback = []
 set_verbose=False
 telemetry=True
-
+max_tokens = 256 # OpenAI Defaults
 ####### PROXY PARAMS ################### configurable params if you use proxy models like Helicone
 api_base = None
 headers = None
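The first hunk turns the hard-coded default into a package-level parameter: max_tokens = 256 now sits alongside the other configurable globals (set_verbose, telemetry, api_base, headers), so callers can change it once per process. A minimal usage sketch, assuming the package is imported as litellm and that completion() takes the usual model/messages arguments; the model name and prompt are illustrative placeholders, not from this commit:

    import litellm
    from litellm import completion

    # Override the package-wide default (256) once; calls that do not pass
    # an explicit max_tokens can then fall back to this value.
    litellm.max_tokens = 512

    # Illustrative call; model and messages are placeholders.
    response = completion(
        model="claude-instant-1",
        messages=[{"role": "user", "content": "Say hello in one sentence."}],
    )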
@@ -189,7 +189,7 @@ def completion(
       if max_tokens != float('inf'):
         max_tokens_to_sample = max_tokens
       else:
-        max_tokens_to_sample = 300 # default in Anthropic docs https://docs.anthropic.com/claude/reference/client-libraries
+        max_tokens_to_sample = litellm.max_tokens # default in Anthropic docs https://docs.anthropic.com/claude/reference/client-libraries
       ## LOGGING
       logging(model=model, input=prompt, azure=azure, additional_args={"max_tokens": max_tokens}, logger_fn=logger_fn)
       ## COMPLETION CALL
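The second hunk is where the default is consumed: completion() treats max_tokens = float('inf') as "not set", forwards a caller-supplied value to Anthropic's max_tokens_to_sample, and otherwise now falls back to litellm.max_tokens instead of a hard-coded 300. A self-contained sketch of that fallback logic, where DEFAULT_MAX_TOKENS stands in for litellm.max_tokens and the helper name is hypothetical:

    # Sketch of the fallback pattern from the diff. DEFAULT_MAX_TOKENS mimics
    # litellm.max_tokens; resolve_max_tokens_to_sample is a hypothetical helper.
    DEFAULT_MAX_TOKENS = 256

    def resolve_max_tokens_to_sample(max_tokens=float('inf')):
        # float('inf') is the sentinel meaning "caller did not set max_tokens".
        if max_tokens != float('inf'):
            return max_tokens
        # Fall back to the configurable package default rather than a fixed 300.
        return DEFAULT_MAX_TOKENS

    print(resolve_max_tokens_to_sample())     # -> 256 (package default)
    print(resolve_max_tokens_to_sample(300))  # -> 300 (caller override)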