make default max tokens a controllable param

parent 6cd1960b82
commit a964e326f1

2 changed files with 2 additions and 2 deletions
|
@@ -2,7 +2,7 @@ success_callback = []
 failure_callback = []
 set_verbose=False
 telemetry=True
-
+max_tokens = 256 # OpenAI Defaults
 ####### PROXY PARAMS ################### configurable params if you use proxy models like Helicone
 api_base = None
 headers = None
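This hunk introduces a module-level default that callers can override before invoking completion(). A minimal usage sketch, assuming the completion() signature at this commit; the model name and prompt are illustrative:

```python
import litellm

# Override the library-wide default (256 after this commit); any code
# path that falls back to litellm.max_tokens now picks up this value.
litellm.max_tokens = 512

# Illustrative call: no explicit max_tokens is passed, so the Anthropic
# branch below falls back to litellm.max_tokens for max_tokens_to_sample.
response = litellm.completion(
    model="claude-instant-1",
    messages=[{"role": "user", "content": "Say hello"}],
)
```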
|
@@ -189,7 +189,7 @@ def completion(
       if max_tokens != float('inf'):
         max_tokens_to_sample = max_tokens
       else:
-        max_tokens_to_sample = 300 # default in Anthropic docs https://docs.anthropic.com/claude/reference/client-libraries
+        max_tokens_to_sample = litellm.max_tokens # default in Anthropic docs https://docs.anthropic.com/claude/reference/client-libraries
       ## LOGGING
       logging(model=model, input=prompt, azure=azure, additional_args={"max_tokens": max_tokens}, logger_fn=logger_fn)
       ## COMPLETION CALL
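This hunk swaps the hardcoded Anthropic fallback of 300 for the new module-level default: when the caller leaves max_tokens at its float('inf') sentinel, the branch now reads litellm.max_tokens instead. The fallback logic, extracted as a standalone sketch (the function name is hypothetical; the branch bodies mirror the diff):

```python
import litellm

def resolve_max_tokens_to_sample(max_tokens: float):
    # An explicit budget from the caller always wins.
    if max_tokens != float('inf'):
        return max_tokens
    # Otherwise use the configurable default (256 after this commit)
    # instead of the previously hardcoded 300.
    return litellm.max_tokens
```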