Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-24 18:24:20 +00:00)
fixing claude max token testing

commit 54409a2a30
parent dd61a5b35e
5 changed files with 17 additions and 8 deletions
```diff
@@ -60,6 +60,8 @@ def completion(
             prompt += f"{AnthropicConstants.HUMAN_PROMPT.value}{message['content']}"
     prompt += f"{AnthropicConstants.AI_PROMPT.value}"
     max_tokens_to_sample = optional_params.get("max_tokens_to_sample", 256) # required anthropic param, default to 256 if user does not provide an input
+    if max_tokens_to_sample != 256: # not default - print for testing
+        print_verbose(f"LiteLLM.Anthropic: Max Tokens Set")
     data = {
         "model": model,
         "prompt": prompt,
```
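For context, a minimal sketch of how the new branch might be exercised from the calling side, assuming `litellm.set_verbose` surfaces `print_verbose` output and that a `max_tokens` argument to `litellm.completion` is mapped into `optional_params["max_tokens_to_sample"]` for Anthropic models (that mapping is not part of this hunk, and the model name below is only illustrative):

```python
import litellm
from litellm import completion

# Assumption: set_verbose routes print_verbose output to stdout in this version.
litellm.set_verbose = True

messages = [{"role": "user", "content": "Say hello in one short sentence."}]

# A non-default token budget (anything other than 256) should take the
# `if max_tokens_to_sample != 256` branch and log "LiteLLM.Anthropic: Max Tokens Set".
response = completion(
    model="claude-instant-1",  # assumption: an Anthropic model routed to this provider code
    messages=messages,
    max_tokens=100,            # assumption: forwarded to Anthropic as max_tokens_to_sample
)
print(response)
```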