forked from phoenix/litellm-mirror
fix linting
This commit is contained in:
parent be5a92c40a
commit d70cff8713
1 changed file with 3 additions and 5 deletions
@@ -2445,7 +2445,7 @@ def shorten_message_to_fit_limit(
 def safe_messages(
     messages,
     model = None,
-    system_message = None,
+    system_message = None, # str of user system message
     trim_ratio: float = 0.75,
     return_response_tokens: bool = False,
     max_tokens = None
@@ -2487,13 +2487,11 @@ def safe_messages(
         #### Trimming messages if current_tokens > max_tokens
         print_verbose(f"Need to trim input messages: {messages}, current_tokens{current_tokens}, max_tokens: {max_tokens}")
         if system_message:
-            system_message_event, max_tokens = process_system_message(messages=messages, max_tokens=max_tokens, model=model)
+            system_message_event, max_tokens = process_system_message(system_message=system_message, max_tokens=max_tokens, model=model)
+            messages = messages + [system_message_event]

         final_messages = process_messages(messages=messages, max_tokens=max_tokens, model=model)

-        if system_message:
-            final_messages = [system_message_event] + final_messages
-
         if return_response_tokens: # if user wants token count with new trimmed messages
             response_tokens = max_tokens - get_token_count(final_messages, model)
             return final_messages, response_tokens
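For context, a minimal usage sketch of the trimming helper touched by this commit. The import path is an assumption based on the file being patched (utils.py in this fork); the message list, model name, and trim settings are illustrative only, and only the parameter names and the (messages, response_tokens) return shape come from the diff itself.

    # Hypothetical call into safe_messages after this change.
    # Values below are made up; parameter names are taken from the first hunk.
    from litellm.utils import safe_messages  # assumed module path in this fork

    messages = [
        {"role": "user", "content": "Summarize the attached report."},
        {"role": "assistant", "content": "Sure, please paste it."},
        {"role": "user", "content": "<several thousand tokens of report text>"},
    ]

    trimmed, response_tokens = safe_messages(
        messages,
        model="gpt-3.5-turbo",                        # used to look up the token limit
        system_message="You are a concise analyst.",  # folded in via process_system_message
        trim_ratio=0.75,                              # target roughly 75% of the model's limit
        return_response_tokens=True,                  # also report tokens left for the reply
    )

With return_response_tokens=True, the second hunk shows the function returning the trimmed message list along with the remaining token budget computed as max_tokens minus get_token_count(final_messages, model).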