Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 18:54:30 +00:00
rename safe_messages to trim_messages

parent 7d41e701a6
commit dda87a55ff

3 changed files with 12 additions and 12 deletions
@@ -10,14 +10,14 @@ sys.path.insert(
 ) # Adds the parent directory to the system path
 import pytest
 import litellm
-from litellm.utils import safe_messages, get_token_count
+from litellm.utils import trim_messages, get_token_count
 
 # Assuming your trim_messages, shorten_message_to_fit_limit, and get_token_count functions are all in a module named 'message_utils'
 
 # Test 1: Check trimming of normal message
 def test_basic_trimming():
     messages = [{"role": "user", "content": "This is a long message that definitely exceeds the token limit."}]
-    trimmed_messages = safe_messages(messages, model="claude-2", max_tokens=8)
+    trimmed_messages = trim_messages(messages, model="claude-2", max_tokens=8)
     print("trimmed messages")
     print(trimmed_messages)
     # print(get_token_count(messages=trimmed_messages, model="claude-2"))
@@ -26,7 +26,7 @@ test_basic_trimming()
 
 def test_basic_trimming_no_max_tokens_specified():
     messages = [{"role": "user", "content": "This is a long message that is definitely under the token limit."}]
-    trimmed_messages = safe_messages(messages, model="gpt-4")
+    trimmed_messages = trim_messages(messages, model="gpt-4")
     print("trimmed messages for gpt-4")
     print(trimmed_messages)
     # print(get_token_count(messages=trimmed_messages, model="claude-2"))
@@ -38,7 +38,7 @@ def test_multiple_messages_trimming():
         {"role": "user", "content": "This is a long message that will exceed the token limit."},
         {"role": "user", "content": "This is another long message that will also exceed the limit."}
     ]
-    trimmed_messages = safe_messages(messages=messages, model="gpt-3.5-turbo", max_tokens=20)
+    trimmed_messages = trim_messages(messages=messages, model="gpt-3.5-turbo", max_tokens=20)
     print("Trimmed messages")
     print(trimmed_messages)
     # print(get_token_count(messages=trimmed_messages, model="gpt-3.5-turbo"))
@@ -50,7 +50,7 @@ def test_multiple_messages_no_trimming():
         {"role": "user", "content": "This is a long message that will exceed the token limit."},
         {"role": "user", "content": "This is another long message that will also exceed the limit."}
     ]
-    trimmed_messages = safe_messages(messages=messages, model="gpt-3.5-turbo", max_tokens=100)
+    trimmed_messages = trim_messages(messages=messages, model="gpt-3.5-turbo", max_tokens=100)
     print("Trimmed messages")
     print(trimmed_messages)
     assert(messages==trimmed_messages)
@@ -60,7 +60,7 @@ test_multiple_messages_no_trimming()
 
 def test_large_trimming():
     messages = [{"role": "user", "content": "This is a singlelongwordthatexceedsthelimit."}, {"role": "user", "content": "This is a singlelongwordthatexceedsthelimit."},{"role": "user", "content": "This is a singlelongwordthatexceedsthelimit."},{"role": "user", "content": "This is a singlelongwordthatexceedsthelimit."},{"role": "user", "content": "This is a singlelongwordthatexceedsthelimit."}]
-    trimmed_messages = safe_messages(messages, max_tokens=20, model="random")
+    trimmed_messages = trim_messages(messages, max_tokens=20, model="random")
     print("trimmed messages")
     print(trimmed_messages)
     assert(get_token_count(messages=trimmed_messages, model="random")) <= 20
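Below is a minimal usage sketch of the renamed helper, following the calls exercised in the tests above. The signatures of trim_messages and get_token_count are taken from this diff; anything beyond what these tests show (such as the default budget when max_tokens is omitted) is an assumption, not confirmed behavior.

    # Sketch: trim a conversation to a token budget using the renamed helper.
    from litellm.utils import trim_messages, get_token_count

    messages = [
        {"role": "user", "content": "A long first message that may exceed the budget."},
        {"role": "user", "content": "A long second message that may exceed it too."},
    ]

    # Trim the conversation so it fits a 20-token budget for the given model.
    trimmed = trim_messages(messages, model="gpt-3.5-turbo", max_tokens=20)

    # With no max_tokens, the tests above suggest a model-derived default is
    # used (assumption: likely the model's context window).
    trimmed_default = trim_messages(messages, model="gpt-4")

    # The trimmed result should fit within the requested budget, mirroring
    # the assertion in test_large_trimming.
    assert get_token_count(messages=trimmed, model="gpt-3.5-turbo") <= 20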