forked from phoenix/litellm-mirror
include methods in init import, add test, fix encode/decode param ordering
parent 3449a5e446
commit 2d43153efa
5 changed files with 19 additions and 5 deletions
@@ -608,6 +608,8 @@ from .utils import (
     get_optional_params,
     modify_integration,
     token_counter,
+    create_pretrained_tokenizer,
+    create_tokenizer,
     cost_per_token,
     completion_cost,
     supports_function_calling,
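With create_pretrained_tokenizer and create_tokenizer re-exported from the package root, callers no longer need to reach into litellm.utils. A minimal sketch of the new import path (not part of the diff; the hub identifier is the one used in the test below):

    from litellm import create_pretrained_tokenizer

    # Build the tokenizer once, then reuse it across token_counter/encode/decode calls.
    llama3_tokenizer = create_pretrained_tokenizer("Xenova/llama-3-tokenizer")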
@@ -33,6 +33,8 @@ from litellm.utils import (
     async_mock_completion_streaming_obj,
     convert_to_model_response_object,
     token_counter,
+    create_pretrained_tokenizer,
+    create_tokenizer,
     Usage,
     get_optional_params_embeddings,
     get_optional_params_image_gen,
@@ -9,7 +9,7 @@ sys.path.insert(
     0, os.path.abspath("../..")
 )  # Adds the parent directory to the system path
 import time
-from litellm import token_counter, encode, decode
+from litellm import token_counter, create_pretrained_tokenizer, encode, decode
 
 
 def test_token_counter_normal_plus_function_calling():
@@ -69,15 +69,23 @@ def test_tokenizers():
             model="meta-llama/Llama-2-7b-chat", text=sample_text
         )
 
+        # llama3 tokenizer (also testing custom tokenizer)
+        llama3_tokens_1 = token_counter(model="meta-llama/llama-3-70b-instruct", text=sample_text)
+
+        llama3_tokenizer = create_pretrained_tokenizer("Xenova/llama-3-tokenizer")
+        llama3_tokens_2 = token_counter(custom_tokenizer=llama3_tokenizer, text=sample_text)
+
         print(
-            f"openai tokens: {openai_tokens}; claude tokens: {claude_tokens}; cohere tokens: {cohere_tokens}; llama2 tokens: {llama2_tokens}"
+            f"openai tokens: {openai_tokens}; claude tokens: {claude_tokens}; cohere tokens: {cohere_tokens}; llama2 tokens: {llama2_tokens}; llama3 tokens: {llama3_tokens_1}"
         )
 
         # assert that all token values are different
         assert (
-            openai_tokens != cohere_tokens != llama2_tokens
+            openai_tokens != cohere_tokens != llama2_tokens != llama3_tokens_1
         ), "Token values are not different."
 
+        assert llama3_tokens_1 == llama3_tokens_2, "Custom tokenizer is not being used! It has been configured to use the same tokenizer as the built in llama3 tokenizer and the results should be the same."
+
         print("test tokenizer: It worked!")
     except Exception as e:
         pytest.fail(f"An exception occured: {e}")
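The new assertions cover both paths: the built-in llama3 tokenizer and a user-constructed one wrap the same vocabulary, so their counts must match. For tokenizers not published on the Hugging Face hub, create_tokenizer is the sibling entry point; a sketch, assuming it accepts the raw contents of a serialized tokenizer.json file (the local path is hypothetical):

    from litellm import create_tokenizer, token_counter

    # Load a locally stored Hugging Face tokenizer definition.
    with open("tokenizer.json") as f:
        custom = create_tokenizer(f.read())

    n_tokens = token_counter(custom_tokenizer=custom, text="hello world")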
@@ -20,6 +20,8 @@ from litellm.utils import (
     validate_environment,
     function_to_dict,
     token_counter,
+    create_pretrained_tokenizer,
+    create_tokenizer,
 )
 
 # Assuming your trim_messages, shorten_message_to_fit_limit, and get_token_count functions are all in a module named 'message_utils'
@@ -3696,7 +3696,7 @@ def _select_tokenizer(model: str):
     return {"type": "openai_tokenizer", "tokenizer": encoding}
 
 
-def encode(model: str, custom_tokenizer: Optional[dict] = None, text: str):
+def encode(model="", text="", custom_tokenizer: Optional[dict] = None):
     """
     Encodes the given text using the specified model.
 
@@ -3713,7 +3713,7 @@ def encode(model: str, custom_tokenizer: Optional[dict] = None, text: str):
     return enc
 
 
-def decode(model: str, custom_tokenizer: Optional[dict] = None, tokens: List[int]):
+def decode(model="", tokens: List[int] = [], custom_tokenizer: Optional[dict] = None):
     tokenizer_json = custom_tokenizer or _select_tokenizer(model=model)
     dec = tokenizer_json["tokenizer"].decode(tokens)
     return dec
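The old signatures were not merely awkward, they were invalid Python: a parameter without a default (text: str, tokens: List[int]) followed custom_tokenizer, which has one, and the interpreter rejects that with "SyntaxError: non-default argument follows default argument". The new order keeps the payload arguments up front. A minimal sketch of the resulting call pattern (not part of the diff):

    from litellm import encode, decode

    # Round-trip a string through the tokenizer selected for the model.
    tokens = encode(model="gpt-3.5-turbo", text="hello world")
    assert decode(model="gpt-3.5-turbo", tokens=tokens) == "hello world"

The mutable default tokens: List[int] = [] is usually a red flag in Python, but decode only reads from it, so no state leaks between calls.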