forked from phoenix/litellm-mirror
Added a simple test of `get_supported_openai_params` with no custom provider
This commit is contained in:
parent
cb5456c9f5
commit
8be37bee04
1 changed file with 5 additions and 0 deletions
|
@ -23,6 +23,7 @@ from litellm.utils import (
|
|||
create_pretrained_tokenizer,
|
||||
create_tokenizer,
|
||||
get_max_tokens,
|
||||
get_supported_openai_params,
|
||||
)
|
||||
|
||||
# Assuming your trim_messages, shorten_message_to_fit_limit, and get_token_count functions are all in a module named 'message_utils'
|
||||
|
@ -386,3 +387,7 @@ def test_get_max_token_unit_test():
|
|||
) # Returns a number instead of throwing an Exception
|
||||
|
||||
assert isinstance(max_tokens, int)
|
||||
|
||||
|
||||
def test_get_supported_openai_params() -> None:
    """Smoke test: with no custom provider, a known model maps to a list of params.

    Calls get_supported_openai_params with only the model name ("gpt-4") and
    verifies the provider lookup succeeds by checking the result is a list.
    """
    supported = get_supported_openai_params("gpt-4")
    assert isinstance(supported, list)
|
Loading…
Add table
Add a link
Reference in a new issue