(test) strict test ft:gpt-3.5 cost calc

ishaan-jaff 2023-11-23 14:11:01 -08:00
parent 46fc8be4c8
commit 90a9116a17


@@ -5,7 +5,9 @@ sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path
import time
import litellm
from litellm import get_max_tokens, model_cost, open_ai_chat_completion_models
import pytest

def test_get_gpt3_tokens():
    max_tokens = get_max_tokens("gpt-3.5-turbo")
@@ -26,4 +28,31 @@ def test_zephyr_hf_tokens():
    print(max_tokens)
    assert max_tokens == 32768

test_zephyr_hf_tokens()
def test_cost_ft_gpt_35():
    try:
        # this tests if litellm.completion_cost can calculate cost for ft:gpt-3.5-turbo:my-org:custom_suffix:id
        # it needs to look up ft:gpt-3.5-turbo in the litellm model_cost map to get the correct cost
        from litellm import ModelResponse, Choices, Message
        from litellm.utils import Usage

        resp = ModelResponse(
            id='chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac',
            choices=[Choices(finish_reason=None, index=0,
                             message=Message(content=' Sure! Here is a short poem about the sky:\n\nA canvas of blue, a', role='assistant'))],
            created=1700775391,
            model='ft:gpt-3.5-turbo:my-org:custom_suffix:id',
            object='chat.completion', system_fingerprint=None,
            usage=Usage(prompt_tokens=21, completion_tokens=17, total_tokens=38)
        )
        cost = litellm.completion_cost(completion_response=resp)
        print("\n Calculated Cost for ft:gpt-3.5", cost)
        input_cost = model_cost["ft:gpt-3.5-turbo"]["input_cost_per_token"]
        output_cost = model_cost["ft:gpt-3.5-turbo"]["output_cost_per_token"]
        expected_cost = (input_cost * resp.usage.prompt_tokens) + (output_cost * resp.usage.completion_tokens)
print("\n Excpected cost", expected_cost)
assert cost == expected_cost
    except Exception as e:
        pytest.fail(f"Cost calc failed for ft:gpt-3.5. Error: {e}")
test_cost_ft_gpt_35()
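
For reference, the expected-cost arithmetic the test encodes is simply token counts multiplied by per-token prices taken from the cost map. The sketch below reproduces that calculation outside of litellm; the helper name expected_completion_cost and the example prices (3e-06 input, 6e-06 output per token) are illustrative assumptions, not values pulled from litellm's model_cost map.

# Minimal sketch of the cost formula the test checks, assuming example prices.
# Real per-token prices come from litellm's model_cost map entry for "ft:gpt-3.5-turbo".

def expected_completion_cost(prompt_tokens: int,
                             completion_tokens: int,
                             input_cost_per_token: float,
                             output_cost_per_token: float) -> float:
    # cost = prompt tokens * input price + completion tokens * output price
    return (prompt_tokens * input_cost_per_token) + (completion_tokens * output_cost_per_token)

if __name__ == "__main__":
    # Token counts match the mocked response above (21 prompt, 17 completion);
    # the prices are placeholders for illustration only.
    print(expected_completion_cost(21, 17, 3e-06, 6e-06))  # prints ~0.000165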