From ce474ff00838a3933f10b79b3d5a92d8f964c926 Mon Sep 17 00:00:00 2001
From: skucherlapati
Date: Wed, 17 Jul 2024 19:32:17 -0700
Subject: [PATCH] fix failing tests on PR-4760

---
 litellm/tests/test_completion_cost.py | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/litellm/tests/test_completion_cost.py b/litellm/tests/test_completion_cost.py
index 761bd054c..5544611e6 100644
--- a/litellm/tests/test_completion_cost.py
+++ b/litellm/tests/test_completion_cost.py
@@ -707,12 +707,23 @@ def test_vertex_ai_completion_cost():
 
 
 def test_vertex_ai_medlm_completion_cost():
-    model="medlm-medium"
+    """Test for medlm completion cost."""
+
+    with pytest.raises(Exception) as e:
+        model="vertex_ai/medlm-medium"
+        messages = [{"role": "user", "content": "Test MedLM completion cost."}]
+        predictive_cost = completion_cost(model=model, messages=messages, custom_llm_provider="vertex_ai")
+
+
+    os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True"
+    litellm.model_cost = litellm.get_model_cost_map(url="")
+
+    model="vertex_ai/medlm-medium"
     messages = [{"role": "user", "content": "Test MedLM completion cost."}]
-    predictive_cost = completion_cost(model=model, messages=messages)
+    predictive_cost = completion_cost(model=model, messages=messages, custom_llm_provider="vertex_ai")
     assert predictive_cost > 0
 
-    model="medlm-large"
+    model="vertex_ai/medlm-large"
     messages = [{"role": "user", "content": "Test MedLM completion cost."}]
     predictive_cost = completion_cost(model=model, messages=messages)
     assert predictive_cost > 0