forked from phoenix/litellm-mirror
Merge pull request #4765 from skucherlapati/bug_4757_add_medlm_to_cost_calc
Fix failing tests on PR-4760
This commit is contained in:
commit
e7372954a4
1 changed file with 14 additions and 3 deletions
|
@ -707,12 +707,23 @@ def test_vertex_ai_completion_cost():
|
|||
|
||||
|
||||
def test_vertex_ai_medlm_completion_cost():
    """Test completion-cost calculation for Vertex AI MedLM models.

    Before the local model-cost map is loaded, a cost lookup for
    ``vertex_ai/medlm-medium`` is expected to raise. After loading the
    bundled local cost map, both ``medlm-medium`` and ``medlm-large``
    must resolve and yield a positive predictive cost.
    """
    # Shared message fixture; byte-identical content to the original test.
    messages = [{"role": "user", "content": "Test MedLM completion cost."}]

    # Without the local cost map the MedLM model is unknown, so the
    # cost lookup is expected to fail.
    with pytest.raises(Exception) as e:
        model = "vertex_ai/medlm-medium"
        predictive_cost = completion_cost(
            model=model, messages=messages, custom_llm_provider="vertex_ai"
        )

    # Load litellm's bundled (local) model-cost map so the MedLM
    # entries become resolvable.  NOTE(review): mutates process-wide
    # state (os.environ and litellm.model_cost) — presumably acceptable
    # for this test suite; verify no test-ordering dependency.
    os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True"
    litellm.model_cost = litellm.get_model_cost_map(url="")

    # medlm-medium: provider passed explicitly.
    model = "vertex_ai/medlm-medium"
    predictive_cost = completion_cost(
        model=model, messages=messages, custom_llm_provider="vertex_ai"
    )
    assert predictive_cost > 0

    # medlm-large: provider inferred from the "vertex_ai/" model prefix.
    model = "vertex_ai/medlm-large"
    predictive_cost = completion_cost(model=model, messages=messages)
    assert predictive_cost > 0
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue