diff --git a/litellm/tests/test_anthropic_completion.py b/litellm/tests/test_anthropic_completion.py
index cac0945d8..15d150a56 100644
--- a/litellm/tests/test_anthropic_completion.py
+++ b/litellm/tests/test_anthropic_completion.py
@@ -48,6 +48,42 @@ def test_anthropic_completion_input_translation():
     ]
 
 
+def test_anthropic_completion_input_translation_with_metadata():
+    """
+    Tests that cost tracking works as expected with LiteLLM Proxy.
+
+    LiteLLM Proxy inserts `litellm_metadata` for anthropic endpoints to track
+    `user_api_key` and `user_api_key_team_id`.
+
+    This test ensures that `litellm_metadata` is not present in the translated input,
+    and that `litellm.acompletion()` will receive `metadata`, which is a litellm-specific param.
+    """
+    data = {
+        "model": "gpt-3.5-turbo",
+        "messages": [{"role": "user", "content": "Hey, how's it going?"}],
+        "litellm_metadata": {
+            "user_api_key": "88dc28d0f030c55ed4ab77ed8faf098196cb1c05df778539800c9f1243fe6b4b",
+            "user_api_key_alias": None,
+            "user_api_end_user_max_budget": None,
+            "litellm_api_version": "1.40.19",
+            "global_max_parallel_requests": None,
+            "user_api_key_user_id": "default_user_id",
+            "user_api_key_org_id": None,
+            "user_api_key_team_id": None,
+            "user_api_key_team_alias": None,
+            "user_api_key_team_max_budget": None,
+            "user_api_key_team_spend": None,
+            "user_api_key_spend": 0.0,
+            "user_api_key_max_budget": None,
+            "user_api_key_metadata": {},
+        },
+    }
+    translated_input = anthropic_adapter.translate_completion_input_params(kwargs=data)
+
+    assert "litellm_metadata" not in translated_input
+    assert "metadata" in translated_input
+    assert translated_input["metadata"] == data["litellm_metadata"]
+
+
 def test_anthropic_completion_e2e():
     litellm.set_verbose = True