diff --git a/litellm/main.py b/litellm/main.py
index 35ff3f7b95..a37d8f1eb1 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -2206,7 +2206,7 @@ def completion(  # type: ignore # noqa: PLR0915
             data = {"model": model, "messages": messages, **optional_params}

             ## COMPLETION CALL
-            response = openai_chat_completions.completion(
+            response = openai_like_chat_completion.completion(
                 model=model,
                 messages=messages,
                 headers=headers,
@@ -2221,6 +2221,8 @@ def completion(  # type: ignore # noqa: PLR0915
                 acompletion=acompletion,
                 timeout=timeout,  # type: ignore
                 custom_llm_provider="openrouter",
+                custom_prompt_dict=custom_prompt_dict,
+                encoding=encoding,
             )
             ## LOGGING
             logging.post_call(
diff --git a/tests/local_testing/test_completion.py b/tests/local_testing/test_completion.py
index dd924a6d5d..a8359b43c4 100644
--- a/tests/local_testing/test_completion.py
+++ b/tests/local_testing/test_completion.py
@@ -2605,6 +2605,21 @@ def test_completion_openrouter1():
         pytest.fail(f"Error occurred: {e}")


+def test_completion_openrouter_reasoning_effort():
+    try:
+        litellm.set_verbose = True
+        response = completion(
+            model="openrouter/deepseek/deepseek-r1",
+            messages=messages,
+            include_reasoning=True,
+            max_tokens=5,
+        )
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
+
 # test_completion_openrouter1()


diff --git a/tests/local_testing/test_get_model_info.py b/tests/local_testing/test_get_model_info.py
index 910158cddd..edf9183ad2 100644
--- a/tests/local_testing/test_get_model_info.py
+++ b/tests/local_testing/test_get_model_info.py
@@ -376,3 +376,24 @@ def test_get_model_info_huggingface_models(monkeypatch):
         providers=["huggingface"],
         **info,
     )
+
+
+@pytest.mark.parametrize(
+    "model, provider",
+    [
+        ("bedrock/us-east-2/us.anthropic.claude-3-haiku-20240307-v1:0", None),
+        (
+            "bedrock/us-east-2/us.anthropic.claude-3-haiku-20240307-v1:0",
+            "bedrock",
+        ),
+    ],
+)
+def test_get_model_info_cost_calculator_bedrock_region_cris_stripped(model, provider):
+    """
+    ensure cross region inferencing model is used correctly
+    Relevant Issue: https://github.com/BerriAI/litellm/issues/8115
+    """
+    info = get_model_info(model=model, custom_llm_provider=provider)
+    print("info", info)
+    assert info["key"] == "us.anthropic.claude-3-haiku-20240307-v1:0"
+    assert info["litellm_provider"] == "bedrock"
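
Usage sketch (illustrative, not part of the patch): the new test above exercises the rerouted OpenRouter path with DeepSeek-R1's reasoning output. A minimal standalone call would look like the following, assuming OPENROUTER_API_KEY is set in the environment; the message content here is a made-up example, and the exact shape of the reasoning output in the response depends on OpenRouter.

import litellm

# Illustrative only: mirrors the kwargs used by the new test.
# Assumes OPENROUTER_API_KEY is exported in the environment.
response = litellm.completion(
    model="openrouter/deepseek/deepseek-r1",
    messages=[{"role": "user", "content": "What is 1 + 1?"}],
    include_reasoning=True,  # OpenRouter flag requesting reasoning tokens
    max_tokens=5,
)
print(response)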