diff --git a/litellm/llms/OpenAI/o1_reasoning.py b/litellm/llms/OpenAI/o1_reasoning.py index dcfe2d06c..6d15319bb 100644 --- a/litellm/llms/OpenAI/o1_reasoning.py +++ b/litellm/llms/OpenAI/o1_reasoning.py @@ -65,7 +65,9 @@ class OpenAIO1Config(OpenAIConfig): param for param in all_openai_params if param not in non_supported_params ] - def map_openai_params(self, non_default_params: dict, optional_params: dict): + def map_openai_params( + self, non_default_params: dict, optional_params: dict, model: str + ): for param, value in non_default_params.items(): if param == "max_tokens": optional_params["max_completion_tokens"] = value diff --git a/litellm/llms/OpenAI/openai.py b/litellm/llms/OpenAI/openai.py index d90c04b62..89f397032 100644 --- a/litellm/llms/OpenAI/openai.py +++ b/litellm/llms/OpenAI/openai.py @@ -573,6 +573,7 @@ class OpenAIConfig: return litellm.OpenAIO1Config().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, + model=model, ) supported_openai_params = self.get_supported_openai_params(model) for param, value in non_default_params.items(): diff --git a/litellm/tests/test_openai_o1.py b/litellm/tests/test_openai_o1.py index 7c450d7e7..39dadc96e 100644 --- a/litellm/tests/test_openai_o1.py +++ b/litellm/tests/test_openai_o1.py @@ -50,4 +50,3 @@ async def test_o1_handle_system_role(respx_mock: MockRouter): print(f"response: {response}") assert isinstance(response, ModelResponse) - assert response.choices[0].message.content == "Mocked response"