update nvidia/test_supervised_fine_tuning.py to simulate an available non-llama model

Matthew Farrellee 2025-07-11 13:03:31 -04:00
parent 6c16e2c0fd
commit f4af72d98e


@@ -7,7 +7,7 @@
 import os
 import unittest
 import warnings
-from unittest.mock import patch
+from unittest.mock import AsyncMock, patch
 
 import pytest
@@ -342,7 +342,11 @@ class TestNvidiaPostTraining(unittest.TestCase):
             provider_resource_id=model_id,
             model_type=model_type,
         )
-        result = self.run_async(self.inference_adapter.register_model(model))
+        # simulate a NIM where default/job-1234 is an available model
+        with patch.object(self.inference_adapter, "query_available_models", new_callable=AsyncMock) as mock_query:
+            mock_query.return_value = [model_id]
+            result = self.run_async(self.inference_adapter.register_model(model))
         assert result == model
         assert len(self.inference_adapter.alias_to_provider_id_map) > 1
         assert self.inference_adapter.get_provider_model_id(model.provider_model_id) == model_id
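
For reference, the pattern introduced in this diff, patching an async method with unittest.mock.AsyncMock so a synchronous test can dictate what the awaited call returns, can be reproduced in isolation. The sketch below is a minimal, self-contained illustration only: the Adapter class, its method names, and the run_async helper are hypothetical stand-ins assumed for the example, not the actual llama-stack NVIDIA inference adapter.

# Standalone sketch of the AsyncMock pattern used in the diff above.
# The Adapter class, its methods, and the model id are hypothetical
# stand-ins for illustration, not the real NVIDIA adapter API.
import asyncio
import unittest
from unittest.mock import AsyncMock, patch


class Adapter:
    async def query_available_models(self) -> list[str]:
        # Stands in for a call to a live NIM endpoint.
        raise RuntimeError("no network access in unit tests")

    async def register_model(self, model_id: str) -> str:
        available = await self.query_available_models()
        if model_id not in available:
            raise ValueError(f"{model_id} is not an available model")
        return model_id


class TestAdapter(unittest.TestCase):
    def run_async(self, coro):
        return asyncio.run(coro)

    def test_register_model_with_mocked_availability(self):
        adapter = Adapter()
        model_id = "default/job-1234"
        # new_callable=AsyncMock makes the patched attribute awaitable,
        # so `await self.query_available_models()` yields return_value.
        with patch.object(adapter, "query_available_models", new_callable=AsyncMock) as mock_query:
            mock_query.return_value = [model_id]
            result = self.run_async(adapter.register_model(model_id))
        assert result == model_id
        mock_query.assert_awaited_once()


if __name__ == "__main__":
    unittest.main()

Because the mock intercepts the availability check, the test never touches a NIM deployment, and assert_awaited_once confirms the coroutine path was actually exercised.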