mirror of https://github.com/meta-llama/llama-stack.git
synced 2025-07-29 07:14:20 +00:00
update nvidia/test_supervised_fine_tuning.py to simulate an available non-llama model
This commit is contained in:
parent 6c16e2c0fd
commit f4af72d98e
1 changed file with 6 additions and 2 deletions
@@ -7,7 +7,7 @@
 import os
 import unittest
 import warnings
-from unittest.mock import patch
+from unittest.mock import AsyncMock, patch

 import pytest
@@ -342,6 +342,10 @@ class TestNvidiaPostTraining(unittest.TestCase):
             provider_resource_id=model_id,
             model_type=model_type,
         )

+        # simulate a NIM where default/job-1234 is an available model
+        with patch.object(self.inference_adapter, "query_available_models", new_callable=AsyncMock) as mock_query:
+            mock_query.return_value = [model_id]
+            result = self.run_async(self.inference_adapter.register_model(model))
         assert result == model
         assert len(self.inference_adapter.alias_to_provider_id_map) > 1
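For context, below is a minimal, self-contained sketch of the mocking pattern the diff relies on: patch.object(..., new_callable=AsyncMock) swaps out the adapter's async model-discovery call so the test can claim that a non-llama model is served by the NIM. The ToyAdapter class, its method bodies, and the run_async helper shown here are simplified stand-ins invented for illustration; only the use of AsyncMock with patch.object, the query_available_models/register_model names, and the "default/job-1234" model id come from the diff above.

import asyncio
import unittest
from unittest.mock import AsyncMock, patch


class ToyAdapter:
    """Stand-in for an inference adapter with an async model-discovery call."""

    def __init__(self):
        self.alias_to_provider_id_map = {}

    async def query_available_models(self) -> list[str]:
        # In the real adapter this would query a remote NIM endpoint.
        raise RuntimeError("network access is not available in unit tests")

    async def register_model(self, model_id: str) -> str:
        available = await self.query_available_models()
        if model_id not in available:
            raise ValueError(f"{model_id} is not served by this NIM")
        self.alias_to_provider_id_map[model_id] = model_id
        return model_id


class TestToyAdapter(unittest.TestCase):
    def setUp(self):
        self.adapter = ToyAdapter()

    def run_async(self, coro):
        # Small helper analogous to the run_async used in the NVIDIA tests.
        return asyncio.run(coro)

    def test_register_non_llama_model(self):
        model_id = "default/job-1234"
        # Replace the async discovery call with an AsyncMock whose awaited
        # return value lists the custom model as available.
        with patch.object(self.adapter, "query_available_models", new_callable=AsyncMock) as mock_query:
            mock_query.return_value = [model_id]
            result = self.run_async(self.adapter.register_model(model_id))
        self.assertEqual(result, model_id)
        mock_query.assert_awaited_once()


if __name__ == "__main__":
    unittest.main()

Because patch.object restores the original attribute when the with block exits, the fake availability list only affects the registration call inside the block; the assertions afterwards run against the adapter's normal state.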