refactor(ollama): model availability check (#986)
# What does this PR do?

Moved the model availability check into a dedicated `check_model_availability` function, and eliminated redundant code by reusing that helper in both the embedding and non-embedding model registration paths.

Signed-off-by: Sébastien Han <seb@redhat.com>

## Test Plan

Run Ollama and serve two models so that most of the unit tests pass:

```
ollama run llama3.2:3b-instruct-fp16 --keepalive 2m &
ollama run llama3.1:8b --keepalive 2m &
```

Run the unit tests:

```
uv run pytest -v -k "ollama" --inference-model=llama3.2:3b-instruct-fp16 llama_stack/providers/tests/inference/test_model_registration.py

/Users/leseb/Documents/AI/llama-stack/.venv/lib/python3.13/site-packages/pytest_asyncio/plugin.py:207: PytestDeprecationWarning: The configuration option "asyncio_default_fixture_loop_scope" is unset.
The event loop scope for asynchronous fixtures will default to the fixture caching scope. Future versions of pytest-asyncio will default the loop scope for asynchronous fixtures to function scope. Set the default fixture loop scope explicitly in order to avoid unexpected behavior in the future. Valid fixture loop scopes are: "function", "class", "module", "package", "session"
  warnings.warn(PytestDeprecationWarning(_DEFAULT_FIXTURE_LOOP_SCOPE_UNSET))
============================================ test session starts =============================================
platform darwin -- Python 3.13.1, pytest-8.3.4, pluggy-1.5.0 -- /Users/leseb/Documents/AI/llama-stack/.venv/bin/python3
cachedir: .pytest_cache
metadata: {'Python': '3.13.1', 'Platform': 'macOS-15.3-arm64-arm-64bit-Mach-O', 'Packages': {'pytest': '8.3.4', 'pluggy': '1.5.0'}, 'Plugins': {'html': '4.1.1', 'metadata': '3.1.1', 'asyncio': '0.25.3', 'anyio': '4.8.0', 'nbval': '0.11.0'}}
rootdir: /Users/leseb/Documents/AI/llama-stack
configfile: pyproject.toml
plugins: html-4.1.1, metadata-3.1.1, asyncio-0.25.3, anyio-4.8.0, nbval-0.11.0
asyncio: mode=Mode.STRICT, asyncio_default_fixture_loop_scope=None
collected 65 items / 60 deselected / 5 selected

llama_stack/providers/tests/inference/test_model_registration.py::TestModelRegistration::test_register_unsupported_model[-ollama] PASSED [ 20%]
llama_stack/providers/tests/inference/test_model_registration.py::TestModelRegistration::test_register_nonexistent_model[-ollama] PASSED [ 40%]
llama_stack/providers/tests/inference/test_model_registration.py::TestModelRegistration::test_register_with_llama_model[-ollama] FAILED [ 60%]
llama_stack/providers/tests/inference/test_model_registration.py::TestModelRegistration::test_initialize_model_during_registering[-ollama] FAILED [ 80%]
llama_stack/providers/tests/inference/test_model_registration.py::TestModelRegistration::test_register_with_invalid_llama_model[-ollama] PASSED [100%]

================================================== FAILURES ==================================================
_______________________ TestModelRegistration.test_register_with_llama_model[-ollama] ________________________
llama_stack/providers/tests/inference/test_model_registration.py:54: in test_register_with_llama_model
    _ = await models_impl.register_model(
llama_stack/providers/utils/telemetry/trace_protocol.py:91: in async_wrapper
    result = await method(self, *args, **kwargs)
llama_stack/distribution/routers/routing_tables.py:245: in register_model
    registered_model = await self.register_object(model)
llama_stack/distribution/routers/routing_tables.py:192: in register_object
    registered_obj = await register_object_with_provider(obj, p)
llama_stack/distribution/routers/routing_tables.py:53: in register_object_with_provider
    return await p.register_model(obj)
llama_stack/providers/utils/telemetry/trace_protocol.py:91: in async_wrapper
    result = await method(self, *args, **kwargs)
llama_stack/providers/remote/inference/ollama/ollama.py:368: in register_model
    await check_model_availability(model.provider_resource_id)
llama_stack/providers/remote/inference/ollama/ollama.py:359: in check_model_availability
    raise ValueError(
E   ValueError: Model 'custom-model' is not available in Ollama. Available models: llama3.1:8b, llama3.2:3b-instruct-fp16
__________________ TestModelRegistration.test_initialize_model_during_registering[-ollama] ___________________
llama_stack/providers/tests/inference/test_model_registration.py:85: in test_initialize_model_during_registering
    mock_load_model.assert_called_once()
/opt/homebrew/Cellar/python@3.13/3.13.1/Frameworks/Python.framework/Versions/3.13/lib/python3.13/unittest/mock.py:956: in assert_called_once
    raise AssertionError(msg)
E   AssertionError: Expected 'load_model' to have been called once. Called 0 times.
-------------------------------------------- Captured stderr call --------------------------------------------
W0207 11:55:26.777000 90854 .venv/lib/python3.13/site-packages/torch/distributed/elastic/multiprocessing/redirects.py:29] NOTE: Redirects are currently not supported in Windows or MacOs.
========================================== short test summary info ===========================================
FAILED llama_stack/providers/tests/inference/test_model_registration.py::TestModelRegistration::test_register_with_llama_model[-ollama] - ValueError: Model 'custom-model' is not available in Ollama. Available models: llama3.1:8b, llama3.2:3b-i...
FAILED llama_stack/providers/tests/inference/test_model_registration.py::TestModelRegistration::test_initialize_model_during_registering[-ollama] - AssertionError: Expected 'load_model' to have been called once. Called 0 times.
=========================== 2 failed, 3 passed, 60 deselected, 2 warnings in 1.84s ===========================
```

We only "care" about `test_register_nonexistent_model` for this change.

## Sources

Please link relevant resources if necessary.

## Before submitting

- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [ ] Ran pre-commit to handle lint / formatting issues.
- [ ] Read the [contributor guideline](https://github.com/meta-llama/llama-stack/blob/main/CONTRIBUTING.md), Pull Request section?
- [ ] Updated relevant documentation.
- [ ] Wrote necessary unit or integration tests.

Signed-off-by: Sébastien Han <seb@redhat.com>
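A note on the check itself: the new helper uses the Ollama client's `ps()` call, which reports models currently loaded in memory (hence the `--keepalive 2m` flags in the Test Plan), rather than `list()`, which reports everything pulled locally. Below is a minimal standalone sketch of the same pattern, assuming the `ollama` Python client with a dict-style response as in the patched code; the host and model id are illustrative only.

```python
# Standalone sketch of the availability-check pattern from this PR.
# Assumes the `ollama` Python client; host and model id are examples only.
import asyncio

from ollama import AsyncClient


async def check_model_availability(client: AsyncClient, model_id: str) -> None:
    # ps() lists models currently loaded in memory on the Ollama server,
    # so a model only shows up while it is kept alive (e.g. `--keepalive 2m`).
    response = await client.ps()
    available_models = [m["model"] for m in response["models"]]
    if model_id not in available_models:
        raise ValueError(
            f"Model '{model_id}' is not available in Ollama. Available models: {', '.join(available_models)}"
        )


async def main() -> None:
    client = AsyncClient(host="http://localhost:11434")
    await check_model_availability(client, "llama3.2:3b-instruct-fp16")
    print("model is loaded")


if __name__ == "__main__":
    asyncio.run(main())
```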
parent 2a4a612373
commit 316c43fdaf
1 changed file with 9 additions and 13 deletions: llama_stack/providers/remote/inference/ollama/ollama.py
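For reference, here is `register_model` as it reads after this change, reconstructed from the diff below (blank-line placement is approximate):

```python
async def register_model(self, model: Model) -> Model:
    async def check_model_availability(model_id: str):
        response = await self.client.ps()
        available_models = [m["model"] for m in response["models"]]
        if model_id not in available_models:
            raise ValueError(
                f"Model '{model_id}' is not available in Ollama. Available models: {', '.join(available_models)}"
            )

    if model.model_type == ModelType.embedding:
        await check_model_availability(model.provider_resource_id)
        return model

    model = await self.register_helper.register_model(model)
    await check_model_availability(model.provider_resource_id)

    return model
```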
```diff
@@ -352,24 +352,20 @@ class OllamaInferenceAdapter(Inference, ModelsProtocolPrivate):
         return EmbeddingsResponse(embeddings=embeddings)
 
     async def register_model(self, model: Model) -> Model:
-        # ollama does not have embedding models running. Check if the model is in list of available models.
-        if model.model_type == ModelType.embedding:
-            response = await self.client.list()
+        async def check_model_availability(model_id: str):
+            response = await self.client.ps()
             available_models = [m["model"] for m in response["models"]]
-            if model.provider_resource_id not in available_models:
+            if model_id not in available_models:
                 raise ValueError(
-                    f"Model '{model.provider_resource_id}' is not available in Ollama. "
-                    f"Available models: {', '.join(available_models)}"
+                    f"Model '{model_id}' is not available in Ollama. Available models: {', '.join(available_models)}"
                 )
+
+        if model.model_type == ModelType.embedding:
+            await check_model_availability(model.provider_resource_id)
             return model
 
         model = await self.register_helper.register_model(model)
-        models = await self.client.ps()
-        available_models = [m["model"] for m in models["models"]]
-        if model.provider_resource_id not in available_models:
-            raise ValueError(
-                f"Model '{model.provider_resource_id}' is not available in Ollama. "
-                f"Available models: {', '.join(available_models)}"
-            )
+        await check_model_availability(model.provider_resource_id)
+
         return model
 
```
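The error path can also be exercised without a live server by driving the same check with a fake client. This is an illustrative sketch only: the `FakeClient` and the inlined helper are not part of llama-stack, and it assumes `pytest-asyncio` is installed. It mirrors what `test_register_nonexistent_model` verifies through the full registration stack.

```python
# Hedged sketch: a fake client stands in for Ollama so the ValueError path
# of the availability check can be tested in isolation.
import pytest


class FakeClient:
    async def ps(self):
        # Shape of the response the patched code expects from client.ps().
        return {"models": [{"model": "llama3.1:8b"}, {"model": "llama3.2:3b-instruct-fp16"}]}


async def check_model_availability(client, model_id: str) -> None:
    response = await client.ps()
    available_models = [m["model"] for m in response["models"]]
    if model_id not in available_models:
        raise ValueError(
            f"Model '{model_id}' is not available in Ollama. Available models: {', '.join(available_models)}"
        )


@pytest.mark.asyncio
async def test_nonexistent_model_is_rejected():
    with pytest.raises(ValueError, match="custom-model"):
        await check_model_availability(FakeClient(), "custom-model")


@pytest.mark.asyncio
async def test_loaded_model_is_accepted():
    # Does not raise: the model id matches one reported by ps().
    await check_model_availability(FakeClient(), "llama3.1:8b")
```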