diff --git a/llama_stack/providers/remote/inference/ollama/ollama.py b/llama_stack/providers/remote/inference/ollama/ollama.py
index 5a78c07cc..12902996b 100644
--- a/llama_stack/providers/remote/inference/ollama/ollama.py
+++ b/llama_stack/providers/remote/inference/ollama/ollama.py
@@ -307,9 +307,10 @@ class OllamaInferenceAdapter(Inference, ModelsProtocolPrivate):
         if model.model_type == ModelType.embedding:
             logger.info(f"Pulling embedding model `{model.provider_resource_id}` if necessary...")
             await self.client.pull(model.provider_resource_id)
-            response = await self.client.list()
-        else:
-            response = await self.client.ps()
+        # we use list() here instead of ps():
+        #  - ps() only lists running models, not available models
+        #  - models not currently running are run by the ollama server as needed
+        response = await self.client.list()
         available_models = [m["model"] for m in response["models"]]
         if model.provider_resource_id not in available_models:
             raise ValueError(
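
For context, the distinction the new comment draws can be sketched as a standalone availability check against the ollama Python client. This is a minimal illustration under assumptions: a local ollama server on its default port, dict-style response access as in the patched code, and a hypothetical helper name `check_model_available` that is not part of the patch.

```python
import asyncio

from ollama import AsyncClient


async def check_model_available(model_id: str) -> bool:
    client = AsyncClient()  # assumes the default host, http://localhost:11434
    # list() reports every model pulled to the server, running or not;
    # ps() would report only models currently loaded in memory, so a
    # pulled-but-idle model would wrongly look unavailable.
    response = await client.list()
    available_models = [m["model"] for m in response["models"]]
    return model_id in available_models


if __name__ == "__main__":
    # illustrative usage; "llama3.2:3b" is an arbitrary example tag
    print(asyncio.run(check_model_available("llama3.2:3b")))
```

Since the ollama server loads a model on demand when an inference request names it, availability (`list()`) rather than running state (`ps()`) is the right check at registration time, which is what motivates the patch.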