diff --git a/llama_stack/providers/adapters/inference/ollama/__init__.py b/llama_stack/providers/adapters/inference/ollama/__init__.py
index 013b6c8d3..0b5e35a09 100644
--- a/llama_stack/providers/adapters/inference/ollama/__init__.py
+++ b/llama_stack/providers/adapters/inference/ollama/__init__.py
@@ -15,12 +15,6 @@ async def get_adapter_impl(config: RemoteProviderConfig, _deps):
     from .ollama import OllamaInferenceAdapter
 
     impl = OllamaInferenceAdapter(config.url)
-
-    routing_key = _deps.get("routing_key")
-    if not routing_key:
-        raise ValueError(
-            "Routing key is required for the Ollama adapter but was not found."
-        )
-
-    await impl.initialize(routing_key)
+    impl._deps = _deps
+    await impl.initialize()
     return impl
diff --git a/llama_stack/providers/adapters/inference/ollama/ollama.py b/llama_stack/providers/adapters/inference/ollama/ollama.py
index 47abb9a98..f9dca8d40 100644
--- a/llama_stack/providers/adapters/inference/ollama/ollama.py
+++ b/llama_stack/providers/adapters/inference/ollama/ollama.py
@@ -45,10 +45,18 @@ class OllamaInferenceAdapter(Inference, RoutableProviderForModels):
     def client(self) -> AsyncClient:
         return AsyncClient(host=self.url)
 
-    async def initialize(self, routing_key: str) -> None:
+    async def initialize(self) -> None:
+        # Validate required configuration BEFORE any network I/O so a missing
+        # routing key fails fast with a clear ValueError, instead of being
+        # raised inside the connectivity try-block below (where it could be
+        # caught and rewrapped by the connection-error handler).
+        routing_key = self._deps.get("routing_key")
+        if not routing_key:
+            raise ValueError(
+                "Routing key is required for the Ollama adapter but was not found."
+            )
         print("Initializing Ollama, checking connectivity to server...")
         try:
             await self.client.ps()
             ollama_model = self.map_to_provider_model(routing_key)
             print(f"Connected to Ollama server. Pre-downloading {ollama_model}...")
             await self.predownload_models(ollama_model)