From 2d7ce81302cb7c4c1a8b6077ecc58745a1cbb460 Mon Sep 17 00:00:00 2001
From: Ashwin Bharambe
Date: Mon, 23 Sep 2024 10:11:43 -0700
Subject: [PATCH] Undo ollama commenting lol

---
 .../adapters/inference/ollama/ollama.py       | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/llama_stack/providers/adapters/inference/ollama/ollama.py b/llama_stack/providers/adapters/inference/ollama/ollama.py
index 296fb61a6..0e6955e7e 100644
--- a/llama_stack/providers/adapters/inference/ollama/ollama.py
+++ b/llama_stack/providers/adapters/inference/ollama/ollama.py
@@ -30,21 +30,21 @@ OLLAMA_SUPPORTED_SKUS = {
 class OllamaInferenceAdapter(Inference):
     def __init__(self, url: str) -> None:
         self.url = url
-        # tokenizer = Tokenizer.get_instance()
-        # self.formatter = ChatFormat(tokenizer)
+        tokenizer = Tokenizer.get_instance()
+        self.formatter = ChatFormat(tokenizer)
 
     @property
     def client(self) -> AsyncClient:
         return AsyncClient(host=self.url)
 
     async def initialize(self) -> None:
-        print("Ollama init")
-        # try:
-        #     await self.client.ps()
-        # except httpx.ConnectError as e:
-        #     raise RuntimeError(
-        #         "Ollama Server is not running, start it using `ollama serve` in a separate terminal"
-        #     ) from e
+        print("Initializing Ollama, checking connectivity to server...")
+        try:
+            await self.client.ps()
+        except httpx.ConnectError as e:
+            raise RuntimeError(
+                "Ollama Server is not running, start it using `ollama serve` in a separate terminal"
+            ) from e
 
     async def shutdown(self) -> None:
         pass