diff --git a/docs/source/getting_started/index.md b/docs/source/getting_started/index.md
index 00301d6e7..7929dfa25 100644
--- a/docs/source/getting_started/index.md
+++ b/docs/source/getting_started/index.md
@@ -98,6 +98,7 @@ llama-stack-client \
 Here is a simple example to perform chat completions using the SDK.
 ```python
 import os
+import sys


 def create_http_client():
@@ -112,7 +113,9 @@ def create_library_client(template="ollama"):
     from llama_stack import LlamaStackAsLibraryClient

     client = LlamaStackAsLibraryClient(template)
-    client.initialize()
+    if not client.initialize():
+        print("llama stack not built properly")
+        sys.exit(1)
     return client


diff --git a/llama_stack/distribution/library_client.py b/llama_stack/distribution/library_client.py
index 54ae0cf8b..13aa67956 100644
--- a/llama_stack/distribution/library_client.py
+++ b/llama_stack/distribution/library_client.py
@@ -196,7 +196,7 @@ class AsyncLlamaStackAsLibraryClient(AsyncLlamaStackClient):
         self.custom_provider_registry = custom_provider_registry
         self.provider_data = provider_data

-    async def initialize(self):
+    async def initialize(self) -> bool:
         try:
             self.impls = await construct_stack(self.config, self.custom_provider_registry)
         except ModuleNotFoundError as _e:
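For context, the docs hunk above already demonstrates the synchronous calling convention. The sketch below shows the matching check against AsyncLlamaStackAsLibraryClient, whose initialize() signature this patch changes to return bool. It is a minimal illustration, not part of the patch: the "ollama" template argument mirrors the docs example, and the False-on-failure behavior is inferred from the new `if not client.initialize()` check in the docs rather than from the truncated except block.

```python
# Minimal sketch (assumptions noted above): exercising the new bool return
# value of AsyncLlamaStackAsLibraryClient.initialize().
import asyncio
import sys

from llama_stack.distribution.library_client import AsyncLlamaStackAsLibraryClient


async def main() -> None:
    # Assumed: the async client accepts the same template name as the
    # synchronous LlamaStackAsLibraryClient in the docs example.
    client = AsyncLlamaStackAsLibraryClient("ollama")
    # Assumed: initialize() returns False when construct_stack() fails
    # (e.g. the distribution was never built), instead of raising.
    if not await client.initialize():
        print("llama stack not built properly")
        sys.exit(1)
    # ... use `client` as a regular AsyncLlamaStackClient from here ...


if __name__ == "__main__":
    asyncio.run(main())
```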