From 2a4a612373d521d064f0b2ca0e7814748815da53 Mon Sep 17 00:00:00 2001
From: Charlie Doern
Date: Fri, 7 Feb 2025 12:47:02 -0500
Subject: [PATCH] fix: Ensure a better error stack trace when llama-stack is
 not built (#950)

# What does this PR do?

Currently, this is the output when you run a distribution locally without running `llama stack build`:

```
Traceback (most recent call last):
  File "/Users/charliedoern/Documents/llama-sdk.py", line 25, in <module>
    models = client.models.list()
             ^^^^^^^^^^^^^^^^^^^^
  File "/Users/charliedoern/Documents/llama-stack-client-python/src/llama_stack_client/resources/models.py", line 107, in list
    raise exc
  File "/Users/charliedoern/Documents/llama-stack-client-python/src/llama_stack_client/resources/models.py", line 95, in list
    return self._get(
           ^^^^^^^^^^
  File "/Users/charliedoern/Documents/llama-stack-client-python/src/llama_stack_client/_base_client.py", line 1212, in get
    return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/charliedoern/Documents/llama-stack/llama_stack/distribution/library_client.py", line 168, in request
    return asyncio.run(self.async_client.request(*args, **kwargs))
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/opt/homebrew/Cellar/python@3.11/3.11.10/Frameworks/Python.framework/Versions/3.11/lib/python3.11/asyncio/runners.py", line 190, in run
    return runner.run(main)
           ^^^^^^^^^^^^^^^^
  File "/opt/homebrew/Cellar/python@3.11/3.11.10/Frameworks/Python.framework/Versions/3.11/lib/python3.11/asyncio/runners.py", line 118, in run
    return self._loop.run_until_complete(task)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/opt/homebrew/Cellar/python@3.11/3.11.10/Frameworks/Python.framework/Versions/3.11/lib/python3.11/asyncio/base_events.py", line 654, in run_until_complete
    return future.result()
           ^^^^^^^^^^^^^^^
  File "/Users/charliedoern/Documents/llama-stack/llama_stack/distribution/library_client.py", line 258, in request
    if not self.endpoint_impls:
       ^^^^^^^^^^^^^^^^^^^
AttributeError: 'AsyncLlamaStackAsLibraryClient' object has no attribute 'endpoint_impls'
```

The intended exception is never raised: `initialize()` only assigns `self.endpoint_impls` after `construct_stack()` succeeds, so when the stack was never built the attribute does not exist at all and the `if not self.endpoint_impls:` check in `request()` blows up with the `AttributeError` above. Initializing `self.endpoint_impls = None` up front ensures that when users call things like `models.list()` on an uninitialized client, that check runs and a more useful error is raised telling them that the client is not properly initialized.

## Test Plan

- I ran the script found here:
  https://llama-stack.readthedocs.io/en/latest/getting_started/index.html#run-inference-with-python-sdk
  locally with the changes in this PR, and the exception was caught successfully (see the reproduction sketch after the checklist below).

## Before submitting

- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [ ] Ran pre-commit to handle lint / formatting issues.
- [ ] Read the [contributor guideline](https://github.com/meta-llama/llama-stack/blob/main/CONTRIBUTING.md), Pull Request section?
- [ ] Updated relevant documentation.
- [ ] Wrote necessary unit or integration tests.
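For reviewers, a minimal reproduction of the failing flow, adapted from the getting-started script linked above. This is a sketch: the `"ollama"` template name is illustrative, and any distribution that has not been built with `llama stack build` should reproduce the issue.

```python
from llama_stack.distribution.library_client import LlamaStackAsLibraryClient

# Any template whose distribution has not been built reproduces the issue;
# "ollama" here is illustrative.
client = LlamaStackAsLibraryClient("ollama")

# With the distribution missing, initialize() prints the ModuleNotFoundError
# message and returns without populating endpoint_impls.
client.initialize()

# Before this fix: AttributeError: 'AsyncLlamaStackAsLibraryClient' object
#                  has no attribute 'endpoint_impls'
# After this fix:  a clear "client not initialized" error is raised instead.
models = client.models.list()
```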
---------

Signed-off-by: Charlie Doern
Co-authored-by: Ashwin Bharambe
---
 llama_stack/distribution/library_client.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/llama_stack/distribution/library_client.py b/llama_stack/distribution/library_client.py
index 13aa67956..d4a7cde7e 100644
--- a/llama_stack/distribution/library_client.py
+++ b/llama_stack/distribution/library_client.py
@@ -198,6 +198,7 @@ class AsyncLlamaStackAsLibraryClient(AsyncLlamaStackClient):
 
     async def initialize(self) -> bool:
         try:
+            self.endpoint_impls = None
             self.impls = await construct_stack(self.config, self.custom_provider_registry)
         except ModuleNotFoundError as _e:
             cprint(_e.msg, "red")
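For context, a self-contained sketch of why the one-line change works. The class and method names mirror the real code in `library_client.py`, but `construct_stack_stub`, the endpoint table, and the exact error message are illustrative stand-ins, not the project's actual implementation:

```python
import asyncio


async def construct_stack_stub():
    """Stands in for construct_stack(); fails like an unbuilt distribution."""
    raise ModuleNotFoundError("No module named 'some_provider'")


class AsyncLlamaStackAsLibraryClient:
    """Simplified stand-in for the real class."""

    async def initialize(self) -> bool:
        try:
            # The one-line fix: make sure the attribute exists even when the
            # next call raises because the stack was never built.
            self.endpoint_impls = None
            impls = await construct_stack_stub()
            self.endpoint_impls = {"/v1/models": impls}  # illustrative
        except ModuleNotFoundError as e:
            print(e.msg)
            return False
        return True

    async def request(self, *args, **kwargs):
        # After a failed initialize(), endpoint_impls is None rather than
        # missing, so this guard raises an actionable error instead of the
        # AttributeError shown in the traceback above.
        if not self.endpoint_impls:
            raise ValueError("Client not initialized. Please call initialize() first.")


async def main():
    client = AsyncLlamaStackAsLibraryClient()
    await client.initialize()  # prints the stubbed ModuleNotFoundError message
    try:
        await client.request()
    except ValueError as err:
        print(f"Caught the intended error: {err}")


if __name__ == "__main__":
    asyncio.run(main())
```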