From 090acfc458d22016f1a5815e77eacfacdb4ef2a1 Mon Sep 17 00:00:00 2001
From: Charlie Doern
Date: Tue, 12 Aug 2025 16:07:54 -0400
Subject: [PATCH] fix: better error message when db is out of date
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Currently, if you:

1. `export OLLAMA_URL=http://localhost:11434`
2. `llama stack run --image-type venv starter`
3. do some chat completions successfully
4. kill the server
5. `unset OLLAMA_URL`
6. `llama stack run --image-type venv starter`
7. do some more chat completions

you get errors like:

```
  File "/Users/charliedoern/projects/Documents/llama-stack/llama_stack/core/routing_tables/models.py", line 66, in get_provider_impl
    return self.impls_by_provider_id
           ~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^
KeyError: 'ollama'
```

and in the client:

```
INFO:httpx:HTTP Request: POST http://localhost:8321/v1/openai/v1/chat/completions "HTTP/1.1 500 Internal Server Error"
INFO:llama_stack_client._base_client:Retrying request to /v1/openai/v1/chat/completions in 0.482010 seconds
INFO:httpx:HTTP Request: POST http://localhost:8321/v1/openai/v1/chat/completions "HTTP/1.1 500 Internal Server Error"
INFO:llama_stack_client._base_client:Retrying request to /v1/openai/v1/chat/completions in 0.883701 seconds
INFO:httpx:HTTP Request: POST http://localhost:8321/v1/openai/v1/chat/completions "HTTP/1.1 500 Internal Server Error"
╭───────────────────────────────────────────────────────────────────────────────────────────────╮
│ Failed to inference chat-completion                                                           │
│                                                                                               │
│ Error Type: InternalServerError                                                               │
│ Details: Error code: 500 - {'detail': 'Internal server error: An unexpected error occurred.'} │
╰───────────────────────────────────────────────────────────────────────────────────────────────╯
```

Now you get:

```
  File "/Users/charliedoern/projects/Documents/llama-stack/llama_stack/core/routing_tables/models.py", line 69, in get_provider_impl
    raise ValueError(
ValueError: Provider ID not found in currently running providers. Usually this indicates that your registry.db is out of date. Please ensure that the databases associated with your distro are not out of date.
INFO 2025-08-12 16:07:40,677 console_span_processor:62 telemetry: 20:07:40.628 [INFO] ::1:55414 - "POST /v1/openai/v1/chat/completions HTTP/1.1" 400
```

and in the client:

```
Failed to inference chat-completion

Error Type: BadRequestError
Details: Error code: 400 - {'detail': 'Invalid value: Provider ID not found in currently running providers. Usually this indicates that your registry.db is out of date. Please ensure that the databases associated with your distro are not out of date.'}
```

The new error is more descriptive and gives the user a course of action.

Signed-off-by: Charlie Doern
---
 llama_stack/core/routing_tables/models.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/llama_stack/core/routing_tables/models.py b/llama_stack/core/routing_tables/models.py
index c76619271..db6c0a80f 100644
--- a/llama_stack/core/routing_tables/models.py
+++ b/llama_stack/core/routing_tables/models.py
@@ -63,6 +63,10 @@ class ModelsRoutingTable(CommonRoutingTableImpl, Models):
 
     async def get_provider_impl(self, model_id: str) -> Any:
         model = await lookup_model(self, model_id)
+        if model.provider_id not in self.impls_by_provider_id:
+            raise ValueError(
+                "Provider ID not found in currently running providers. Usually this indicates that your registry.db is out of date. Please ensure that the databases associated with your distro are not out of date."
+            )
         return self.impls_by_provider_id[model.provider_id]
 
     async def register_model(
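
For reviewers who want to sanity-check the new guard without standing up a full stack, here is a minimal standalone sketch of the failure mode and the fix. It is not part of this patch: the dict stands in for `ModelsRoutingTable.impls_by_provider_id`, and the provider IDs ("vllm", "ollama") are hypothetical examples; in the real code the check runs inside `get_provider_impl` after `lookup_model` resolves the model.

```python
# Standalone sketch (not part of this patch): a plain dict stands in for
# ModelsRoutingTable.impls_by_provider_id; provider IDs are hypothetical.
from typing import Any

# Providers that are currently running. After OLLAMA_URL is unset and the
# server restarts, "ollama" is no longer among them, but registry.db still
# holds models registered against it.
impls_by_provider_id: dict[str, Any] = {"vllm": object()}

def get_provider_impl(provider_id: str) -> Any:
    # Before this patch, a stale registry.db entry pointing at "ollama"
    # hit a bare KeyError on the dict lookup below, which the server
    # surfaced to clients as a generic 500.
    if provider_id not in impls_by_provider_id:
        raise ValueError(
            "Provider ID not found in currently running providers. "
            "Usually this indicates that your registry.db is out of date."
        )
    return impls_by_provider_id[provider_id]

get_provider_impl("ollama")  # raises ValueError instead of KeyError
```

Raising `ValueError` matters beyond the wording: as the captured logs above show, the server maps it to an HTTP 400 whose detail carries the message, whereas the bare `KeyError` fell through to the generic 500 handler and told the user nothing actionable.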