From 8fe4a216b52baf750af890ecc2f552935be664a1 Mon Sep 17 00:00:00 2001 From: Ashwin Bharambe Date: Thu, 9 Oct 2025 18:34:39 -0700 Subject: [PATCH] fix(inference): propagate 401/403 errors from remote providers (#3762) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary Fixes #2990 Remote provider authentication errors (401/403) were being converted to 500 Internal Server Error, preventing users from understanding why their requests failed. ## The Problem When a request with an invalid API key was sent to a remote provider: - Provider correctly returns 401 with error details - Llama Stack's `translate_exception()` didn't recognize provider SDK exceptions - Fell through to generic 500 error handler - User received: "Internal server error: An unexpected error occurred." ## The Fix Added handler in `translate_exception()` that checks for exceptions with a `status_code` attribute and preserves the original HTTP status code and error message. **Before:** ```json HTTP 500 {"detail": "Internal server error: An unexpected error occurred."} ``` **After:** ```json HTTP 401 {"detail": "Error code: 401 - {'error': {'message': 'Invalid API Key', 'type': 'invalid_request_error', 'code': 'invalid_api_key'}}"} ``` ## Tested With - ✅ groq: 401 "Invalid API Key" - ✅ openai: 401 "Incorrect API key provided" - ✅ together: 401 "Invalid API key provided" - ✅ fireworks: 403 "unauthorized" ## Test Plan **Automated test script:** https://gist.github.com/ashwinb/1199dd7585ffa3f4be67b111cc65f2f3 The test script: 1. Builds separate stacks for each provider 2. Registers models (with validation temporarily disabled for testing) 3. Sends requests with invalid API keys via `x-llamastack-provider-data` header 4. Verifies HTTP status codes are 401/403 (not 500) **Results before fix:** All providers returned 500 **Results after fix:** All providers correctly return 401/403 **Manual verification:** ```bash # 1. 
Build stack llama stack build --image-type venv --providers inference=remote::groq # 2. Start stack llama stack run # 3. Send request with invalid API key curl http://localhost:8321/v1/chat/completions \ -H "Content-Type: application/json" \ -H 'x-llamastack-provider-data: {"groq_api_key": "invalid-key"}' \ -d '{"model": "groq/llama3-70b-8192", "messages": [{"role": "user", "content": "test"}]}' # Expected: HTTP 401 with provider error message (not 500) ``` ## Impact - Works with all remote providers using the OpenAI SDK (groq, openai, together, fireworks, etc.) - Works with any provider SDK that follows the pattern of exceptions with a `status_code` attribute - No breaking changes - only affects error responses --- llama_stack/core/server/server.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/llama_stack/core/server/server.py b/llama_stack/core/server/server.py index e19092816..edc114381 100644 --- a/llama_stack/core/server/server.py +++ b/llama_stack/core/server/server.py @@ -138,6 +138,13 @@ def translate_exception(exc: Exception) -> HTTPException | RequestValidationErro return HTTPException(status_code=httpx.codes.NOT_IMPLEMENTED, detail=f"Not implemented: {str(exc)}") elif isinstance(exc, AuthenticationRequiredError): return HTTPException(status_code=httpx.codes.UNAUTHORIZED, detail=f"Authentication required: {str(exc)}") + elif hasattr(exc, "status_code") and isinstance(getattr(exc, "status_code", None), int): + # Handle provider SDK exceptions (e.g., OpenAI's APIStatusError and subclasses) + # These include AuthenticationError (401), PermissionDeniedError (403), etc. + # This preserves the actual HTTP status code from the provider + status_code = exc.status_code + detail = str(exc) + return HTTPException(status_code=status_code, detail=detail) else: return HTTPException( status_code=httpx.codes.INTERNAL_SERVER_ERROR,