mirror of
				https://github.com/meta-llama/llama-stack.git
				synced 2025-10-25 17:11:12 +00:00 
			
		
		
		
	
		
			Some checks failed
		
		
	
	Integration Auth Tests / test-matrix (oauth2_token) (push) Failing after 4s
				
			Integration Tests (Replay) / Integration Tests (, , , client=, ) (push) Failing after 4s
				
			Test Llama Stack Build / build-single-provider (push) Failing after 3s
				
			Test Llama Stack Build / generate-matrix (push) Successful in 5s
				
			SqlStore Integration Tests / test-postgres (3.13) (push) Failing after 9s
				
			SqlStore Integration Tests / test-postgres (3.12) (push) Failing after 9s
				
			Test External Providers Installed via Module / test-external-providers-from-module (venv) (push) Has been skipped
				
			Python Package Build Test / build (3.12) (push) Failing after 1s
				
			Python Package Build Test / build (3.13) (push) Failing after 1s
				
			Vector IO Integration Tests / test-matrix (push) Failing after 9s
				
			Test Llama Stack Build / build-custom-container-distribution (push) Failing after 3s
				
			API Conformance Tests / check-schema-compatibility (push) Successful in 13s
				
			Test Llama Stack Build / build-ubi9-container-distribution (push) Failing after 4s
				
			Unit Tests / unit-tests (3.12) (push) Failing after 4s
				
			Unit Tests / unit-tests (3.13) (push) Failing after 3s
				
			Test External API and Providers / test-external (venv) (push) Failing after 5s
				
			Test Llama Stack Build / build (push) Failing after 31s
				
			UI Tests / ui-tests (22) (push) Successful in 46s
				
			Pre-commit / pre-commit (push) Successful in 2m13s
				
# What does this PR do? This PR fixes issues with the WatsonX provider so it works correctly with LiteLLM. The main problem was that WatsonX requests failed because the provider data validator didn’t properly handle the API key and project ID. This was fixed by updating the WatsonXProviderDataValidator and ensuring the provider data is loaded correctly. The openai_chat_completion method was also updated to match the behavior of other providers while adding WatsonX-specific fields like project_id. It still calls await super().openai_chat_completion.__func__(self, params) to keep the existing setup and tracing logic. After these changes, WatsonX requests now run correctly. ## Test Plan The changes were tested by running chat completion requests and confirming that credentials and project parameters are passed correctly. I have tested with my WatsonX credentials, using the CLI with `uv run llama-stack-client inference chat-completion --session` --------- Signed-off-by: Sébastien Han <seb@redhat.com> Co-authored-by: Sébastien Han <seb@redhat.com>
		
			
				
	
	
		
			45 lines
		
	
	
	
		
			1.4 KiB
		
	
	
	
		
			Python
		
	
	
	
	
	
			
		
		
	
	
			45 lines
		
	
	
	
		
			1.4 KiB
		
	
	
	
		
			Python
		
	
	
	
	
	
| # Copyright (c) Meta Platforms, Inc. and affiliates.
 | |
| # All rights reserved.
 | |
| #
 | |
| # This source code is licensed under the terms described in the LICENSE file in
 | |
| # the root directory of this source tree.
 | |
| 
 | |
| import os
 | |
| from typing import Any
 | |
| 
 | |
| from pydantic import BaseModel, Field
 | |
| 
 | |
| from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
 | |
| from llama_stack.schema_utils import json_schema_type
 | |
| 
 | |
| 
 | |
class WatsonXProviderDataValidator(BaseModel):
    """Per-request provider data accepted for watsonx.ai requests.

    Both fields are optional (`None` by default) so callers that rely on the
    statically configured credentials can omit them.
    """

    watsonx_project_id: str | None = Field(
        default=None,
        description="IBM WatsonX project ID",
    )
    # Declared with Field for consistency with watsonx_project_id, so the
    # generated schema carries a description; the default remains None.
    watsonx_api_key: str | None = Field(
        default=None,
        description="IBM WatsonX API key",
    )
| 
 | |
| 
 | |
@json_schema_type
class WatsonXConfig(RemoteInferenceProviderConfig):
    """Static configuration for the remote watsonx.ai inference provider."""

    # Base URL read from the environment at model-construction time;
    # falls back to the public us-south endpoint.
    url: str = Field(
        default_factory=lambda: os.environ.get("WATSONX_BASE_URL", "https://us-south.ml.cloud.ibm.com"),
        description="A base url for accessing the watsonx.ai",
    )
    # Optional: may also be supplied per-request via provider data.
    project_id: str | None = Field(
        default=None,
        description="The watsonx.ai project ID",
    )
    # HTTP request timeout in seconds.
    timeout: int = Field(
        default=60,
        description="Timeout for the HTTP requests",
    )

    @classmethod
    def sample_run_config(cls, **kwargs) -> dict[str, Any]:
        """Return a sample run configuration for this provider.

        NOTE(review): values use what appears to be the stack's
        ``${env.VAR:=default}`` substitution syntax — resolved elsewhere,
        not by this class.
        """
        sample: dict[str, Any] = {
            "url": "${env.WATSONX_BASE_URL:=https://us-south.ml.cloud.ibm.com}",
            "api_key": "${env.WATSONX_API_KEY:=}",
            "project_id": "${env.WATSONX_PROJECT_ID:=}",
        }
        return sample