diff --git a/llama_stack/providers/remote/inference/tgi/config.py b/llama_stack/providers/remote/inference/tgi/config.py
index 020ca8bf8..4f690dec6 100644
--- a/llama_stack/providers/remote/inference/tgi/config.py
+++ b/llama_stack/providers/remote/inference/tgi/config.py
@@ -15,10 +15,6 @@ class TGIImplConfig(BaseModel):
     url: str = Field(
         description="The URL for the TGI serving endpoint",
     )
-    # api_token: Optional[SecretStr] = Field(
-    #     default=None,
-    #     description="A bearer token if your TGI endpoint is protected.",
-    # )
 
     @classmethod
     def sample_run_config(cls, url: str = "${env.TGI_URL}", **kwargs):
diff --git a/tests/client-sdk/inference/test_inference.py b/tests/client-sdk/inference/test_inference.py
index 175e5f1f2..671a37926 100644
--- a/tests/client-sdk/inference/test_inference.py
+++ b/tests/client-sdk/inference/test_inference.py
@@ -225,7 +225,6 @@ def test_text_chat_completion_with_tool_calling_and_non_streaming(
         tool_prompt_format=provider_tool_format,
         stream=False,
     )
-    print(response)
     # No content is returned for the system message since we expect the
     # response to be a tool call
     assert response.completion_message.content == ""