style: remove prints in codebase (#1146)

# What does this PR do?
- replace `print` calls throughout the codebase with a module-level logger
- update `print_table` to render output with a `rich` `Table`

## Test Plan
- library client script in
https://github.com/meta-llama/llama-stack/pull/1145

```
llama stack list-providers
```
<img width="1407" alt="image"
src="https://github.com/user-attachments/assets/906b4f54-9e42-4e55-8968-7e3aa45525b2"
/>


[//]: # (## Documentation)
This commit is contained in:
Xi Yan 2025-02-18 19:41:37 -08:00 committed by GitHub
parent e8cb9e0adb
commit 37cf60b732
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
4 changed files with 38 additions and 65 deletions

View file

@ -4,6 +4,7 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import logging
import warnings
from typing import AsyncIterator, List, Optional, Union
@ -25,7 +26,12 @@ from llama_stack.apis.inference import (
ToolChoice,
ToolConfig,
)
from llama_stack.models.llama.datatypes import CoreModelId, SamplingParams, ToolDefinition, ToolPromptFormat
from llama_stack.models.llama.datatypes import (
CoreModelId,
SamplingParams,
ToolDefinition,
ToolPromptFormat,
)
from llama_stack.providers.utils.inference.model_registry import (
ModelRegistryHelper,
build_model_alias,
@ -43,6 +49,8 @@ from .openai_utils import (
)
from .utils import _is_nvidia_hosted, check_health
logger = logging.getLogger(__name__)
_MODEL_ALIASES = [
build_model_alias(
"meta/llama3-8b-instruct",
@ -90,7 +98,7 @@ class NVIDIAInferenceAdapter(Inference, ModelRegistryHelper):
# TODO(mf): filter by available models
ModelRegistryHelper.__init__(self, model_aliases=_MODEL_ALIASES)
print(f"Initializing NVIDIAInferenceAdapter({config.url})...")
logger.info(f"Initializing NVIDIAInferenceAdapter({config.url})...")
if _is_nvidia_hosted(config):
if not config.api_key: