style: remove prints in codebase (#1146)

# What does this PR do?
- replace prints in codebase with logger
- update print_table to use rich Table

## Test Plan
- library client script in
https://github.com/meta-llama/llama-stack/pull/1145

```
llama stack list-providers
```
<img width="1407" alt="image"
src="https://github.com/user-attachments/assets/906b4f54-9e42-4e55-8968-7e3aa45525b2"
/>


[//]: # (## Documentation)
This commit is contained in:
Xi Yan 2025-02-18 19:41:37 -08:00 committed by GitHub
parent e8cb9e0adb
commit 37cf60b732
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
4 changed files with 38 additions and 65 deletions

View file

@@ -4,75 +4,36 @@
# This source code is licensed under the terms described in the LICENSE file in # This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree. # the root directory of this source tree.
import re
import textwrap
from typing import Iterable from typing import Iterable
from termcolor import cprint from rich.console import Console
from rich.table import Table
def strip_ansi_colors(text):
ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
return ansi_escape.sub("", text)
def format_row(row, col_widths):
def wrap(text, width):
lines = []
for line in text.split("\n"):
if line.strip() == "":
lines.append("")
else:
lines.extend(textwrap.wrap(line, width, break_long_words=False, replace_whitespace=False))
return lines
wrapped = [wrap(item, width) for item, width in zip(row, col_widths, strict=False)]
max_lines = max(len(subrow) for subrow in wrapped)
lines = []
for i in range(max_lines):
line = []
for cell_lines, width in zip(wrapped, col_widths, strict=False):
value = cell_lines[i] if i < len(cell_lines) else ""
line.append(value + " " * (width - len(strip_ansi_colors(value))))
lines.append("| " + (" | ".join(line)) + " |")
return "\n".join(lines)
def print_table(rows, headers=None, separate_rows: bool = False, sort_by: Iterable[int] = tuple()): def print_table(rows, headers=None, separate_rows: bool = False, sort_by: Iterable[int] = tuple()):
def itemlen(item): # Convert rows and handle None values
return max([len(line) for line in strip_ansi_colors(item).split("\n")])
rows = [[x or "" for x in row] for row in rows] rows = [[x or "" for x in row] for row in rows]
# Sort rows if sort_by is specified
if sort_by: if sort_by:
rows.sort(key=lambda x: tuple(x[i] for i in sort_by)) rows.sort(key=lambda x: tuple(x[i] for i in sort_by))
if not headers: # Create Rich table
col_widths = [max(itemlen(item) for item in col) for col in zip(*rows, strict=False)] table = Table(show_lines=separate_rows)
else:
col_widths = [
max(
itemlen(header),
max(itemlen(item) for item in col),
)
for header, col in zip(headers, zip(*rows, strict=False), strict=False)
]
col_widths = [min(w, 80) for w in col_widths]
header_line = "+".join("-" * (width + 2) for width in col_widths)
header_line = f"+{header_line}+"
# Add headers if provided
if headers: if headers:
print(header_line) for header in headers:
cprint(format_row(headers, col_widths), "white", attrs=["bold"]) table.add_column(header, style="bold white")
else:
# Add unnamed columns based on first row
for _ in range(len(rows[0]) if rows else 0):
table.add_column()
print(header_line) # Add rows
for row in rows: for row in rows:
print(format_row(row, col_widths)) table.add_row(*row)
if separate_rows:
print(header_line)
if not separate_rows: # Print table
print(header_line) console = Console()
console.print(table)

View file

@@ -47,6 +47,8 @@ from llama_stack.providers.utils.telemetry.tracing import (
start_trace, start_trace,
) )
logger = logging.getLogger(__name__)
T = TypeVar("T") T = TypeVar("T")
@@ -87,7 +89,7 @@ def convert_to_pydantic(annotation: Any, value: Any) -> Any:
try: try:
return [convert_to_pydantic(item_type, item) for item in value] return [convert_to_pydantic(item_type, item) for item in value]
except Exception: except Exception:
print(f"Error converting list {value} into {item_type}") logger.error(f"Error converting list {value} into {item_type}")
return value return value
elif origin is dict: elif origin is dict:
@@ -95,7 +97,7 @@ def convert_to_pydantic(annotation: Any, value: Any) -> Any:
try: try:
return {k: convert_to_pydantic(val_type, v) for k, v in value.items()} return {k: convert_to_pydantic(val_type, v) for k, v in value.items()}
except Exception: except Exception:
print(f"Error converting dict {value} into {val_type}") logger.error(f"Error converting dict {value} into {val_type}")
return value return value
try: try:
@@ -111,9 +113,8 @@ def convert_to_pydantic(annotation: Any, value: Any) -> Any:
return convert_to_pydantic(union_type, value) return convert_to_pydantic(union_type, value)
except Exception: except Exception:
continue continue
cprint( logger.warning(
f"Warning: direct client failed to convert parameter {value} into {annotation}: {e}", f"Warning: direct client failed to convert parameter {value} into {annotation}: {e}",
"yellow",
) )
return value return value
@@ -152,7 +153,7 @@ class LlamaStackAsLibraryClient(LlamaStackClient):
for handler in root_logger.handlers[:]: for handler in root_logger.handlers[:]:
root_logger.removeHandler(handler) root_logger.removeHandler(handler)
print(f"Removed handler {handler.__class__.__name__} from root logger") logger.info(f"Removed handler {handler.__class__.__name__} from root logger")
def request(self, *args, **kwargs): def request(self, *args, **kwargs):
if kwargs.get("stream"): if kwargs.get("stream"):

View file

@@ -4,6 +4,7 @@
# This source code is licensed under the terms described in the LICENSE file in # This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree. # the root directory of this source tree.
import logging
import warnings import warnings
from typing import AsyncIterator, List, Optional, Union from typing import AsyncIterator, List, Optional, Union
@@ -25,7 +26,12 @@ from llama_stack.apis.inference import (
ToolChoice, ToolChoice,
ToolConfig, ToolConfig,
) )
from llama_stack.models.llama.datatypes import CoreModelId, SamplingParams, ToolDefinition, ToolPromptFormat from llama_stack.models.llama.datatypes import (
CoreModelId,
SamplingParams,
ToolDefinition,
ToolPromptFormat,
)
from llama_stack.providers.utils.inference.model_registry import ( from llama_stack.providers.utils.inference.model_registry import (
ModelRegistryHelper, ModelRegistryHelper,
build_model_alias, build_model_alias,
@@ -43,6 +49,8 @@ from .openai_utils import (
) )
from .utils import _is_nvidia_hosted, check_health from .utils import _is_nvidia_hosted, check_health
logger = logging.getLogger(__name__)
_MODEL_ALIASES = [ _MODEL_ALIASES = [
build_model_alias( build_model_alias(
"meta/llama3-8b-instruct", "meta/llama3-8b-instruct",
@@ -90,7 +98,7 @@ class NVIDIAInferenceAdapter(Inference, ModelRegistryHelper):
# TODO(mf): filter by available models # TODO(mf): filter by available models
ModelRegistryHelper.__init__(self, model_aliases=_MODEL_ALIASES) ModelRegistryHelper.__init__(self, model_aliases=_MODEL_ALIASES)
print(f"Initializing NVIDIAInferenceAdapter({config.url})...") logger.info(f"Initializing NVIDIAInferenceAdapter({config.url})...")
if _is_nvidia_hosted(config): if _is_nvidia_hosted(config):
if not config.api_key: if not config.api_key:

View file

@@ -4,12 +4,15 @@
# This source code is licensed under the terms described in the LICENSE file in # This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree. # the root directory of this source tree.
import logging
from typing import Tuple from typing import Tuple
import httpx import httpx
from . import NVIDIAConfig from . import NVIDIAConfig
logger = logging.getLogger(__name__)
def _is_nvidia_hosted(config: NVIDIAConfig) -> bool: def _is_nvidia_hosted(config: NVIDIAConfig) -> bool:
return "integrate.api.nvidia.com" in config.url return "integrate.api.nvidia.com" in config.url
@@ -42,7 +45,7 @@ async def check_health(config: NVIDIAConfig) -> None:
RuntimeError: If the server is not running or ready RuntimeError: If the server is not running or ready
""" """
if not _is_nvidia_hosted(config): if not _is_nvidia_hosted(config):
print("Checking NVIDIA NIM health...") logger.info("Checking NVIDIA NIM health...")
try: try:
is_live, is_ready = await _get_health(config.url) is_live, is_ready = await _get_health(config.url)
if not is_live: if not is_live: