forked from phoenix-oss/llama-stack-mirror
# What does this PR do? This PR converts all print statements to use logging. Changes: added `await start_trace("sse_generator")` to server.py so that tracing actually works (without it, no logs were emitted); if no telemetry provider is specified in run.yaml, logs are written to stdout; logs default to JSON format, with an exposed option to configure human-readable output instead.
21 lines
467 B
Python
21 lines
467 B
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
# All rights reserved.
|
|
#
|
|
# This source code is licensed under the terms described in the LICENSE file in
|
|
# the root directory of this source tree.
|
|
|
|
from enum import Enum
|
|
|
|
from llama_models.schema_utils import json_schema_type
|
|
|
|
from pydantic import BaseModel
|
|
|
|
|
|
class LogFormat(Enum):
    """Rendering formats for console log output.

    ``TEXT`` produces human-readable lines; ``JSON`` produces
    machine-parseable structured records.
    """

    # Human-readable console output.
    TEXT = "text"
    # Structured output, suitable for log aggregation/parsing.
    JSON = "json"
|
|
|
|
|
|
@json_schema_type
class ConsoleConfig(BaseModel):
    """Configuration for console (stdout) log output.

    Attributes:
        log_format: How log records are rendered. Defaults to
            ``LogFormat.JSON`` (structured output); set to
            ``LogFormat.TEXT`` for human-readable lines.
    """

    # JSON by default so emitted logs are machine-parseable out of the box.
    log_format: LogFormat = LogFormat.JSON