mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-04 18:13:44 +00:00
refactor: rename trace_protocol marker to mark_as_traced
Rename the marker decorator in apis/common/tracing.py from trace_protocol to mark_as_traced to disambiguate it from the actual tracing implementation decorator in core/telemetry/trace_protocol.py.

Changes:
- Rename decorator: trace_protocol -> mark_as_traced
- Rename attribute: __trace_protocol__ -> __marked_for_tracing__
- Update all API protocol files to use new decorator name
- Update router logic to check for new attribute name

This makes it clear that the marker decorator is metadata-only and doesn't perform actual tracing, while the core decorator does the implementation.

Signed-off-by: Charlie Doern <cdoern@redhat.com>
This commit is contained in:
parent
cd17c62ec4
commit
29f93a6391
11 changed files with 28 additions and 26 deletions
|
|
@@ -9,7 +9,7 @@ from typing import Any, Literal, Protocol, runtime_checkable
|
|||
|
||||
from pydantic import BaseModel, ConfigDict, Field, field_validator
|
||||
|
||||
from llama_stack.apis.common.tracing import trace_protocol
|
||||
from llama_stack.apis.common.tracing import mark_as_traced
|
||||
from llama_stack.apis.resource import Resource, ResourceType
|
||||
from llama_stack.apis.version import LLAMA_STACK_API_V1
|
||||
from llama_stack.schema_utils import json_schema_type, webmethod
|
||||
|
|
@@ -105,7 +105,7 @@ class OpenAIListModelsResponse(BaseModel):
|
|||
|
||||
|
||||
@runtime_checkable
|
||||
@trace_protocol
|
||||
@mark_as_traced
|
||||
class Models(Protocol):
|
||||
async def list_models(self) -> ListModelsResponse:
|
||||
"""List all models.
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue