fix: rename decorator to telemetry_traceable

The new name is more descriptive and less redundant than mark_as_traced.

Signed-off-by: Charlie Doern <cdoern@redhat.com>
Charlie Doern <cdoern@redhat.com>, 2025-11-06 12:47:20 -05:00
commit 89b23ec21d (parent d1adc5a6eb)
10 changed files with 21 additions and 21 deletions

@@ -5,7 +5,7 @@
 # the root directory of this source tree.
-def mark_as_traced(cls):
+def telemetry_traceable(cls):
     """
     Mark a protocol for automatic tracing when telemetry is enabled.
@@ -14,7 +14,7 @@ def mark_as_traced(cls):
     Usage:
         @runtime_checkable
-        @mark_as_traced
+        @telemetry_traceable
         class MyProtocol(Protocol):
             ...
     """

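The body of the decorator sits outside the hunks shown above. As a rough sketch only (assuming the decorator merely tags the class with a marker attribute; the attribute name below is hypothetical and not taken from this diff), the rename leaves behavior untouched:

    def telemetry_traceable(cls):
        # Hypothetical sketch: record a marker attribute that telemetry
        # instrumentation can check when deciding which protocols to wrap
        # in tracing spans. The class itself is returned unchanged.
        cls.__telemetry_traceable__ = True  # attribute name is an assumption
        return cls

Since only the decorator's name changes, every remaining hunk in this commit is mechanical: update the import from llama_stack.apis.common.tracing and the decorator application above each protocol class.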
@@ -20,7 +20,7 @@ from llama_stack.apis.agents.openai_responses import (
     OpenAIResponseOutputMessageMCPListTools,
     OpenAIResponseOutputMessageWebSearchToolCall,
 )
-from llama_stack.apis.common.tracing import mark_as_traced
+from llama_stack.apis.common.tracing import telemetry_traceable
 from llama_stack.apis.version import LLAMA_STACK_API_V1
 from llama_stack.schema_utils import json_schema_type, register_schema, webmethod
@@ -157,7 +157,7 @@ class ConversationItemDeletedResource(BaseModel):
 @runtime_checkable
-@mark_as_traced
+@telemetry_traceable
 class Conversations(Protocol):
     """Conversations

@@ -11,7 +11,7 @@ from fastapi import File, Form, Response, UploadFile
 from pydantic import BaseModel, Field
 from llama_stack.apis.common.responses import Order
-from llama_stack.apis.common.tracing import mark_as_traced
+from llama_stack.apis.common.tracing import telemetry_traceable
 from llama_stack.apis.version import LLAMA_STACK_API_V1
 from llama_stack.schema_utils import json_schema_type, webmethod
@@ -102,7 +102,7 @@ class OpenAIFileDeleteResponse(BaseModel):
 @runtime_checkable
-@mark_as_traced
+@telemetry_traceable
 class Files(Protocol):
     """Files

@@ -20,7 +20,7 @@ from typing_extensions import TypedDict
 from llama_stack.apis.common.content_types import ContentDelta, InterleavedContent
 from llama_stack.apis.common.responses import MetricResponseMixin, Order
-from llama_stack.apis.common.tracing import mark_as_traced
+from llama_stack.apis.common.tracing import telemetry_traceable
 from llama_stack.apis.models import Model
 from llama_stack.apis.version import LLAMA_STACK_API_V1, LLAMA_STACK_API_V1ALPHA
 from llama_stack.models.llama.datatypes import (
@@ -1159,7 +1159,7 @@ class OpenAIEmbeddingsRequestWithExtraBody(BaseModel, extra="allow"):
 @runtime_checkable
-@mark_as_traced
+@telemetry_traceable
 class InferenceProvider(Protocol):
     """
     This protocol defines the interface that should be implemented by all inference providers.

@@ -9,7 +9,7 @@ from typing import Any, Literal, Protocol, runtime_checkable
 from pydantic import BaseModel, ConfigDict, Field, field_validator
-from llama_stack.apis.common.tracing import mark_as_traced
+from llama_stack.apis.common.tracing import telemetry_traceable
 from llama_stack.apis.resource import Resource, ResourceType
 from llama_stack.apis.version import LLAMA_STACK_API_V1
 from llama_stack.schema_utils import json_schema_type, webmethod
@@ -105,7 +105,7 @@ class OpenAIListModelsResponse(BaseModel):
 @runtime_checkable
-@mark_as_traced
+@telemetry_traceable
 class Models(Protocol):
     async def list_models(self) -> ListModelsResponse:
         """List all models.

@@ -10,7 +10,7 @@ from typing import Protocol, runtime_checkable
 from pydantic import BaseModel, Field, field_validator, model_validator
-from llama_stack.apis.common.tracing import mark_as_traced
+from llama_stack.apis.common.tracing import telemetry_traceable
 from llama_stack.apis.version import LLAMA_STACK_API_V1
 from llama_stack.schema_utils import json_schema_type, webmethod
@@ -92,7 +92,7 @@ class ListPromptsResponse(BaseModel):
 @runtime_checkable
-@mark_as_traced
+@telemetry_traceable
 class Prompts(Protocol):
     """Prompts

@@ -9,7 +9,7 @@ from typing import Any, Protocol, runtime_checkable
 from pydantic import BaseModel, Field
-from llama_stack.apis.common.tracing import mark_as_traced
+from llama_stack.apis.common.tracing import telemetry_traceable
 from llama_stack.apis.inference import OpenAIMessageParam
 from llama_stack.apis.shields import Shield
 from llama_stack.apis.version import LLAMA_STACK_API_V1
@@ -94,7 +94,7 @@ class ShieldStore(Protocol):
 @runtime_checkable
-@mark_as_traced
+@telemetry_traceable
 class Safety(Protocol):
     """Safety

@@ -8,7 +8,7 @@ from typing import Any, Literal, Protocol, runtime_checkable
 from pydantic import BaseModel
-from llama_stack.apis.common.tracing import mark_as_traced
+from llama_stack.apis.common.tracing import telemetry_traceable
 from llama_stack.apis.resource import Resource, ResourceType
 from llama_stack.apis.version import LLAMA_STACK_API_V1
 from llama_stack.schema_utils import json_schema_type, webmethod
@@ -48,7 +48,7 @@ class ListShieldsResponse(BaseModel):
 @runtime_checkable
-@mark_as_traced
+@telemetry_traceable
 class Shields(Protocol):
     @webmethod(route="/shields", method="GET", level=LLAMA_STACK_API_V1)
     async def list_shields(self) -> ListShieldsResponse:

@@ -11,7 +11,7 @@ from pydantic import BaseModel
 from typing_extensions import runtime_checkable
 from llama_stack.apis.common.content_types import URL, InterleavedContent
-from llama_stack.apis.common.tracing import mark_as_traced
+from llama_stack.apis.common.tracing import telemetry_traceable
 from llama_stack.apis.resource import Resource, ResourceType
 from llama_stack.apis.version import LLAMA_STACK_API_V1
 from llama_stack.schema_utils import json_schema_type, webmethod
@@ -107,7 +107,7 @@ class ListToolDefsResponse(BaseModel):
 @runtime_checkable
-@mark_as_traced
+@telemetry_traceable
 class ToolGroups(Protocol):
     @webmethod(route="/toolgroups", method="POST", level=LLAMA_STACK_API_V1)
     async def register_tool_group(
@@ -189,7 +189,7 @@ class SpecialToolGroup(Enum):
 @runtime_checkable
-@mark_as_traced
+@telemetry_traceable
 class ToolRuntime(Protocol):
     tool_store: ToolStore | None = None

@@ -13,7 +13,7 @@ from typing import Annotated, Any, Literal, Protocol, runtime_checkable
 from fastapi import Body
 from pydantic import BaseModel, Field
-from llama_stack.apis.common.tracing import mark_as_traced
+from llama_stack.apis.common.tracing import telemetry_traceable
 from llama_stack.apis.inference import InterleavedContent
 from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.apis.version import LLAMA_STACK_API_V1
@@ -502,7 +502,7 @@ class VectorStoreTable(Protocol):
 @runtime_checkable
-@mark_as_traced
+@telemetry_traceable
 class VectorIO(Protocol):
     vector_store_table: VectorStoreTable | None = None