fix: remove parts of trace_protocol and telemetry that were overlooked

Emilio Garcia 2025-11-20 13:48:36 -05:00
parent 9da36303a7
commit 9d24211d9d
35 changed files with 0 additions and 154 deletions

View file

@@ -9,7 +9,6 @@ data:
   - inference
   - files
   - safety
-  - telemetry
   - tool_runtime
   - vector_io
   providers:
@@ -67,12 +66,6 @@ data:
         db: ${env.POSTGRES_DB:=llamastack}
         user: ${env.POSTGRES_USER:=llamastack}
         password: ${env.POSTGRES_PASSWORD:=llamastack}
-  telemetry:
-  - provider_id: meta-reference
-    provider_type: inline::meta-reference
-    config:
-      service_name: "${env.OTEL_SERVICE_NAME:=\u200B}"
-      sinks: ${env.TELEMETRY_SINKS:=console}
   tool_runtime:
   - provider_id: brave-search
     provider_type: remote::brave-search

View file

@@ -8,7 +8,6 @@ data:
   - inference
   - files
   - safety
-  - telemetry
   - tool_runtime
   - vector_io
   providers:
@@ -73,12 +72,6 @@ data:
        db: ${env.POSTGRES_DB:=llamastack}
        user: ${env.POSTGRES_USER:=llamastack}
        password: ${env.POSTGRES_PASSWORD:=llamastack}
-  telemetry:
-  - provider_id: meta-reference
-    provider_type: inline::meta-reference
-    config:
-      service_name: "${env.OTEL_SERVICE_NAME:=\u200B}"
-      sinks: ${env.TELEMETRY_SINKS:=console}
   tool_runtime:
   - provider_id: brave-search
     provider_type: remote::brave-search

View file

@@ -116,10 +116,6 @@ The following environment variables can be configured:
 - `BRAVE_SEARCH_API_KEY`: Brave Search API key
 - `TAVILY_SEARCH_API_KEY`: Tavily Search API key
 
-### Telemetry Configuration
-
-- `OTEL_SERVICE_NAME`: OpenTelemetry service name
-- `OTEL_EXPORTER_OTLP_ENDPOINT`: OpenTelemetry collector endpoint URL
 
 ## Enabling Providers
 
 You can enable specific providers by setting appropriate environment variables. For example,

View file

@@ -360,32 +360,6 @@ Methods:
 - <code title="post /v1/synthetic-data-generation/generate">client.synthetic_data_generation.<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/resources/synthetic_data_generation.py">generate</a>(\*\*<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/types/synthetic_data_generation_generate_params.py">params</a>) -> <a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/types/synthetic_data_generation_response.py">SyntheticDataGenerationResponse</a></code>
 
-## Telemetry
-
-Types:
-
-```python
-from llama_stack_client.types import (
-    QuerySpansResponse,
-    SpanWithStatus,
-    Trace,
-    TelemetryGetSpanResponse,
-    TelemetryGetSpanTreeResponse,
-    TelemetryQuerySpansResponse,
-    TelemetryQueryTracesResponse,
-)
-```
-
-Methods:
-
-- <code title="get /v1/telemetry/traces/{trace_id}/spans/{span_id}">client.telemetry.<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/resources/telemetry.py">get_span</a>(span_id, \*, trace_id) -> <a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/types/telemetry_get_span_response.py">TelemetryGetSpanResponse</a></code>
-- <code title="get /v1/telemetry/spans/{span_id}/tree">client.telemetry.<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/resources/telemetry.py">get_span_tree</a>(span_id, \*\*<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/types/telemetry_get_span_tree_params.py">params</a>) -> <a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/types/telemetry_get_span_tree_response.py">TelemetryGetSpanTreeResponse</a></code>
-- <code title="get /v1/telemetry/traces/{trace_id}">client.telemetry.<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/resources/telemetry.py">get_trace</a>(trace_id) -> <a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/types/trace.py">Trace</a></code>
-- <code title="post /v1/telemetry/events">client.telemetry.<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/resources/telemetry.py">log_event</a>(\*\*<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/types/telemetry_log_event_params.py">params</a>) -> None</code>
-- <code title="get /v1/telemetry/spans">client.telemetry.<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/resources/telemetry.py">query_spans</a>(\*\*<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/types/telemetry_query_spans_params.py">params</a>) -> <a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/types/telemetry_query_spans_response.py">TelemetryQuerySpansResponse</a></code>
-- <code title="get /v1/telemetry/traces">client.telemetry.<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/resources/telemetry.py">query_traces</a>(\*\*<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/types/telemetry_query_traces_params.py">params</a>) -> <a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/types/telemetry_query_traces_response.py">TelemetryQueryTracesResponse</a></code>
-- <code title="post /v1/telemetry/spans/export">client.telemetry.<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/resources/telemetry.py">save_spans_to_dataset</a>(\*\*<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/types/telemetry_save_spans_to_dataset_params.py">params</a>) -> None</code>
 
 ## Datasetio
 
 Types:
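For context, a minimal sketch of how the removed telemetry surface was typically called from the Python client; the base URL and IDs below are illustrative assumptions, and these calls no longer exist after this commit:

```python
from llama_stack_client import LlamaStackClient

# Assumes a Llama Stack server on the default port; endpoint is illustrative.
client = LlamaStackClient(base_url="http://localhost:8321")

# Both calls map to routes deleted by this commit.
traces = client.telemetry.query_traces()  # GET /v1/telemetry/traces
span = client.telemetry.get_span(
    span_id="span-123",   # hypothetical IDs, for illustration only
    trace_id="trace-456",
)
```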

View file

@@ -346,7 +346,6 @@ exclude = [
     "^src/llama_stack/providers/utils/scoring/aggregation_utils\\.py$",
     "^src/llama_stack/providers/utils/scoring/base_scoring_fn\\.py$",
     "^src/llama_stack/providers/utils/telemetry/dataset_mixin\\.py$",
-    "^src/llama_stack/providers/utils/telemetry/trace_protocol\\.py$",
     "^src/llama_stack/providers/utils/telemetry/tracing\\.py$",
     "^src/llama_stack/distributions/template\\.py$",
 ]

View file

@@ -8,7 +8,6 @@
 Schema discovery and collection for OpenAPI generation.
 """
 
-import importlib
 from typing import Any
@@ -20,23 +19,6 @@ def _ensure_components_schemas(openapi_schema: dict[str, Any]) -> None:
         openapi_schema["components"]["schemas"] = {}
 
 
-def _load_extra_schema_modules() -> None:
-    """
-    Import modules outside llama_stack_api that use schema_utils to register schemas.
-
-    The API package already imports its submodules via __init__, but server-side modules
-    like telemetry need to be imported explicitly so their decorator side effects run.
-    """
-    extra_modules = [
-        "llama_stack.core.telemetry.telemetry",
-    ]
-    for module_name in extra_modules:
-        try:
-            importlib.import_module(module_name)
-        except ImportError:
-            continue
-
-
 def _extract_and_fix_defs(schema: dict[str, Any], openapi_schema: dict[str, Any]) -> None:
     """
     Extract $defs from a schema, move them to components/schemas, and fix references.
@@ -79,9 +61,6 @@ def _ensure_json_schema_types_included(openapi_schema: dict[str, Any]) -> dict[str, Any]:
         iter_registered_schema_types,
     )
 
-    # Import extra modules (e.g., telemetry) whose schema registrations live outside llama_stack_api
-    _load_extra_schema_modules()
-
     # Handle explicitly registered schemas first (union types, Annotated structs, etc.)
     for registration_info in iter_registered_schema_types():
         schema_type = registration_info.type
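To make the deleted helper's purpose concrete, here is a self-contained sketch of registration-by-import, the pattern it relied on: a decorator records a class at import time, so a module holding such decorators must be imported before its schemas become visible. The registry and decorator below are toy stand-ins, not the real llama_stack_api.schema_utils implementation:

```python
from typing import Any

SCHEMA_REGISTRY: list[type] = []  # toy stand-in for the real schema registry


def json_schema_type(cls: type) -> type:
    """Toy stand-in for llama_stack_api.schema_utils.json_schema_type."""
    SCHEMA_REGISTRY.append(cls)  # side effect runs when the module is imported
    return cls


@json_schema_type
class ExampleEvent:  # hypothetical type, for illustration only
    name: str
    payload: dict[str, Any]


# The class is registered merely because its module was imported -- which is
# why _load_extra_schema_modules() had to import the telemetry module explicitly.
assert ExampleEvent in SCHEMA_REGISTRY
```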

View file

@@ -371,12 +371,6 @@ class SafetyConfig(BaseModel):
     )
 
 
-class TelemetryConfig(BaseModel):
-    """Configuration for telemetry collection."""
-
-    enabled: bool = Field(default=False, description="Whether telemetry collection is enabled")
-
 
 class QuotaPeriod(StrEnum):
     DAY = "day"
@@ -542,11 +536,6 @@ can be instantiated multiple times (with different configs) if necessary.
         description="Configuration for default moderations model",
     )
 
-    telemetry: TelemetryConfig | None = Field(
-        default=None,
-        description="Configuration for telemetry collection",
-    )
-
     @field_validator("external_providers_dir")
     @classmethod
     def validate_external_providers_dir(cls, v):

View file

@@ -281,8 +281,6 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
 vector_stores:
   default_provider_id: faiss
   default_embedding_model:

View file

@@ -272,8 +272,6 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
 vector_stores:
   default_provider_id: faiss
   default_embedding_model:

View file

@@ -140,5 +140,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true

View file

@@ -131,5 +131,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true

View file

@@ -153,5 +153,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true

View file

@@ -138,5 +138,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true

View file

@@ -135,5 +135,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true

View file

@@ -114,5 +114,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true

View file

@@ -132,5 +132,3 @@ registered_resources:
     provider_id: tavily-search
 server:
   port: 8321
-telemetry:
-  enabled: true

View file

@@ -251,5 +251,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true

View file

@@ -114,5 +114,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true

View file

@@ -284,8 +284,6 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
 vector_stores:
   default_provider_id: faiss
   default_embedding_model:

View file

@@ -275,8 +275,6 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
 vector_stores:
   default_provider_id: faiss
   default_embedding_model:

View file

@@ -281,8 +281,6 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
 vector_stores:
   default_provider_id: faiss
   default_embedding_model:

View file

@@ -272,8 +272,6 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
 vector_stores:
   default_provider_id: faiss
   default_embedding_model:

View file

@@ -24,7 +24,6 @@ from llama_stack.core.datatypes import (
     Provider,
     SafetyConfig,
     ShieldInput,
-    TelemetryConfig,
     ToolGroupInput,
     VectorStoresConfig,
 )
@@ -189,7 +188,6 @@ class RunConfigSettings(BaseModel):
     default_benchmarks: list[BenchmarkInput] | None = None
     vector_stores_config: VectorStoresConfig | None = None
     safety_config: SafetyConfig | None = None
-    telemetry: TelemetryConfig = Field(default_factory=lambda: TelemetryConfig(enabled=True))
     storage_backends: dict[str, Any] | None = None
     storage_stores: dict[str, Any] | None = None
@@ -289,7 +287,6 @@ class RunConfigSettings(BaseModel):
             "server": {
                 "port": 8321,
             },
-            "telemetry": self.telemetry.model_dump(exclude_none=True) if self.telemetry else None,
         }
 
         if self.vector_stores_config:
if self.vector_stores_config: if self.vector_stores_config:

View file

@@ -132,5 +132,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true

View file

@@ -1,22 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-
-def telemetry_traceable(cls):
-    """
-    Mark a protocol for automatic tracing when telemetry is enabled.
-
-    This is a metadata-only decorator with no dependencies on core.
-    Actual tracing is applied by core routers at runtime if telemetry is enabled.
-
-    Usage:
-        @runtime_checkable
-        @telemetry_traceable
-        class MyProtocol(Protocol):
-            ...
-    """
-    cls.__marked_for_tracing__ = True
-    return cls
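The docstring above says core routers apply tracing at runtime when the marker is present. A hedged sketch of that hand-off, with a toy wrapper standing in for the real span machinery (the router-side hook and demo class are illustrative, not llama_stack code):

```python
import functools
import inspect


def telemetry_traceable(cls):
    """Same marker semantics as the deleted decorator."""
    cls.__marked_for_tracing__ = True
    return cls


def maybe_wrap_for_tracing(cls):
    """Hypothetical router-side hook: wrap public async methods only if marked."""
    if not getattr(cls, "__marked_for_tracing__", False):
        return cls
    for name, fn in inspect.getmembers(cls, inspect.iscoroutinefunction):
        if name.startswith("_"):
            continue

        @functools.wraps(fn)
        async def traced(*args, __fn=fn, **kwargs):
            # A real implementation would open and close a telemetry span here.
            return await __fn(*args, **kwargs)

        setattr(cls, name, traced)
    return cls


@telemetry_traceable
class _DemoProtocol:  # illustrative implementation class
    async def ping(self) -> str:
        return "pong"


maybe_wrap_for_tracing(_DemoProtocol)  # ping() is now wrapped; unmarked classes are untouched
```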

View file

@@ -9,7 +9,6 @@ from typing import Annotated, Literal, Protocol, runtime_checkable
 
 from pydantic import BaseModel, Field
 
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.openai_responses import (
     OpenAIResponseInputFunctionToolCallOutput,
     OpenAIResponseMCPApprovalRequest,
@@ -157,7 +156,6 @@ class ConversationItemDeletedResource(BaseModel):
 
 
 @runtime_checkable
-@telemetry_traceable
 class Conversations(Protocol):
     """Conversations

View file

@@ -102,7 +102,6 @@ class Api(Enum, metaclass=DynamicApiMeta):
     :cvar eval: Model evaluation and benchmarking framework
     :cvar post_training: Fine-tuning and model training
     :cvar tool_runtime: Tool execution and management
-    :cvar telemetry: Observability and system monitoring
     :cvar models: Model metadata and management
     :cvar shields: Safety shield implementations
     :cvar datasets: Dataset creation and management

View file

@@ -11,7 +11,6 @@ from fastapi import File, Form, Response, UploadFile
 from pydantic import BaseModel, Field
 
 from llama_stack_api.common.responses import Order
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.schema_utils import json_schema_type, webmethod
 from llama_stack_api.version import LLAMA_STACK_API_V1
@@ -102,7 +101,6 @@ class OpenAIFileDeleteResponse(BaseModel):
 
 
 @runtime_checkable
-@telemetry_traceable
 class Files(Protocol):
     """Files

View file

@@ -22,7 +22,6 @@ from llama_stack_api.common.content_types import InterleavedContent
 from llama_stack_api.common.responses import (
     Order,
 )
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.models import Model
 from llama_stack_api.schema_utils import json_schema_type, register_schema, webmethod
 from llama_stack_api.version import LLAMA_STACK_API_V1, LLAMA_STACK_API_V1ALPHA
@@ -989,7 +988,6 @@ class OpenAIEmbeddingsRequestWithExtraBody(BaseModel, extra="allow"):
 
 
 @runtime_checkable
-@telemetry_traceable
 class InferenceProvider(Protocol):
     """
     This protocol defines the interface that should be implemented by all inference providers.

View file

@@ -9,7 +9,6 @@ from typing import Any, Literal, Protocol, runtime_checkable
 
 from pydantic import BaseModel, ConfigDict, Field, field_validator
 
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.resource import Resource, ResourceType
 from llama_stack_api.schema_utils import json_schema_type, webmethod
 from llama_stack_api.version import LLAMA_STACK_API_V1
@@ -106,7 +105,6 @@ class OpenAIListModelsResponse(BaseModel):
 
 
 @runtime_checkable
-@telemetry_traceable
 class Models(Protocol):
     async def list_models(self) -> ListModelsResponse:
         """List all models.

View file

@@ -10,7 +10,6 @@ from typing import Protocol, runtime_checkable
 
 from pydantic import BaseModel, Field, field_validator, model_validator
 
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.schema_utils import json_schema_type, webmethod
 from llama_stack_api.version import LLAMA_STACK_API_V1
@@ -93,7 +92,6 @@ class ListPromptsResponse(BaseModel):
 
 
 @runtime_checkable
-@telemetry_traceable
 class Prompts(Protocol):
     """Prompts

View file

@@ -9,7 +9,6 @@ from typing import Any, Protocol, runtime_checkable
 
 from pydantic import BaseModel, Field
 
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.inference import OpenAIMessageParam
 from llama_stack_api.schema_utils import json_schema_type, webmethod
 from llama_stack_api.shields import Shield
@@ -94,7 +93,6 @@ class ShieldStore(Protocol):
 
 
 @runtime_checkable
-@telemetry_traceable
 class Safety(Protocol):
     """Safety

View file

@@ -8,7 +8,6 @@ from typing import Any, Literal, Protocol, runtime_checkable
 
 from pydantic import BaseModel
 
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.resource import Resource, ResourceType
 from llama_stack_api.schema_utils import json_schema_type, webmethod
 from llama_stack_api.version import LLAMA_STACK_API_V1
@@ -49,7 +48,6 @@ class ListShieldsResponse(BaseModel):
 
 
 @runtime_checkable
-@telemetry_traceable
 class Shields(Protocol):
     @webmethod(route="/shields", method="GET", level=LLAMA_STACK_API_V1)
     async def list_shields(self) -> ListShieldsResponse:

View file

@@ -11,7 +11,6 @@ from pydantic import BaseModel
 from typing_extensions import runtime_checkable
 
 from llama_stack_api.common.content_types import URL, InterleavedContent
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.resource import Resource, ResourceType
 from llama_stack_api.schema_utils import json_schema_type, webmethod
 from llama_stack_api.version import LLAMA_STACK_API_V1
@@ -109,7 +108,6 @@ class ListToolDefsResponse(BaseModel):
 
 
 @runtime_checkable
-@telemetry_traceable
 class ToolGroups(Protocol):
     @webmethod(route="/toolgroups", method="POST", level=LLAMA_STACK_API_V1, deprecated=True)
     async def register_tool_group(
@@ -191,7 +189,6 @@ class SpecialToolGroup(Enum):
 
 
 @runtime_checkable
-@telemetry_traceable
 class ToolRuntime(Protocol):
     tool_store: ToolStore | None = None

View file

@@ -13,7 +13,6 @@ from typing import Annotated, Any, Literal, Protocol, runtime_checkable
 from fastapi import Body, Query
 from pydantic import BaseModel, Field, field_validator
 
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.inference import InterleavedContent
 from llama_stack_api.schema_utils import json_schema_type, register_schema, webmethod
 from llama_stack_api.vector_stores import VectorStore
@@ -572,7 +571,6 @@ class VectorStoreTable(Protocol):
 
 
 @runtime_checkable
-@telemetry_traceable
 class VectorIO(Protocol):
     vector_store_table: VectorStoreTable | None = None