Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-03 09:53:45 +00:00)
fix: remove parts of trace_protocol and telemetry that were overlooked
This commit is contained in:
parent 9da36303a7
commit 9d24211d9d
35 changed files with 0 additions and 154 deletions

@@ -9,7 +9,6 @@ data:
     - inference
     - files
     - safety
-    - telemetry
     - tool_runtime
     - vector_io
     providers:

@@ -67,12 +66,6 @@ data:
           db: ${env.POSTGRES_DB:=llamastack}
           user: ${env.POSTGRES_USER:=llamastack}
           password: ${env.POSTGRES_PASSWORD:=llamastack}
-      telemetry:
-      - provider_id: meta-reference
-        provider_type: inline::meta-reference
-        config:
-          service_name: "${env.OTEL_SERVICE_NAME:=\u200B}"
-          sinks: ${env.TELEMETRY_SINKS:=console}
       tool_runtime:
       - provider_id: brave-search
         provider_type: remote::brave-search

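The `${env.VAR:=default}` placeholders above resolve an environment variable and fall back to the default when it is unset. A minimal sketch of that resolution, assuming a regex-based substitution (the pattern and function below are illustrative, not llama-stack's actual implementation):

```python
import os
import re

# Matches ${env.NAME:=default}; NAME is the variable, default may be empty.
_ENV_PATTERN = re.compile(r"\$\{env\.(?P<name>[A-Za-z0-9_]+):=(?P<default>[^}]*)\}")


def resolve_env_placeholders(value: str) -> str:
    """Replace each ${env.NAME:=default} with the env value or its default."""

    def _sub(match: re.Match) -> str:
        return os.environ.get(match.group("name"), match.group("default"))

    return _ENV_PATTERN.sub(_sub, value)


# With POSTGRES_DB unset, the default "llamastack" is used.
print(resolve_env_placeholders("db: ${env.POSTGRES_DB:=llamastack}"))
```

The deleted telemetry entry defaults `OTEL_SERVICE_NAME` to `\u200B` (a zero-width space), which appears to stand in for an effectively empty service name where a truly empty default would be ambiguous.
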
@@ -8,7 +8,6 @@ data:
     - inference
     - files
     - safety
-    - telemetry
     - tool_runtime
     - vector_io
     providers:

@@ -73,12 +72,6 @@ data:
           db: ${env.POSTGRES_DB:=llamastack}
           user: ${env.POSTGRES_USER:=llamastack}
           password: ${env.POSTGRES_PASSWORD:=llamastack}
-      telemetry:
-      - provider_id: meta-reference
-        provider_type: inline::meta-reference
-        config:
-          service_name: "${env.OTEL_SERVICE_NAME:=\u200B}"
-          sinks: ${env.TELEMETRY_SINKS:=console}
       tool_runtime:
       - provider_id: brave-search
         provider_type: remote::brave-search

@@ -116,10 +116,6 @@ The following environment variables can be configured:
 - `BRAVE_SEARCH_API_KEY`: Brave Search API key
 - `TAVILY_SEARCH_API_KEY`: Tavily Search API key
 
-### Telemetry Configuration
-- `OTEL_SERVICE_NAME`: OpenTelemetry service name
-- `OTEL_EXPORTER_OTLP_ENDPOINT`: OpenTelemetry collector endpoint URL
-
 ## Enabling Providers
 
 You can enable specific providers by setting appropriate environment variables. For example,

@@ -360,32 +360,6 @@ Methods:
 
 - <code title="post /v1/synthetic-data-generation/generate">client.synthetic_data_generation.<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/resources/synthetic_data_generation.py">generate</a>(\*\*<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/types/synthetic_data_generation_generate_params.py">params</a>) -> <a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/types/synthetic_data_generation_response.py">SyntheticDataGenerationResponse</a></code>
 
-## Telemetry
-
-Types:
-
-```python
-from llama_stack_client.types import (
-    QuerySpansResponse,
-    SpanWithStatus,
-    Trace,
-    TelemetryGetSpanResponse,
-    TelemetryGetSpanTreeResponse,
-    TelemetryQuerySpansResponse,
-    TelemetryQueryTracesResponse,
-)
-```
-
-Methods:
-
-- <code title="get /v1/telemetry/traces/{trace_id}/spans/{span_id}">client.telemetry.<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/resources/telemetry.py">get_span</a>(span_id, \*, trace_id) -> <a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/types/telemetry_get_span_response.py">TelemetryGetSpanResponse</a></code>
-- <code title="get /v1/telemetry/spans/{span_id}/tree">client.telemetry.<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/resources/telemetry.py">get_span_tree</a>(span_id, \*\*<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/types/telemetry_get_span_tree_params.py">params</a>) -> <a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/types/telemetry_get_span_tree_response.py">TelemetryGetSpanTreeResponse</a></code>
-- <code title="get /v1/telemetry/traces/{trace_id}">client.telemetry.<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/resources/telemetry.py">get_trace</a>(trace_id) -> <a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/types/trace.py">Trace</a></code>
-- <code title="post /v1/telemetry/events">client.telemetry.<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/resources/telemetry.py">log_event</a>(\*\*<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/types/telemetry_log_event_params.py">params</a>) -> None</code>
-- <code title="get /v1/telemetry/spans">client.telemetry.<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/resources/telemetry.py">query_spans</a>(\*\*<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/types/telemetry_query_spans_params.py">params</a>) -> <a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/types/telemetry_query_spans_response.py">TelemetryQuerySpansResponse</a></code>
-- <code title="get /v1/telemetry/traces">client.telemetry.<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/resources/telemetry.py">query_traces</a>(\*\*<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/types/telemetry_query_traces_params.py">params</a>) -> <a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/types/telemetry_query_traces_response.py">TelemetryQueryTracesResponse</a></code>
-- <code title="post /v1/telemetry/spans/export">client.telemetry.<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/resources/telemetry.py">save_spans_to_dataset</a>(\*\*<a href="https://github.com/meta-llama/llama-stack-client-python/tree/main/src/llama_stack_client/types/telemetry_save_spans_to_dataset_params.py">params</a>) -> None</code>
-
 ## Datasetio
 
 Types:

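For reference, a minimal sketch of how the removed telemetry methods were invoked from the Python client. The `base_url` and the trace/span IDs are placeholders, and the `limit` keyword is an assumption; only the method names and positional parameters come from the listing above:

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # placeholder URL

# Query recent traces (method removed by this commit; `limit` is assumed).
traces = client.telemetry.query_traces(limit=10)

# Fetch one trace and a span tree (IDs are placeholders).
trace = client.telemetry.get_trace(trace_id="abc123")
tree = client.telemetry.get_span_tree(span_id="def456")
```
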
@@ -346,7 +346,6 @@ exclude = [
     "^src/llama_stack/providers/utils/scoring/aggregation_utils\\.py$",
     "^src/llama_stack/providers/utils/scoring/base_scoring_fn\\.py$",
     "^src/llama_stack/providers/utils/telemetry/dataset_mixin\\.py$",
-    "^src/llama_stack/providers/utils/telemetry/trace_protocol\\.py$",
     "^src/llama_stack/providers/utils/telemetry/tracing\\.py$",
     "^src/llama_stack/distributions/template\\.py$",
 ]

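These `exclude` entries are regular expressions matched against file paths; the doubled backslashes are TOML string escaping, so `\\.py$` reaches mypy as the regex `\.py$`. A quick check of one pattern that stays behind:

```python
import re

# TOML's "\\.py$" becomes the regex "\.py$" once parsed.
pattern = re.compile(r"^src/llama_stack/providers/utils/telemetry/tracing\.py$")
assert pattern.match("src/llama_stack/providers/utils/telemetry/tracing.py")
```
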
@@ -8,7 +8,6 @@
 Schema discovery and collection for OpenAPI generation.
 """
 
-import importlib
 from typing import Any
 
 

@@ -20,23 +19,6 @@ def _ensure_components_schemas(openapi_schema: dict[str, Any]) -> None:
     openapi_schema["components"]["schemas"] = {}
 
 
-def _load_extra_schema_modules() -> None:
-    """
-    Import modules outside llama_stack_api that use schema_utils to register schemas.
-
-    The API package already imports its submodules via __init__, but server-side modules
-    like telemetry need to be imported explicitly so their decorator side effects run.
-    """
-    extra_modules = [
-        "llama_stack.core.telemetry.telemetry",
-    ]
-    for module_name in extra_modules:
-        try:
-            importlib.import_module(module_name)
-        except ImportError:
-            continue
-
-
 def _extract_and_fix_defs(schema: dict[str, Any], openapi_schema: dict[str, Any]) -> None:
     """
     Extract $defs from a schema, move them to components/schemas, and fix references.

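The deleted helper existed because schema registration is an import side effect: decorators such as `json_schema_type` record a class in a registry when the defining module is first imported. A toy sketch of that pattern (the registry and decorator body here are illustrative, not the actual schema_utils code):

```python
_SCHEMA_REGISTRY: dict[str, type] = {}


def json_schema_type(cls: type) -> type:
    # Runs at import time of the defining module, which is why modules
    # outside llama_stack_api had to be imported explicitly.
    _SCHEMA_REGISTRY[cls.__name__] = cls
    return cls


@json_schema_type
class ExampleEvent:
    pass


assert "ExampleEvent" in _SCHEMA_REGISTRY
```
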
@@ -79,9 +61,6 @@ def _ensure_json_schema_types_included(openapi_schema: dict[str, Any]) -> dict[s
         iter_registered_schema_types,
     )
 
-    # Import extra modules (e.g., telemetry) whose schema registrations live outside llama_stack_api
-    _load_extra_schema_modules()
-
     # Handle explicitly registered schemas first (union types, Annotated structs, etc.)
     for registration_info in iter_registered_schema_types():
         schema_type = registration_info.type

@@ -371,12 +371,6 @@ class SafetyConfig(BaseModel):
     )
 
 
-class TelemetryConfig(BaseModel):
-    """Configuration for telemetry collection."""
-
-    enabled: bool = Field(default=False, description="Whether telemetry collection is enabled")
-
-
 class QuotaPeriod(StrEnum):
     DAY = "day"
 

@@ -542,11 +536,6 @@ can be instantiated multiple times (with different configs) if necessary.
         description="Configuration for default moderations model",
     )
 
-    telemetry: TelemetryConfig | None = Field(
-        default=None,
-        description="Configuration for telemetry collection",
-    )
-
     @field_validator("external_providers_dir")
     @classmethod
     def validate_external_providers_dir(cls, v):

@@ -281,8 +281,6 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
 vector_stores:
   default_provider_id: faiss
   default_embedding_model:

@@ -272,8 +272,6 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
 vector_stores:
   default_provider_id: faiss
   default_embedding_model:

@@ -140,5 +140,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true

@@ -131,5 +131,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true

@@ -153,5 +153,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true

@@ -138,5 +138,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true

@@ -135,5 +135,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true

@@ -114,5 +114,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true

@@ -132,5 +132,3 @@ registered_resources:
     provider_id: tavily-search
 server:
   port: 8321
-telemetry:
-  enabled: true

@@ -251,5 +251,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true

@@ -114,5 +114,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true

@@ -284,8 +284,6 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
 vector_stores:
   default_provider_id: faiss
   default_embedding_model:

@@ -275,8 +275,6 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
 vector_stores:
   default_provider_id: faiss
   default_embedding_model:

@@ -281,8 +281,6 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
 vector_stores:
   default_provider_id: faiss
   default_embedding_model:

@@ -272,8 +272,6 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true
 vector_stores:
   default_provider_id: faiss
   default_embedding_model:

@@ -24,7 +24,6 @@ from llama_stack.core.datatypes import (
     Provider,
     SafetyConfig,
     ShieldInput,
-    TelemetryConfig,
     ToolGroupInput,
     VectorStoresConfig,
 )

@@ -189,7 +188,6 @@ class RunConfigSettings(BaseModel):
     default_benchmarks: list[BenchmarkInput] | None = None
     vector_stores_config: VectorStoresConfig | None = None
     safety_config: SafetyConfig | None = None
-    telemetry: TelemetryConfig = Field(default_factory=lambda: TelemetryConfig(enabled=True))
     storage_backends: dict[str, Any] | None = None
     storage_stores: dict[str, Any] | None = None
 

@@ -289,7 +287,6 @@ class RunConfigSettings(BaseModel):
             "server": {
                 "port": 8321,
             },
-            "telemetry": self.telemetry.model_dump(exclude_none=True) if self.telemetry else None,
         }
 
         if self.vector_stores_config:

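The deleted `model_dump(exclude_none=True)` entry is what emitted the `telemetry:` block seen in the generated run.yaml hunks earlier in this diff. A minimal sketch of that round trip (`yaml.safe_dump` is used here purely for illustration):

```python
import yaml
from pydantic import BaseModel, Field


class TelemetryConfig(BaseModel):
    enabled: bool = Field(default=False)


run_config = {
    "server": {"port": 8321},
    "telemetry": TelemetryConfig(enabled=True).model_dump(exclude_none=True),
}

# Prints the block this commit removes from each generated run.yaml:
#   server:
#     port: 8321
#   telemetry:
#     enabled: true
print(yaml.safe_dump(run_config, sort_keys=False))
```
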
@@ -132,5 +132,3 @@ registered_resources:
     provider_id: rag-runtime
 server:
   port: 8321
-telemetry:
-  enabled: true

@@ -1,22 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-
-def telemetry_traceable(cls):
-    """
-    Mark a protocol for automatic tracing when telemetry is enabled.
-
-    This is a metadata-only decorator with no dependencies on core.
-    Actual tracing is applied by core routers at runtime if telemetry is enabled.
-
-    Usage:
-        @runtime_checkable
-        @telemetry_traceable
-        class MyProtocol(Protocol):
-            ...
-    """
-    cls.__marked_for_tracing__ = True
-    return cls

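Per the docstring, the decorator only sets a marker; core routers decided at runtime whether to instrument. A hypothetical sketch of that check (`wrap_with_tracing` is an illustrative stand-in, not the actual core implementation):

```python
from typing import Any


def wrap_with_tracing(impl: Any) -> Any:
    """Illustrative stand-in for the router's span-wrapping logic."""
    return impl


def maybe_trace(impl: Any, telemetry_enabled: bool) -> Any:
    # Only protocols that opted in via @telemetry_traceable carry the marker.
    if telemetry_enabled and getattr(type(impl), "__marked_for_tracing__", False):
        return wrap_with_tracing(impl)
    return impl
```
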
@@ -9,7 +9,6 @@ from typing import Annotated, Literal, Protocol, runtime_checkable
 
 from pydantic import BaseModel, Field
 
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.openai_responses import (
     OpenAIResponseInputFunctionToolCallOutput,
     OpenAIResponseMCPApprovalRequest,

@@ -157,7 +156,6 @@ class ConversationItemDeletedResource(BaseModel):
 
 
 @runtime_checkable
-@telemetry_traceable
 class Conversations(Protocol):
     """Conversations
 

@@ -102,7 +102,6 @@ class Api(Enum, metaclass=DynamicApiMeta):
     :cvar eval: Model evaluation and benchmarking framework
     :cvar post_training: Fine-tuning and model training
     :cvar tool_runtime: Tool execution and management
-    :cvar telemetry: Observability and system monitoring
     :cvar models: Model metadata and management
     :cvar shields: Safety shield implementations
     :cvar datasets: Dataset creation and management

@@ -11,7 +11,6 @@ from fastapi import File, Form, Response, UploadFile
 from pydantic import BaseModel, Field
 
 from llama_stack_api.common.responses import Order
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.schema_utils import json_schema_type, webmethod
 from llama_stack_api.version import LLAMA_STACK_API_V1
 

@@ -102,7 +101,6 @@ class OpenAIFileDeleteResponse(BaseModel):
 
 
 @runtime_checkable
-@telemetry_traceable
 class Files(Protocol):
     """Files
 

@@ -22,7 +22,6 @@ from llama_stack_api.common.content_types import InterleavedContent
 from llama_stack_api.common.responses import (
     Order,
 )
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.models import Model
 from llama_stack_api.schema_utils import json_schema_type, register_schema, webmethod
 from llama_stack_api.version import LLAMA_STACK_API_V1, LLAMA_STACK_API_V1ALPHA

@@ -989,7 +988,6 @@ class OpenAIEmbeddingsRequestWithExtraBody(BaseModel, extra="allow"):
 
 
 @runtime_checkable
-@telemetry_traceable
 class InferenceProvider(Protocol):
     """
     This protocol defines the interface that should be implemented by all inference providers.

@@ -9,7 +9,6 @@ from typing import Any, Literal, Protocol, runtime_checkable
 
 from pydantic import BaseModel, ConfigDict, Field, field_validator
 
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.resource import Resource, ResourceType
 from llama_stack_api.schema_utils import json_schema_type, webmethod
 from llama_stack_api.version import LLAMA_STACK_API_V1

@@ -106,7 +105,6 @@ class OpenAIListModelsResponse(BaseModel):
 
 
 @runtime_checkable
-@telemetry_traceable
 class Models(Protocol):
     async def list_models(self) -> ListModelsResponse:
         """List all models.

@@ -10,7 +10,6 @@ from typing import Protocol, runtime_checkable
 
 from pydantic import BaseModel, Field, field_validator, model_validator
 
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.schema_utils import json_schema_type, webmethod
 from llama_stack_api.version import LLAMA_STACK_API_V1
 

@@ -93,7 +92,6 @@ class ListPromptsResponse(BaseModel):
 
 
 @runtime_checkable
-@telemetry_traceable
 class Prompts(Protocol):
     """Prompts
 

@@ -9,7 +9,6 @@ from typing import Any, Protocol, runtime_checkable
 
 from pydantic import BaseModel, Field
 
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.inference import OpenAIMessageParam
 from llama_stack_api.schema_utils import json_schema_type, webmethod
 from llama_stack_api.shields import Shield

@@ -94,7 +93,6 @@ class ShieldStore(Protocol):
 
 
 @runtime_checkable
-@telemetry_traceable
 class Safety(Protocol):
     """Safety
 

@@ -8,7 +8,6 @@ from typing import Any, Literal, Protocol, runtime_checkable
 
 from pydantic import BaseModel
 
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.resource import Resource, ResourceType
 from llama_stack_api.schema_utils import json_schema_type, webmethod
 from llama_stack_api.version import LLAMA_STACK_API_V1

@@ -49,7 +48,6 @@ class ListShieldsResponse(BaseModel):
 
 
 @runtime_checkable
-@telemetry_traceable
 class Shields(Protocol):
     @webmethod(route="/shields", method="GET", level=LLAMA_STACK_API_V1)
     async def list_shields(self) -> ListShieldsResponse:

@@ -11,7 +11,6 @@ from pydantic import BaseModel
 from typing_extensions import runtime_checkable
 
 from llama_stack_api.common.content_types import URL, InterleavedContent
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.resource import Resource, ResourceType
 from llama_stack_api.schema_utils import json_schema_type, webmethod
 from llama_stack_api.version import LLAMA_STACK_API_V1

@@ -109,7 +108,6 @@ class ListToolDefsResponse(BaseModel):
 
 
 @runtime_checkable
-@telemetry_traceable
 class ToolGroups(Protocol):
     @webmethod(route="/toolgroups", method="POST", level=LLAMA_STACK_API_V1, deprecated=True)
     async def register_tool_group(

@@ -191,7 +189,6 @@ class SpecialToolGroup(Enum):
 
 
 @runtime_checkable
-@telemetry_traceable
 class ToolRuntime(Protocol):
     tool_store: ToolStore | None = None
 

@@ -13,7 +13,6 @@ from typing import Annotated, Any, Literal, Protocol, runtime_checkable
 from fastapi import Body, Query
 from pydantic import BaseModel, Field, field_validator
 
-from llama_stack_api.common.tracing import telemetry_traceable
 from llama_stack_api.inference import InterleavedContent
 from llama_stack_api.schema_utils import json_schema_type, register_schema, webmethod
 from llama_stack_api.vector_stores import VectorStore

@@ -572,7 +571,6 @@ class VectorStoreTable(Protocol):
 
 
 @runtime_checkable
-@telemetry_traceable
 class VectorIO(Protocol):
     vector_store_table: VectorStoreTable | None = None