From deedb3fb46f9d0b9108c004b712a3ea5dcf7a6fc Mon Sep 17 00:00:00 2001
From: Emilio Garcia
Date: Tue, 7 Oct 2025 10:09:49 -0400
Subject: [PATCH] fix(otel): instrumentation providers do not need a config

---
 docs/docs/providers/telemetry/inline_otel.mdx | 33 ------------
 llama_stack/core/datatypes.py                 |  4 +-
 llama_stack/core/instrumentation.py           |  2 +-
 .../inline/instrumentation/otel/otel.py       |  3 ++
 tests/unit/server/test_resolver.py            | 53 +++++++++++++++++++
 5 files changed, 59 insertions(+), 36 deletions(-)
 delete mode 100644 docs/docs/providers/telemetry/inline_otel.mdx

diff --git a/docs/docs/providers/telemetry/inline_otel.mdx b/docs/docs/providers/telemetry/inline_otel.mdx
deleted file mode 100644
index 0c0491e8a..000000000
--- a/docs/docs/providers/telemetry/inline_otel.mdx
+++ /dev/null
@@ -1,33 +0,0 @@
----
-description: "Native OpenTelemetry provider with full access to OTel Tracer and Meter APIs for advanced instrumentation."
-sidebar_label: Otel
-title: inline::otel
----
-
-# inline::otel
-
-## Description
-
-Native OpenTelemetry provider with full access to OTel Tracer and Meter APIs for advanced instrumentation.
-
-## Configuration
-
-| Field | Type | Required | Default | Description |
-|-------|------|----------|---------|-------------|
-| `service_name` | `` | No | | The name of the service to be monitored.
-Is overridden by the OTEL_SERVICE_NAME or OTEL_RESOURCE_ATTRIBUTES environment variables. |
-| `service_version` | `str \| None` | No | | The version of the service to be monitored.
-Is overriden by the OTEL_RESOURCE_ATTRIBUTES environment variable. |
-| `deployment_environment` | `str \| None` | No | | The name of the environment of the service to be monitored.
-Is overriden by the OTEL_RESOURCE_ATTRIBUTES environment variable. |
-| `span_processor` | `BatchSpanProcessor \| SimpleSpanProcessor \| None` | No | batch | The span processor to use.
-Is overriden by the OTEL_SPAN_PROCESSOR environment variable. |
-
-## Sample Configuration
-
-```yaml
-service_name: ${env.OTEL_SERVICE_NAME:=llama-stack}
-service_version: ${env.OTEL_SERVICE_VERSION:=}
-deployment_environment: ${env.OTEL_DEPLOYMENT_ENVIRONMENT:=}
-span_processor: ${env.OTEL_SPAN_PROCESSOR:=batch}
-```

diff --git a/llama_stack/core/datatypes.py b/llama_stack/core/datatypes.py
index fe7442315..a6fb8ccfc 100644
--- a/llama_stack/core/datatypes.py
+++ b/llama_stack/core/datatypes.py
@@ -542,8 +542,8 @@ If not specified, a default SQLite store will be used.""",
         cfg_cls = instantiate_class_type(entry.config_class)
         prv_cls = instantiate_class_type(entry.provider_class)
 
-        cfg_data = v.get("config") or {}
-        cfg = TypeAdapter(cfg_cls).validate_python(cfg_data)
+        cfg_data = v.get("config")
+        cfg = TypeAdapter(cfg_cls).validate_python(cfg_data) if cfg_data is not None else None
 
         return prv_cls(provider=provider_type, config=cfg)
 

diff --git a/llama_stack/core/instrumentation.py b/llama_stack/core/instrumentation.py
index 78ec6c567..2c2e3173a 100644
--- a/llama_stack/core/instrumentation.py
+++ b/llama_stack/core/instrumentation.py
@@ -21,7 +21,7 @@ class InstrumentationProvider(BaseModel):
     """
 
     provider: str = Field(description="Provider identifier for discriminated unions")
-    config: BaseModel
+    config: BaseModel | None = Field(default=None, description="Optional configuration for the instrumentation provider. Most support configuration via environment variables.")
 
     @abstractmethod
     def fastapi_middleware(self, app: FastAPI) -> None:

diff --git a/llama_stack/providers/inline/instrumentation/otel/otel.py b/llama_stack/providers/inline/instrumentation/otel/otel.py
index 7683fb4bc..d6890bd98 100644
--- a/llama_stack/providers/inline/instrumentation/otel/otel.py
+++ b/llama_stack/providers/inline/instrumentation/otel/otel.py
@@ -36,6 +36,9 @@
 
     def model_post_init(self, __context):
         """Initialize OpenTelemetry after Pydantic validation."""
+        # Provide default config if missing and validate type
+        if getattr(self, "config", None) is None:
+            self.config = OTelConfig()
         assert isinstance(self.config, OTelConfig)  # Type hint for IDE/linter
 
         # Warn if OTLP endpoints not configured

diff --git a/tests/unit/server/test_resolver.py b/tests/unit/server/test_resolver.py
index 1ee1b2f47..dc9e95aa8 100644
--- a/tests/unit/server/test_resolver.py
+++ b/tests/unit/server/test_resolver.py
@@ -7,11 +7,13 @@
 import inspect
 import sys
 from typing import Any, Protocol
+import yaml
 from unittest.mock import AsyncMock, MagicMock
 
 from pydantic import BaseModel, Field
 
 from llama_stack.apis.inference import Inference
+from llama_stack.apis.models import ModelInput
 from llama_stack.core.datatypes import (
     Api,
     Provider,
@@ -21,6 +23,8 @@
 from llama_stack.core.resolver import resolve_impls
 from llama_stack.core.routers.inference import InferenceRouter
 from llama_stack.core.routing_tables.models import ModelsRoutingTable
 from llama_stack.providers.datatypes import InlineProviderSpec, ProviderSpec
+from llama_stack.core.datatypes import StackRunConfig
+from llama_stack.providers.inline.instrumentation.otel.otel import OTelInstrumentationProvider
 
 def add_protocol_methods(cls: type, protocol: type[Protocol]) -> None:
@@ -114,3 +118,52 @@
     assert impl.foo == "baz"
     assert impl.__provider_id__ == "sample_provider"
     assert impl.__provider_spec__ == provider_spec
+
+
+def test_instrumentation_provider_without_config_parses():
+    cfg = StackRunConfig(
+        image_name="unit-test",
+        apis=["inference"],
+        providers={
+            "inference": [
+                Provider(
+                    provider_id="vllm",
+                    provider_type="remote::vllm",
+                    config={"url": "http://localhost:8000/v1"},
+                )
+            ]
+        },
+        instrumentation=OTelInstrumentationProvider(
+            provider="otel",
+        ),
+        models=[
+            ModelInput(model_id="meta-llama/Llama-3.2-1B-Instruct", provider_id="vllm")
+        ],
+    )
+
+    assert cfg.instrumentation is not None
+    assert getattr(cfg.instrumentation, "provider", None) == "otel"
+
+
+def test_instrumentation_yaml_without_config_parses():
+    yaml_config = """
+image_name: unit-test-yaml
+apis: [inference]
+providers:
+  inference:
+    - provider_id: vllm
+      provider_type: remote::vllm
+      config:
+        url: http://localhost:8000/v1
+instrumentation:
+  provider: otel
+models:
+  - model_id: meta-llama/Llama-3.2-1B-Instruct
+    provider_id: vllm
+"""
+
+    data = yaml.safe_load(yaml_config)
+    cfg = StackRunConfig(**data)
+
+    assert cfg.instrumentation is not None
+    assert getattr(cfg.instrumentation, "provider", None) == "otel"
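
Usage sketch (supplementing the patch, not part of it): the practical effect of this change is that a `run.yaml` may declare the instrumentation provider with no `config` block at all. `OTelInstrumentationProvider.model_post_init` falls back to a default `OTelConfig`, whose fields read from the OpenTelemetry environment variables documented in the deleted `inline_otel.mdx` page. A minimal sketch mirroring the YAML fixture in the new test; the `image_name` and the OTLP endpoint in the comments are placeholders, not values mandated by this patch:

```yaml
# run.yaml sketch: instrumentation declared without a `config:` block.
# OTel settings are expected to come from the environment instead, e.g.
#   OTEL_SERVICE_NAME=llama-stack        # service name (see deleted inline_otel.mdx)
#   OTEL_SPAN_PROCESSOR=batch            # span processor selection
#   OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318   # standard OTLP exporter env var; value illustrative
image_name: my-stack                     # illustrative name
apis: [inference]
providers:
  inference:
    - provider_id: vllm
      provider_type: remote::vllm
      config:
        url: http://localhost:8000/v1
instrumentation:
  provider: otel                         # valid without `config:` after this patch
models:
  - model_id: meta-llama/Llama-3.2-1B-Instruct
    provider_id: vllm
```

The companion change in `datatypes.py` is what makes this parse: a missing `config` is now passed through as `None` instead of being coerced to `{}` and validated against the provider's config class, and each provider supplies its own default (here `OTelConfig()`), while explicitly supplied configs are still fully validated.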