fix(otel): instrumentation providers do not need a config

This commit is contained in:
Emilio Garcia 2025-10-07 10:09:49 -04:00
parent 8fe3a25158
commit deedb3fb46
5 changed files with 59 additions and 36 deletions

View file

@ -1,33 +0,0 @@
---
description: "Native OpenTelemetry provider with full access to OTel Tracer and Meter APIs for advanced instrumentation."
sidebar_label: Otel
title: inline::otel
---
# inline::otel
## Description
Native OpenTelemetry provider with full access to OTel Tracer and Meter APIs for advanced instrumentation.
## Configuration
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `service_name` | `<class 'str'>` | No | | The name of the service to be monitored.
Is overridden by the OTEL_SERVICE_NAME or OTEL_RESOURCE_ATTRIBUTES environment variables. |
| `service_version` | `str \| None` | No | | The version of the service to be monitored.
Is overridden by the OTEL_RESOURCE_ATTRIBUTES environment variable. |
| `deployment_environment` | `str \| None` | No | | The name of the environment of the service to be monitored.
Is overridden by the OTEL_RESOURCE_ATTRIBUTES environment variable. |
| `span_processor` | `BatchSpanProcessor \| SimpleSpanProcessor \| None` | No | batch | The span processor to use.
Is overridden by the OTEL_SPAN_PROCESSOR environment variable. |
## Sample Configuration
```yaml
service_name: ${env.OTEL_SERVICE_NAME:=llama-stack}
service_version: ${env.OTEL_SERVICE_VERSION:=}
deployment_environment: ${env.OTEL_DEPLOYMENT_ENVIRONMENT:=}
span_processor: ${env.OTEL_SPAN_PROCESSOR:=batch}
```

View file

@ -542,8 +542,8 @@ If not specified, a default SQLite store will be used.""",
cfg_cls = instantiate_class_type(entry.config_class) cfg_cls = instantiate_class_type(entry.config_class)
prv_cls = instantiate_class_type(entry.provider_class) prv_cls = instantiate_class_type(entry.provider_class)
cfg_data = v.get("config") or {} cfg_data = v.get("config")
cfg = TypeAdapter(cfg_cls).validate_python(cfg_data) cfg = TypeAdapter(cfg_cls).validate_python(cfg_data) if cfg_data is not None else None
return prv_cls(provider=provider_type, config=cfg) return prv_cls(provider=provider_type, config=cfg)

View file

@ -21,7 +21,7 @@ class InstrumentationProvider(BaseModel):
""" """
provider: str = Field(description="Provider identifier for discriminated unions") provider: str = Field(description="Provider identifier for discriminated unions")
config: BaseModel config: BaseModel | None = Field(default=None, description="Optional configuration for the instrumentation provider. Most support configuration via environment variables.")
@abstractmethod @abstractmethod
def fastapi_middleware(self, app: FastAPI) -> None: def fastapi_middleware(self, app: FastAPI) -> None:

View file

@ -36,6 +36,9 @@ class OTelInstrumentationProvider(InstrumentationProvider):
def model_post_init(self, __context): def model_post_init(self, __context):
"""Initialize OpenTelemetry after Pydantic validation.""" """Initialize OpenTelemetry after Pydantic validation."""
# Provide default config if missing and validate type
if getattr(self, "config", None) is None:
self.config = OTelConfig()
assert isinstance(self.config, OTelConfig) # Type hint for IDE/linter assert isinstance(self.config, OTelConfig) # Type hint for IDE/linter
# Warn if OTLP endpoints not configured # Warn if OTLP endpoints not configured

View file

@ -7,11 +7,13 @@
import inspect import inspect
import sys import sys
from typing import Any, Protocol from typing import Any, Protocol
import yaml
from unittest.mock import AsyncMock, MagicMock from unittest.mock import AsyncMock, MagicMock
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from llama_stack.apis.inference import Inference from llama_stack.apis.inference import Inference
from llama_stack.apis.models import ModelInput
from llama_stack.core.datatypes import ( from llama_stack.core.datatypes import (
Api, Api,
Provider, Provider,
@ -21,6 +23,8 @@ from llama_stack.core.resolver import resolve_impls
from llama_stack.core.routers.inference import InferenceRouter from llama_stack.core.routers.inference import InferenceRouter
from llama_stack.core.routing_tables.models import ModelsRoutingTable from llama_stack.core.routing_tables.models import ModelsRoutingTable
from llama_stack.providers.datatypes import InlineProviderSpec, ProviderSpec from llama_stack.providers.datatypes import InlineProviderSpec, ProviderSpec
from llama_stack.core.datatypes import StackRunConfig
from llama_stack.providers.inline.instrumentation.otel.otel import OTelInstrumentationProvider
def add_protocol_methods(cls: type, protocol: type[Protocol]) -> None: def add_protocol_methods(cls: type, protocol: type[Protocol]) -> None:
@ -114,3 +118,52 @@ async def test_resolve_impls_basic():
assert impl.foo == "baz" assert impl.foo == "baz"
assert impl.__provider_id__ == "sample_provider" assert impl.__provider_id__ == "sample_provider"
assert impl.__provider_spec__ == provider_spec assert impl.__provider_spec__ == provider_spec
def test_instrumentation_provider_without_config_parses():
    """A StackRunConfig built in Python accepts an OTel instrumentation
    provider that supplies no explicit `config` (the provider falls back
    to environment-variable driven defaults)."""
    inference_providers = [
        Provider(
            provider_id="vllm",
            provider_type="remote::vllm",
            config={"url": "http://localhost:8000/v1"},
        )
    ]
    registered_models = [
        ModelInput(
            model_id="meta-llama/Llama-3.2-1B-Instruct",
            provider_id="vllm",
        )
    ]
    run_config = StackRunConfig(
        image_name="unit-test",
        apis=["inference"],
        providers={"inference": inference_providers},
        # Deliberately no `config=` kwarg: this is the case under test.
        instrumentation=OTelInstrumentationProvider(provider="otel"),
        models=registered_models,
    )
    assert run_config.instrumentation is not None
    assert getattr(run_config.instrumentation, "provider", None) == "otel"
def test_instrumentation_yaml_without_config_parses():
    """A YAML run config whose `instrumentation` block omits the `config`
    key still validates into a StackRunConfig."""
    raw = """
image_name: unit-test-yaml
apis: [inference]
providers:
  inference:
    - provider_id: vllm
      provider_type: remote::vllm
      config:
        url: http://localhost:8000/v1
instrumentation:
  provider: otel
models:
  - model_id: meta-llama/Llama-3.2-1B-Instruct
    provider_id: vllm
"""
    parsed = yaml.safe_load(raw)
    run_config = StackRunConfig(**parsed)
    assert run_config.instrumentation is not None
    assert getattr(run_config.instrumentation, "provider", None) == "otel"