From b7c276ea6d3e5dfa73e6998e1896d9fbdc242067 Mon Sep 17 00:00:00 2001
From: Eric Huang
Date: Thu, 16 Oct 2025 10:47:35 -0700
Subject: [PATCH] chore: distrogen enables telemetry by default

# What does this PR do?
Enables telemetry by default in every generated distribution run config. The telemetry provider was added to all distributions in the latest commit, but the protocol mapping was missing, causing a KeyError when the stack tried to validate provider compliance. This change registers the Telemetry API in the resolver's protocol map and sets `telemetry: enabled: true` in each distribution's run.yaml.

## Test Plan

---
 llama_stack/core/resolver.py                                | 2 ++
 llama_stack/distributions/ci-tests/run.yaml                 | 2 ++
 llama_stack/distributions/dell/run-with-safety.yaml         | 2 ++
 llama_stack/distributions/dell/run.yaml                     | 2 ++
 .../distributions/meta-reference-gpu/run-with-safety.yaml   | 2 ++
 llama_stack/distributions/meta-reference-gpu/run.yaml       | 2 ++
 llama_stack/distributions/nvidia/run-with-safety.yaml       | 2 ++
 llama_stack/distributions/nvidia/run.yaml                   | 2 ++
 llama_stack/distributions/open-benchmark/run.yaml           | 2 ++
 llama_stack/distributions/postgres-demo/run.yaml            | 2 ++
 llama_stack/distributions/starter-gpu/run.yaml              | 2 ++
 llama_stack/distributions/starter/run.yaml                  | 2 ++
 llama_stack/distributions/template.py                       | 7 +++++++
 llama_stack/distributions/watsonx/run.yaml                  | 2 ++
 14 files changed, 33 insertions(+)

diff --git a/llama_stack/core/resolver.py b/llama_stack/core/resolver.py
index f2d7089a6..73c047979 100644
--- a/llama_stack/core/resolver.py
+++ b/llama_stack/core/resolver.py
@@ -26,6 +26,7 @@ from llama_stack.apis.safety import Safety
 from llama_stack.apis.scoring import Scoring
 from llama_stack.apis.scoring_functions import ScoringFunctions
 from llama_stack.apis.shields import Shields
+from llama_stack.apis.telemetry import Telemetry
 from llama_stack.apis.tools import ToolGroups, ToolRuntime
 from llama_stack.apis.vector_io import VectorIO
 from llama_stack.apis.version import LLAMA_STACK_API_V1ALPHA
@@ -94,6 +95,7 @@ def api_protocol_map(external_apis: dict[Api, ExternalApiSpec] | None = None) ->
         Api.files: Files,
         Api.prompts: Prompts,
         Api.conversations: Conversations,
+        Api.telemetry: Telemetry,
     }

     if external_apis:
diff --git a/llama_stack/distributions/ci-tests/run.yaml b/llama_stack/distributions/ci-tests/run.yaml
index e964c044c..a6a6b7c0d 100644
--- a/llama_stack/distributions/ci-tests/run.yaml
+++ b/llama_stack/distributions/ci-tests/run.yaml
@@ -237,3 +237,5 @@ tool_groups:
   provider_id: rag-runtime
 server:
   port: 8321
+telemetry:
+  enabled: true
diff --git a/llama_stack/distributions/dell/run-with-safety.yaml b/llama_stack/distributions/dell/run-with-safety.yaml
index fa8e63107..5da3cf511 100644
--- a/llama_stack/distributions/dell/run-with-safety.yaml
+++ b/llama_stack/distributions/dell/run-with-safety.yaml
@@ -122,3 +122,5 @@ tool_groups:
   provider_id: rag-runtime
 server:
   port: 8321
+telemetry:
+  enabled: true
diff --git a/llama_stack/distributions/dell/run.yaml b/llama_stack/distributions/dell/run.yaml
index ac6ce22b8..ac0fdc0fa 100644
--- a/llama_stack/distributions/dell/run.yaml
+++ b/llama_stack/distributions/dell/run.yaml
@@ -113,3 +113,5 @@ tool_groups:
   provider_id: rag-runtime
 server:
   port: 8321
+telemetry:
+  enabled: true
diff --git a/llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml b/llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml
index 3467fffa4..874c5050f 100644
--- a/llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml
+++ b/llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml
@@ -135,3 +135,5 @@ tool_groups:
   provider_id: rag-runtime
 server:
   port: 8321
+telemetry:
+  enabled: true
diff --git a/llama_stack/distributions/meta-reference-gpu/run.yaml b/llama_stack/distributions/meta-reference-gpu/run.yaml
index f8546205a..50553d2c7 100644
--- a/llama_stack/distributions/meta-reference-gpu/run.yaml
+++ b/llama_stack/distributions/meta-reference-gpu/run.yaml
@@ -120,3 +120,5 @@ tool_groups:
   provider_id: rag-runtime
 server:
   port: 8321
+telemetry:
+  enabled: true
diff --git a/llama_stack/distributions/nvidia/run-with-safety.yaml b/llama_stack/distributions/nvidia/run-with-safety.yaml
index c56d9a7c1..e0482f67d 100644
--- a/llama_stack/distributions/nvidia/run-with-safety.yaml
+++ b/llama_stack/distributions/nvidia/run-with-safety.yaml
@@ -118,3 +118,5 @@ tool_groups:
   provider_id: rag-runtime
 server:
   port: 8321
+telemetry:
+  enabled: true
diff --git a/llama_stack/distributions/nvidia/run.yaml b/llama_stack/distributions/nvidia/run.yaml
index 8608ca425..950782eed 100644
--- a/llama_stack/distributions/nvidia/run.yaml
+++ b/llama_stack/distributions/nvidia/run.yaml
@@ -97,3 +97,5 @@ tool_groups:
   provider_id: rag-runtime
 server:
   port: 8321
+telemetry:
+  enabled: true
diff --git a/llama_stack/distributions/open-benchmark/run.yaml b/llama_stack/distributions/open-benchmark/run.yaml
index 067584649..a738887b4 100644
--- a/llama_stack/distributions/open-benchmark/run.yaml
+++ b/llama_stack/distributions/open-benchmark/run.yaml
@@ -233,3 +233,5 @@ tool_groups:
   provider_id: rag-runtime
 server:
   port: 8321
+telemetry:
+  enabled: true
diff --git a/llama_stack/distributions/postgres-demo/run.yaml b/llama_stack/distributions/postgres-demo/run.yaml
index 69032becf..62faf3f62 100644
--- a/llama_stack/distributions/postgres-demo/run.yaml
+++ b/llama_stack/distributions/postgres-demo/run.yaml
@@ -104,3 +104,5 @@ tool_groups:
   provider_id: rag-runtime
 server:
   port: 8321
+telemetry:
+  enabled: true
diff --git a/llama_stack/distributions/starter-gpu/run.yaml b/llama_stack/distributions/starter-gpu/run.yaml
index d55e5e4be..370d4b516 100644
--- a/llama_stack/distributions/starter-gpu/run.yaml
+++ b/llama_stack/distributions/starter-gpu/run.yaml
@@ -240,3 +240,5 @@ tool_groups:
   provider_id: rag-runtime
 server:
   port: 8321
+telemetry:
+  enabled: true
diff --git a/llama_stack/distributions/starter/run.yaml b/llama_stack/distributions/starter/run.yaml
index f15be3cc5..2f4e7f350 100644
--- a/llama_stack/distributions/starter/run.yaml
+++ b/llama_stack/distributions/starter/run.yaml
@@ -237,3 +237,5 @@ tool_groups:
   provider_id: rag-runtime
 server:
   port: 8321
+telemetry:
+  enabled: true
diff --git a/llama_stack/distributions/template.py b/llama_stack/distributions/template.py
index 59beb8a8a..5d3410456 100644
--- a/llama_stack/distributions/template.py
+++ b/llama_stack/distributions/template.py
@@ -25,6 +25,7 @@ from llama_stack.core.datatypes import (
     ModelInput,
     Provider,
     ShieldInput,
+    TelemetryConfig,
     ToolGroupInput,
 )
 from llama_stack.core.distribution import get_provider_registry
@@ -182,6 +183,11 @@ class RunConfigSettings(BaseModel):
     metadata_store: dict | None = None
     inference_store: dict | None = None
     conversations_store: dict | None = None
+    telemetry: TelemetryConfig | None = None
+
+    def model_post_init(self, __context__: Any) -> None:
+        if self.telemetry is None:
+            self.telemetry = TelemetryConfig(enabled=True)

     def run_config(
         self,
@@ -256,6 +262,7 @@ class RunConfigSettings(BaseModel):
             "server": {
                 "port": 8321,
             },
+            "telemetry": self.telemetry.model_dump(exclude_none=True) if self.telemetry else None,
         }
diff --git a/llama_stack/distributions/watsonx/run.yaml b/llama_stack/distributions/watsonx/run.yaml
index 6b925e180..c3db4eeb8 100644
--- a/llama_stack/distributions/watsonx/run.yaml
+++ b/llama_stack/distributions/watsonx/run.yaml
@@ -114,3 +114,5 @@ tool_groups:
   provider_id: rag-runtime
 server:
   port: 8321
+telemetry:
+  enabled: true
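
For reviewers unfamiliar with the resolver, the sketch below illustrates the failure mode described in the PR text: provider-compliance validation looks each configured API up in `api_protocol_map`, so an API without a protocol entry fails with a `KeyError` before any compliance check runs. This is a minimal stand-in, not the actual `llama_stack/core/resolver.py` code; only `Api.telemetry` and `Telemetry` correspond to names touched by the patch, everything else is illustrative.

```python
from enum import Enum


class Api(str, Enum):
    inference = "inference"
    telemetry = "telemetry"


class Inference:  # stand-in protocol class
    ...


class Telemetry:  # stand-in for llama_stack.apis.telemetry.Telemetry
    ...


def api_protocol_map() -> dict[Api, type]:
    # Before this patch the telemetry entry was missing, so any stack that
    # configured a telemetry provider hit a KeyError in validate_provider().
    return {
        Api.inference: Inference,
        Api.telemetry: Telemetry,  # the mapping added by this patch
    }


def validate_provider(api: Api, impl: object) -> None:
    protocol = api_protocol_map()[api]  # KeyError here if `api` is unmapped
    if not isinstance(impl, protocol):
        raise ValueError(f"{impl!r} does not implement {protocol.__name__}")


validate_provider(Api.telemetry, Telemetry())  # passes once the mapping exists
```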
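The distrogen side of the change relies on pydantic's `model_post_init` hook on `RunConfigSettings`: templates that do not set `telemetry` explicitly get an enabled config filled in after validation, which is what emits `telemetry: enabled: true` into every generated run.yaml. The sketch below shows that pattern under the assumption that `TelemetryConfig` is a pydantic model with an `enabled` flag; the stand-in definition here is not the real `llama_stack.core.datatypes.TelemetryConfig`.

```python
from typing import Any

from pydantic import BaseModel


class TelemetryConfig(BaseModel):  # assumed shape of the real config model
    enabled: bool = False


class RunConfigSettings(BaseModel):
    telemetry: TelemetryConfig | None = None

    def model_post_init(self, __context__: Any) -> None:
        # Runs after field validation: default to telemetry enabled when a
        # distribution template does not configure it explicitly.
        if self.telemetry is None:
            self.telemetry = TelemetryConfig(enabled=True)


# Default path: generated run configs get telemetry enabled.
settings = RunConfigSettings()
assert settings.telemetry is not None and settings.telemetry.enabled

# A template can still opt out by passing an explicit config.
opt_out = RunConfigSettings(telemetry=TelemetryConfig(enabled=False))
assert opt_out.telemetry is not None and not opt_out.telemetry.enabled
```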