From 80405da304e9ca5f1d0230f281f0ee5e557e8f60 Mon Sep 17 00:00:00 2001
From: Michael Dawson
Date: Fri, 26 Sep 2025 15:50:11 -0400
Subject: [PATCH] fix: ensure usage is requested if telemetry is enabled

Refs: https://github.com/llamastack/llama-stack/issues/3420

When telemetry is enabled, the router unconditionally expects the usage
attribute to be available and fails if it is not present. Usage is not
currently being requested by litellm_openai_mixin.py for streaming
requests, which means that providers like vertexai fail if telemetry is
enabled and streaming is used.

This is part of the required fix. The other part is in litellm; I plan
to submit a PR for that soon.

Signed-off-by: Michael Dawson
---
 .../providers/utils/inference/litellm_openai_mixin.py | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/llama_stack/providers/utils/inference/litellm_openai_mixin.py b/llama_stack/providers/utils/inference/litellm_openai_mixin.py
index 9bd43e4c9..cfad02688 100644
--- a/llama_stack/providers/utils/inference/litellm_openai_mixin.py
+++ b/llama_stack/providers/utils/inference/litellm_openai_mixin.py
@@ -399,6 +399,14 @@ class LiteLLMOpenAIMixin(
         top_p: float | None = None,
         user: str | None = None,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
+        # Request usage in streaming responses when a telemetry span is active,
+        # so the telemetry router always finds the usage attribute it expects.
+        from llama_stack.providers.utils.telemetry.tracing import get_current_span
+
+        if stream and get_current_span() is not None:
+            if stream_options is None:
+                stream_options = {"include_usage": True}
+            elif "include_usage" not in stream_options:
+                stream_options = {**stream_options, "include_usage": True}
         model_obj = await self.model_store.get_model(model)
         params = await prepare_openai_completion_params(
             model=self.get_litellm_model_name(model_obj.provider_resource_id),
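
Note (not part of the patch): a minimal sketch of how a client can observe the
fix. It assumes a hypothetical local Llama Stack deployment exposing the
OpenAI-compatible endpoint at http://localhost:8321/v1/openai/v1 and a
hypothetical vertexai model id; with telemetry enabled on the server, the final
streaming chunk should carry a usage block once include_usage is in effect.

    # Reproduction sketch; the base URL, model id, and api_key are placeholders.
    from openai import OpenAI

    client = OpenAI(base_url="http://localhost:8321/v1/openai/v1", api_key="none")

    stream = client.chat.completions.create(
        model="vertexai/gemini-2.0-flash",  # hypothetical provider/model id
        messages=[{"role": "user", "content": "Say hello"}],
        stream=True,
        # The patch adds this automatically when a telemetry span is active;
        # passing it explicitly shows the expected shape of the response.
        stream_options={"include_usage": True},
    )

    for chunk in stream:
        if chunk.choices:
            print(chunk.choices[0].delta.content or "", end="")
        if chunk.usage is not None:  # only the final chunk carries usage
            print("\nusage:", chunk.usage)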