From ddf3f1735a3fde2f25191244215ba802a439f7d5 Mon Sep 17 00:00:00 2001
From: Michael Dawson
Date: Mon, 29 Sep 2025 17:09:08 -0400
Subject: [PATCH] fix: ensure usage is requested if telemetry is enabled
 (#3571)

# What does this PR do?
Refs: https://github.com/llamastack/llama-stack/issues/3420

When telemetry is enabled, the router unconditionally expects the usage
attribute to be available and fails if it is not present. Usage is not
currently being requested by litellm_openai_mixin.py for streaming
requests when using the responses API, which means that providers like
vertexai fail if telemetry is enabled and streaming is used.

This is part of the required fix. The other part is in liteLLM; I plan
to submit a PR for that soon.

## Test Plan
I applied this change along with the change for litellm in a llama stack
deployment and validated that I could make streaming requests through the
responses API to a gemini model and they would succeed instead of failing
due to the missing usage attribute when telemetry is enabled.

Signed-off-by: Michael Dawson
---
 .../providers/utils/inference/litellm_openai_mixin.py | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/llama_stack/providers/utils/inference/litellm_openai_mixin.py b/llama_stack/providers/utils/inference/litellm_openai_mixin.py
index 966081e9f..10df664eb 100644
--- a/llama_stack/providers/utils/inference/litellm_openai_mixin.py
+++ b/llama_stack/providers/utils/inference/litellm_openai_mixin.py
@@ -374,6 +374,14 @@ class LiteLLMOpenAIMixin(
         top_p: float | None = None,
         user: str | None = None,
     ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
+        # Add usage tracking for streaming when telemetry is active
+        from llama_stack.providers.utils.telemetry.tracing import get_current_span
+
+        if stream and get_current_span() is not None:
+            if stream_options is None:
+                stream_options = {"include_usage": True}
+            elif "include_usage" not in stream_options:
+                stream_options = {**stream_options, "include_usage": True}
         model_obj = await self.model_store.get_model(model)
         params = await prepare_openai_completion_params(
             model=self.get_litellm_model_name(model_obj.provider_resource_id),
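
For readers skimming the hunk, the following is a minimal standalone sketch of the option-merging behaviour the added lines implement. It is not part of the patch: `ensure_usage_requested` is a hypothetical helper name chosen for illustration, and `get_current_span` is stubbed locally rather than imported from `llama_stack.providers.utils.telemetry.tracing`.

```python
# Sketch only: mirrors the logic added in openai_chat_completion, outside llama-stack.

def get_current_span():
    # Stub standing in for the telemetry tracing helper; pretend a span is active.
    return object()


def ensure_usage_requested(stream: bool, stream_options: dict | None) -> dict | None:
    """Return stream_options with include_usage requested when streaming under telemetry."""
    if stream and get_current_span() is not None:
        if stream_options is None:
            return {"include_usage": True}
        if "include_usage" not in stream_options:
            # Copy rather than mutate the caller's dict.
            return {**stream_options, "include_usage": True}
    return stream_options


if __name__ == "__main__":
    print(ensure_usage_requested(True, None))                      # {'include_usage': True}
    print(ensure_usage_requested(True, {"include_usage": False}))  # caller's explicit choice is preserved
    print(ensure_usage_requested(False, None))                     # None: non-streaming requests are untouched
```

One design point visible in both the sketch and the diff: an explicit `include_usage` set by the caller (even `False`) is left alone, so the telemetry path only fills in the option when the caller expressed no preference.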