Mirror of https://github.com/meta-llama/llama-stack.git

Merge 8f0413e743 into 48a551ecbc
Commit: b3271c6c9e
14 changed files with 905 additions and 17 deletions
tests/integration/agents/conftest.py (new file, 170 lines)
@@ -0,0 +1,170 @@

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from collections.abc import AsyncGenerator, Callable
from pathlib import Path
from typing import Any
from unittest.mock import Mock, patch

import pytest

from llama_stack.apis.inference import ToolDefinition
from llama_stack.apis.tools import ToolInvocationResult
from llama_stack.providers.inline.agents.meta_reference.agent_instance import ChatAgent
from llama_stack.providers.inline.telemetry.meta_reference.config import (
    TelemetryConfig,
    TelemetrySink,
)
from llama_stack.providers.inline.telemetry.meta_reference.telemetry import (
    TelemetryAdapter,
)
from llama_stack.providers.utils.kvstore.config import SqliteKVStoreConfig
from llama_stack.providers.utils.kvstore.sqlite.sqlite import SqliteKVStoreImpl
from llama_stack.providers.utils.telemetry import tracing as telemetry_tracing


@pytest.fixture
def make_agent_fixture():
    def _make(telemetry, kvstore) -> ChatAgent:
        agent = ChatAgent(
            agent_id="test-agent",
            agent_config=Mock(),
            inference_api=Mock(),
            safety_api=Mock(),
            tool_runtime_api=Mock(),
            tool_groups_api=Mock(),
            vector_io_api=Mock(),
            telemetry_api=telemetry,
            persistence_store=kvstore,
            created_at="2025-01-01T00:00:00Z",
            policy=[],
        )
        agent.agent_config.client_tools = []
        agent.agent_config.max_infer_iters = 5
        agent.input_shields = []
        agent.output_shields = []
        agent.tool_defs = [
            ToolDefinition(tool_name="web_search", description="", parameters={}),
            ToolDefinition(tool_name="knowledge_search", description="", parameters={}),
        ]
        agent.tool_name_to_args = {}

        # Stub tool runtime invoke_tool
        async def _mock_invoke_tool(
            *args: Any,
            tool_name: str | None = None,
            kwargs: dict | None = None,
            **extra: Any,
        ):
            return ToolInvocationResult(content="Tool execution result")

        agent.tool_runtime_api.invoke_tool = _mock_invoke_tool
        return agent

    return _make


def _chat_stream(tool_name: str | None, content: str = ""):
    from llama_stack.apis.common.content_types import (
        TextDelta,
        ToolCallDelta,
        ToolCallParseStatus,
    )
    from llama_stack.apis.inference import (
        ChatCompletionResponseEvent,
        ChatCompletionResponseEventType,
        ChatCompletionResponseStreamChunk,
        StopReason,
    )
    from llama_stack.models.llama.datatypes import ToolCall

    async def gen():
        # Start
        yield ChatCompletionResponseStreamChunk(
            event=ChatCompletionResponseEvent(
                event_type=ChatCompletionResponseEventType.start,
                delta=TextDelta(text=""),
            )
        )

        # Content
        if content:
            yield ChatCompletionResponseStreamChunk(
                event=ChatCompletionResponseEvent(
                    event_type=ChatCompletionResponseEventType.progress,
                    delta=TextDelta(text=content),
                )
            )

        # Tool call if specified
        if tool_name:
            yield ChatCompletionResponseStreamChunk(
                event=ChatCompletionResponseEvent(
                    event_type=ChatCompletionResponseEventType.progress,
                    delta=ToolCallDelta(
                        tool_call=ToolCall(call_id="call_0", tool_name=tool_name, arguments={}),
                        parse_status=ToolCallParseStatus.succeeded,
                    ),
                )
            )

        # Complete
        yield ChatCompletionResponseStreamChunk(
            event=ChatCompletionResponseEvent(
                event_type=ChatCompletionResponseEventType.complete,
                delta=TextDelta(text=""),
                stop_reason=StopReason.end_of_turn,
            )
        )

    return gen()


@pytest.fixture
async def telemetry(tmp_path: Path) -> AsyncGenerator[TelemetryAdapter, None]:
    db_path = tmp_path / "trace_store.db"
    cfg = TelemetryConfig(
        sinks=[TelemetrySink.CONSOLE, TelemetrySink.SQLITE],
        sqlite_db_path=str(db_path),
    )
    telemetry = TelemetryAdapter(cfg, deps={})
    telemetry_tracing.setup_logger(telemetry)
    try:
        yield telemetry
    finally:
        await telemetry.shutdown()


@pytest.fixture
async def kvstore(tmp_path: Path) -> SqliteKVStoreImpl:
    kv_path = tmp_path / "agent_kvstore.db"
    kv = SqliteKVStoreImpl(SqliteKVStoreConfig(db_path=str(kv_path)))
    await kv.initialize()
    return kv


@pytest.fixture
def span_patch():
    with (
        patch("llama_stack.providers.inline.agents.meta_reference.agent_instance.get_current_span") as mock_span,
        patch(
            "llama_stack.providers.utils.telemetry.tracing.generate_span_id",
            return_value="0000000000000abc",
        ),
    ):
        mock_span.return_value = Mock(get_span_context=Mock(return_value=Mock(trace_id=0x123, span_id=0xABC)))
        yield


@pytest.fixture
def make_completion_fn() -> Callable[[str | None, str], Callable]:
    def _factory(tool_name: str | None = None, content: str = "") -> Callable:
        async def chat_completion(*args: Any, **kwargs: Any):
            return _chat_stream(tool_name, content)

        return chat_completion

    return _factory
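As a quick sanity check, the fake stream above can be driven directly; a minimal sketch (this script is illustrative and not part of the diff):

import asyncio

async def _demo():
    # Drain the fake stream: expect start, progress (content),
    # progress (tool call), then complete.
    async for chunk in _chat_stream("web_search", "Searching..."):
        print(chunk.event.event_type)

asyncio.run(_demo())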
tests/integration/agents/test_agent_metrics_integration.py (new file, 83 lines)
@@ -0,0 +1,83 @@

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import asyncio
from typing import Any

from llama_stack.providers.utils.telemetry import tracing as telemetry_tracing


class TestAgentMetricsIntegration:
    async def test_agent_metrics_end_to_end(
        self: Any,
        telemetry: Any,
        kvstore: Any,
        make_agent_fixture: Any,
        span_patch: Any,
        make_completion_fn: Any,
    ) -> None:
        from llama_stack.apis.inference import (
            SamplingParams,
            UserMessage,
        )

        agent: Any = make_agent_fixture(telemetry, kvstore)

        session_id = await agent.create_session("s")
        sampling_params = SamplingParams(max_tokens=64)

        # single trace: plain, knowledge_search, web_search
        await telemetry_tracing.start_trace("agent_metrics")
        agent.inference_api.chat_completion = make_completion_fn(None, "Hello! I can help you with that.")
        async for _ in agent.run(
            session_id,
            "t1",
            [UserMessage(content="Hello")],
            sampling_params,
            stream=True,
        ):
            pass
        agent.inference_api.chat_completion = make_completion_fn("knowledge_search", "")
        async for _ in agent.run(
            session_id,
            "t2",
            [UserMessage(content="Please search knowledge")],
            sampling_params,
            stream=True,
        ):
            pass
        agent.inference_api.chat_completion = make_completion_fn("web_search", "")
        async for _ in agent.run(
            session_id,
            "t3",
            [UserMessage(content="Please search web")],
            sampling_params,
            stream=True,
        ):
            pass
        await telemetry_tracing.end_trace()

        # Poll briefly to avoid flake with async persistence
        tool_labels: set[str] = set()
        for _ in range(10):
            resp = await telemetry.query_metrics("llama_stack_agent_tool_calls_total", start_time=0, end_time=None)
            tool_labels.clear()
            for series in getattr(resp, "data", []) or []:
                for lbl in getattr(series, "labels", []) or []:
                    name = getattr(lbl, "name", None) or getattr(lbl, "key", None)
                    value = getattr(lbl, "value", None)
                    if name == "tool" and value:
                        tool_labels.add(value)

            # Look for both web_search AND some form of knowledge search
            if ("web_search" in tool_labels) and ("rag" in tool_labels or "knowledge_search" in tool_labels):
                break
            await asyncio.sleep(0.1)

        # More descriptive assertion
        assert bool(tool_labels & {"web_search", "rag", "knowledge_search"}), (
            f"Expected tool calls not found. Got: {tool_labels}"
        )
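The retry loop in this test is a general pattern for metrics that land asynchronously; a reusable version might look like the following (the `eventually` helper is hypothetical, not part of the diff):

import asyncio
from collections.abc import Awaitable, Callable

async def eventually(check: Callable[[], Awaitable[bool]], attempts: int = 10, delay: float = 0.1) -> bool:
    """Poll an async predicate until it passes or the attempts run out."""
    for _ in range(attempts):
        if await check():
            return True
        await asyncio.sleep(delay)
    return False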
tests/unit/providers/agents/__init__.py (new file, 5 lines)
@@ -0,0 +1,5 @@

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
tests/unit/providers/agents/test_agent_metrics.py (new file, 212 lines)
@@ -0,0 +1,212 @@

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import asyncio
from unittest.mock import AsyncMock, Mock

import pytest
from opentelemetry.trace import SpanContext, TraceFlags

from llama_stack.providers.inline.agents.meta_reference.agent_instance import ChatAgent


class FakeSpan:
    def __init__(self, trace_id: int = 123, span_id: int = 456):
        self._context = SpanContext(
            trace_id=trace_id,
            span_id=span_id,
            is_remote=False,
            trace_flags=TraceFlags(0x01),
        )

    def get_span_context(self):
        return self._context


@pytest.fixture
def agent_with_telemetry():
    """Create a real ChatAgent with telemetry API"""
    telemetry_api = AsyncMock()

    agent = ChatAgent(
        agent_id="test-agent",
        agent_config=Mock(),
        inference_api=Mock(),
        safety_api=Mock(),
        tool_runtime_api=Mock(),
        tool_groups_api=Mock(),
        vector_io_api=Mock(),
        telemetry_api=telemetry_api,
        persistence_store=Mock(),
        created_at="2025-01-01T00:00:00Z",
        policy=[],
    )
    return agent


@pytest.fixture
def agent_without_telemetry():
    """Create a real ChatAgent without telemetry API"""
    agent = ChatAgent(
        agent_id="test-agent",
        agent_config=Mock(),
        inference_api=Mock(),
        safety_api=Mock(),
        tool_runtime_api=Mock(),
        tool_groups_api=Mock(),
        vector_io_api=Mock(),
        telemetry_api=None,
        persistence_store=Mock(),
        created_at="2025-01-01T00:00:00Z",
        policy=[],
    )
    return agent


class TestAgentMetrics:
    def test_step_execution_metrics(self, agent_with_telemetry, monkeypatch):
        """Test that step execution metrics are emitted correctly"""
        fake_span = FakeSpan()
        monkeypatch.setattr(
            "llama_stack.providers.inline.agents.meta_reference.agent_instance.get_current_span", lambda: fake_span
        )

        # Capture the metric instead of actually creating an async task
        captured_metrics = []

        async def capture_metric(metric):
            captured_metrics.append(metric)

        monkeypatch.setattr(agent_with_telemetry.telemetry_api, "log_event", capture_metric)

        def mock_create_task(coro, *, name=None):
            return asyncio.run(coro)

        monkeypatch.setattr(
            "llama_stack.providers.inline.agents.meta_reference.agent_instance.asyncio.create_task", mock_create_task
        )

        agent_with_telemetry._track_step()

        assert len(captured_metrics) == 1
        metric = captured_metrics[0]
        assert metric.metric == "llama_stack_agent_steps_total"
        assert metric.value == 1
        assert metric.unit == "1"
        assert metric.attributes["agent_id"] == "test-agent"

    def test_workflow_completion_metrics(self, agent_with_telemetry, monkeypatch):
        """Test that workflow completion metrics are emitted correctly"""
        fake_span = FakeSpan()
        monkeypatch.setattr(
            "llama_stack.providers.inline.agents.meta_reference.agent_instance.get_current_span", lambda: fake_span
        )

        captured_metrics = []

        async def capture_metric(metric):
            captured_metrics.append(metric)

        monkeypatch.setattr(agent_with_telemetry.telemetry_api, "log_event", capture_metric)

        def mock_create_task(coro, *, name=None):
            return asyncio.run(coro)

        monkeypatch.setattr(
            "llama_stack.providers.inline.agents.meta_reference.agent_instance.asyncio.create_task", mock_create_task
        )

        agent_with_telemetry._track_workflow("completed", 2.5)

        assert len(captured_metrics) == 2

        # Check workflow count metric
        count_metric = captured_metrics[0]
        assert count_metric.metric == "llama_stack_agent_workflows_total"
        assert count_metric.value == 1
        assert count_metric.attributes["status"] == "completed"

        # Check duration metric
        duration_metric = captured_metrics[1]
        assert duration_metric.metric == "llama_stack_agent_workflow_duration_seconds"
        assert duration_metric.value == 2.5
        assert duration_metric.unit == "s"

    def test_tool_usage_metrics(self, agent_with_telemetry, monkeypatch):
        """Test that tool usage metrics are emitted correctly"""
        fake_span = FakeSpan()
        monkeypatch.setattr(
            "llama_stack.providers.inline.agents.meta_reference.agent_instance.get_current_span", lambda: fake_span
        )

        captured_metrics = []

        async def capture_metric(metric):
            captured_metrics.append(metric)

        monkeypatch.setattr(agent_with_telemetry.telemetry_api, "log_event", capture_metric)

        def mock_create_task(coro, *, name=None):
            return asyncio.run(coro)

        monkeypatch.setattr(
            "llama_stack.providers.inline.agents.meta_reference.agent_instance.asyncio.create_task", mock_create_task
        )

        agent_with_telemetry._track_tool("web_search")

        assert len(captured_metrics) == 1
        metric = captured_metrics[0]
        assert metric.metric == "llama_stack_agent_tool_calls_total"
        assert metric.attributes["tool"] == "web_search"

    def test_knowledge_search_tool_mapping(self, agent_with_telemetry, monkeypatch):
        """Test that knowledge_search tool is mapped to rag"""
        fake_span = FakeSpan()
        monkeypatch.setattr(
            "llama_stack.providers.inline.agents.meta_reference.agent_instance.get_current_span", lambda: fake_span
        )

        captured_metrics = []

        async def capture_metric(metric):
            captured_metrics.append(metric)

        monkeypatch.setattr(agent_with_telemetry.telemetry_api, "log_event", capture_metric)

        def mock_create_task(coro, *, name=None):
            return asyncio.run(coro)

        monkeypatch.setattr(
            "llama_stack.providers.inline.agents.meta_reference.agent_instance.asyncio.create_task", mock_create_task
        )

        agent_with_telemetry._track_tool("knowledge_search")

        assert len(captured_metrics) == 1
        metric = captured_metrics[0]
        assert metric.attributes["tool"] == "rag"

    def test_no_telemetry_api(self, agent_without_telemetry):
        """Test that methods work gracefully when telemetry_api is None"""
        # These should not crash
        agent_without_telemetry._track_step()
        agent_without_telemetry._track_workflow("failed", 1.0)
        agent_without_telemetry._track_tool("web_search")

    def test_no_active_span(self, agent_with_telemetry, monkeypatch):
        """Test that methods work gracefully when no span is active"""
        monkeypatch.setattr(
            "llama_stack.providers.inline.agents.meta_reference.agent_instance.get_current_span", lambda: None
        )

        # These should not crash and should not call telemetry
        agent_with_telemetry._track_step()
        agent_with_telemetry._track_workflow("failed", 1.0)
        agent_with_telemetry._track_tool("web_search")

        # Telemetry should not have been called
        agent_with_telemetry.telemetry_api.log_event.assert_not_called()
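These unit tests pin down the contract of `_track_tool` without showing its body; a plausible sketch consistent with the assertions above follows. This is a reconstruction inferred from the tests, not the actual implementation, and the imports and field formatting are assumptions:

import asyncio
import time

from opentelemetry.trace import get_current_span

from llama_stack.apis.telemetry import MetricEvent

# Hypothetical ChatAgent method, reconstructed from the test assertions.
def _track_tool(self, tool_name: str) -> None:
    span = get_current_span()
    # Degrade gracefully when telemetry is disabled or no span is active.
    if self.telemetry_api is None or span is None:
        return
    # knowledge_search is reported under the canonical "rag" label.
    tool_label = "rag" if tool_name == "knowledge_search" else tool_name
    ctx = span.get_span_context()
    metric = MetricEvent(
        trace_id=format(ctx.trace_id, "x"),
        span_id=format(ctx.span_id, "x"),
        metric="llama_stack_agent_tool_calls_total",
        value=1,
        timestamp=time.time(),
        unit="1",
        attributes={"agent_id": self.agent_id, "tool": tool_label},
    )
    # Fire-and-forget; the tests intercept create_task to run it synchronously.
    asyncio.create_task(self.telemetry_api.log_event(metric))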
tests/unit/providers/telemetry/__init__.py (new file, 5 lines)
@@ -0,0 +1,5 @@

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
tests/unit/providers/telemetry/test_agent_metrics_histogram.py (new file, 244 lines)
@@ -0,0 +1,244 @@

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from unittest.mock import Mock

import pytest

from llama_stack.apis.telemetry import MetricEvent, MetricType
from llama_stack.providers.inline.telemetry.meta_reference.config import TelemetryConfig
from llama_stack.providers.inline.telemetry.meta_reference.telemetry import TelemetryAdapter


class TestAgentMetricsHistogram:
    """Tests for agent histogram metrics"""

    @pytest.fixture
    def config(self):
        return TelemetryConfig(service_name="test-service", sinks=[])

    @pytest.fixture
    def adapter(self, config):
        adapter = TelemetryAdapter(config, {})
        adapter.meter = Mock()  # skip otel setup
        return adapter

    def test_histogram_creation(self, adapter):
        mock_hist = Mock()
        adapter.meter.create_histogram.return_value = mock_hist

        from llama_stack.providers.inline.telemetry.meta_reference.telemetry import _GLOBAL_STORAGE

        _GLOBAL_STORAGE["histograms"] = {}

        result = adapter._get_or_create_histogram("test_histogram", "s")

        assert result == mock_hist
        adapter.meter.create_histogram.assert_called_once_with(
            name="test_histogram",
            unit="s",
            description="test histogram",
        )
        assert _GLOBAL_STORAGE["histograms"]["test_histogram"] == mock_hist

    def test_histogram_reuse(self, adapter):
        mock_hist = Mock()
        from llama_stack.providers.inline.telemetry.meta_reference.telemetry import _GLOBAL_STORAGE

        _GLOBAL_STORAGE["histograms"] = {"existing_histogram": mock_hist}

        result = adapter._get_or_create_histogram("existing_histogram", "ms")

        assert result == mock_hist
        adapter.meter.create_histogram.assert_not_called()

    def test_workflow_duration_histogram(self, adapter):
        mock_hist = Mock()
        adapter.meter.create_histogram.return_value = mock_hist

        from llama_stack.providers.inline.telemetry.meta_reference.telemetry import _GLOBAL_STORAGE

        _GLOBAL_STORAGE["histograms"] = {}

        event = MetricEvent(
            trace_id="123",
            span_id="456",
            metric="llama_stack_agent_workflow_duration_seconds",
            value=15.7,
            timestamp=1234567890.0,
            unit="s",
            attributes={"agent_id": "test-agent"},
            metric_type=MetricType.HISTOGRAM,
        )

        adapter._log_metric(event)

        adapter.meter.create_histogram.assert_called_once_with(
            name="llama_stack_agent_workflow_duration_seconds",
            unit="s",
            description="llama stack agent workflow duration seconds",
        )
        mock_hist.record.assert_called_once_with(15.7, attributes={"agent_id": "test-agent"})

    def test_duration_buckets_configured_via_views(self, adapter):
        mock_hist = Mock()
        adapter.meter.create_histogram.return_value = mock_hist

        from llama_stack.providers.inline.telemetry.meta_reference.telemetry import _GLOBAL_STORAGE

        _GLOBAL_STORAGE["histograms"] = {}

        event = MetricEvent(
            trace_id="123",
            span_id="456",
            metric="custom_duration_seconds",
            value=5.2,
            timestamp=1234567890.0,
            unit="s",
            attributes={},
            metric_type=MetricType.HISTOGRAM,
        )

        adapter._log_metric(event)

        # buckets configured via otel views, not passed to create_histogram
        mock_hist.record.assert_called_once_with(5.2, attributes={})

    def test_non_duration_uses_counter(self, adapter):
        mock_counter = Mock()
        adapter.meter.create_counter.return_value = mock_counter

        from llama_stack.providers.inline.telemetry.meta_reference.telemetry import _GLOBAL_STORAGE

        _GLOBAL_STORAGE["counters"] = {}

        event = MetricEvent(
            trace_id="123",
            span_id="456",
            metric="llama_stack_agent_workflows_total",
            value=1,
            timestamp=1234567890.0,
            unit="1",
            attributes={"agent_id": "test-agent", "status": "completed"},
        )

        adapter._log_metric(event)

        adapter.meter.create_counter.assert_called_once()
        adapter.meter.create_histogram.assert_not_called()
        mock_counter.add.assert_called_once_with(1, attributes={"agent_id": "test-agent", "status": "completed"})

    def test_no_meter_doesnt_crash(self, adapter):
        adapter.meter = None

        event = MetricEvent(
            trace_id="123",
            span_id="456",
            metric="test_duration_seconds",
            value=1.0,
            timestamp=1234567890.0,
            unit="s",
            attributes={},
        )

        adapter._log_metric(event)  # shouldn't crash

    def test_histogram_vs_counter_by_type(self, adapter):
        mock_hist = Mock()
        mock_counter = Mock()
        adapter.meter.create_histogram.return_value = mock_hist
        adapter.meter.create_counter.return_value = mock_counter

        from llama_stack.providers.inline.telemetry.meta_reference.telemetry import _GLOBAL_STORAGE

        _GLOBAL_STORAGE["histograms"] = {}
        _GLOBAL_STORAGE["counters"] = {}

        # histogram metric
        hist_event = MetricEvent(
            trace_id="123",
            span_id="456",
            metric="workflow_duration_seconds",
            value=1.0,
            timestamp=1234567890.0,
            unit="s",
            attributes={},
            metric_type=MetricType.HISTOGRAM,
        )
        adapter._log_metric(hist_event)
        mock_hist.record.assert_called()

        # counter metric (default type)
        counter_event = MetricEvent(
            trace_id="123",
            span_id="456",
            metric="workflow_total",
            value=1,
            timestamp=1234567890.0,
            unit="1",
            attributes={},
        )
        adapter._log_metric(counter_event)
        mock_counter.add.assert_called()

    def test_storage_separation(self, adapter):
        mock_hist = Mock()
        mock_counter = Mock()
        adapter.meter.create_histogram.return_value = mock_hist
        adapter.meter.create_counter.return_value = mock_counter

        from llama_stack.providers.inline.telemetry.meta_reference.telemetry import _GLOBAL_STORAGE

        _GLOBAL_STORAGE["histograms"] = {}
        _GLOBAL_STORAGE["counters"] = {}

        # create both types
        hist_event = MetricEvent(
            trace_id="123",
            span_id="456",
            metric="test_duration_seconds",
            value=1.0,
            timestamp=1234567890.0,
            unit="s",
            attributes={},
            metric_type=MetricType.HISTOGRAM,
        )
        counter_event = MetricEvent(
            trace_id="123",
            span_id="456",
            metric="test_counter",
            value=1,
            timestamp=1234567890.0,
            unit="1",
            attributes={},
        )

        adapter._log_metric(hist_event)
        adapter._log_metric(counter_event)

        # check they're stored separately
        assert "test_duration_seconds" in _GLOBAL_STORAGE["histograms"]
        assert "test_counter" in _GLOBAL_STORAGE["counters"]
        assert "test_duration_seconds" not in _GLOBAL_STORAGE["counters"]
        assert "test_counter" not in _GLOBAL_STORAGE["histograms"]

    def test_histogram_uses_views_for_buckets(self, adapter):
        mock_hist = Mock()
        adapter.meter.create_histogram.return_value = mock_hist

        from llama_stack.providers.inline.telemetry.meta_reference.telemetry import _GLOBAL_STORAGE

        _GLOBAL_STORAGE["histograms"] = {}

        result = adapter._get_or_create_histogram("test_histogram", "s")

        # buckets come from otel views, not create_histogram params
        adapter.meter.create_histogram.assert_called_once_with(
            name="test_histogram",
            unit="s",
            description="test histogram",
        )
        assert result == mock_hist
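Two tests above note that bucket boundaries are supplied through OpenTelemetry Views rather than `create_histogram` arguments; a minimal sketch of that wiring with the SDK (the boundary values and meter name are illustrative, not taken from the diff):

from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.view import ExplicitBucketHistogramAggregation, View

# Route all *_duration_seconds histograms through duration-friendly buckets.
duration_view = View(
    instrument_name="*_duration_seconds",
    aggregation=ExplicitBucketHistogramAggregation(
        boundaries=[0.1, 0.5, 1.0, 2.5, 5.0, 10.0, 30.0, 60.0],
    ),
)
provider = MeterProvider(views=[duration_view])
meter = provider.get_meter("llama_stack.telemetry")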