mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-03 09:53:45 +00:00
fix(telemetry): token counters changed to histograms to reflect count per request
This commit is contained in:
parent
23fce9718c
commit
25051f1bf0
1 changed files with 3 additions and 0 deletions
|
|
@@ -7,6 +7,7 @@
|
|||
"""Telemetry test configuration supporting both library and server test modes."""
|
||||
|
||||
import os
|
||||
import time
|
||||
|
||||
import pytest
|
||||
|
||||
|
|
@@ -59,6 +60,8 @@ def llama_stack_client(telemetry_test_collector, request):
|
|||
@pytest.fixture
|
||||
def mock_otlp_collector(telemetry_test_collector):
|
||||
"""Provides access to telemetry data and clears between tests."""
|
||||
# prevent race conditions between tests caused by 200ms metric collection interval
|
||||
time.sleep(0.3)
|
||||
telemetry_test_collector.clear()
|
||||
try:
|
||||
yield telemetry_test_collector
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue