Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-04 12:07:34 +00:00
chore: telemetry test (#3405)
# What does this PR do?

- removed fixed-duration sleeps

## Test Plan
This commit is contained in:
parent d4e45cd5f1
commit d2f88a10fb
3 changed files with 10 additions and 17 deletions
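All three files receive the same treatment: each fixed-duration `time.sleep(...)` is replaced by a short-interval poll bounded by a deadline. A minimal sketch of that pattern (the `wait_for` helper and its parameters are illustrative, not code from this repo):

```python
import time


def wait_for(predicate, timeout=30.0, interval=0.1):
    """Poll `predicate` every `interval` seconds until it returns a truthy
    value or `timeout` seconds elapse; return the last result."""
    deadline = time.monotonic() + timeout
    result = predicate()
    while not result and time.monotonic() < deadline:
        time.sleep(interval)
        result = predicate()
    return result
```

Tests then wait only as long as the backend actually needs (e.g. `wait_for(lambda: len(client.telemetry.query_traces(limit=10)) >= 5)`) instead of paying the worst-case sleep on every run.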
```diff
@@ -49,16 +49,13 @@ def setup_openai_telemetry_data(llama_stack_client, text_model_id):
         traces = llama_stack_client.telemetry.query_traces(limit=10)
         if len(traces) >= 5:  # 5 OpenAI completion traces
             break
-        time.sleep(1)
+        time.sleep(0.1)
 
     if len(traces) < 5:
         pytest.fail(
             f"Failed to create sufficient OpenAI completion telemetry data after 30s. Got {len(traces)} traces."
         )
 
-    # Wait for 5 seconds to ensure traces has completed logging
-    time.sleep(5)
-
     yield
 
 
```
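In context, this polling sits inside a pytest fixture that seeds telemetry and only yields once enough traces are visible. A hedged reconstruction of the fixture's shape; the decorator, the seeding calls, and the 30-second loop condition are assumptions (the hunk shows only the body, and the deadline is inferred from the "after 30s" failure message):

```python
import time

import pytest


# Assumed fixture shape; only the polling/fail/yield body appears in the hunk.
@pytest.fixture
def setup_openai_telemetry_data(llama_stack_client, text_model_id):
    # ... issue the 5 OpenAI completion calls here (not shown in the diff) ...
    start_time = time.time()
    while time.time() - start_time < 30:  # assumed deadline, per the "after 30s" message
        traces = llama_stack_client.telemetry.query_traces(limit=10)
        if len(traces) >= 5:  # 5 OpenAI completion traces
            break
        time.sleep(0.1)

    if len(traces) < 5:
        pytest.fail(
            f"Failed to create sufficient OpenAI completion telemetry data after 30s. Got {len(traces)} traces."
        )

    yield
```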
```diff
@@ -185,11 +182,13 @@ def test_openai_completion_creates_telemetry(llama_stack_client, text_model_id):
     assert len(response.choices) > 0, "Response should have at least one choice"
 
     # Wait for telemetry to be recorded
-    time.sleep(3)
-
-    # Check that we have more traces now
-    final_traces = llama_stack_client.telemetry.query_traces(limit=20)
-    final_count = len(final_traces)
+    start_time = time.time()
+    while time.time() - start_time < 30:
+        final_traces = llama_stack_client.telemetry.query_traces(limit=20)
+        final_count = len(final_traces)
+        if final_count > initial_count:
+            break
+        time.sleep(0.1)
 
     # Should have at least as many traces as before (might have more due to other activity)
     assert final_count >= initial_count, "Should have at least as many traces after OpenAI call"
```
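Because `final_traces` and `final_count` are reassigned on every iteration, the assertion after the loop always sees a bound value; on timeout, the test degrades to the old single before/after comparison. A self-contained toy run of the same control flow (the stub client is purely illustrative):

```python
import time


class StubTelemetry:
    """Pretends traces appear asynchronously after ~0.3 seconds."""

    def __init__(self):
        self._created = time.time()

    def query_traces(self, limit):
        n = 3 if time.time() - self._created < 0.3 else 5
        return list(range(min(n, limit)))


telemetry = StubTelemetry()
initial_count = len(telemetry.query_traces(limit=20))

start_time = time.time()
while time.time() - start_time < 30:
    final_traces = telemetry.query_traces(limit=20)
    final_count = len(final_traces)
    if final_count > initial_count:
        break
    time.sleep(0.1)

assert final_count >= initial_count, "Should have at least as many traces after OpenAI call"
print(f"initial={initial_count}, final={final_count}")
```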
```diff
@@ -42,14 +42,11 @@ def setup_telemetry_data(llama_stack_client, text_model_id):
         traces = llama_stack_client.telemetry.query_traces(limit=10)
         if len(traces) >= 4:
             break
-        time.sleep(1)
+        time.sleep(0.1)
 
     if len(traces) < 4:
         pytest.fail(f"Failed to create sufficient telemetry data after 30s. Got {len(traces)} traces.")
 
-    # Wait for 5 seconds to ensure traces has completed logging
-    time.sleep(5)
-
     yield
 
 
```
```diff
@@ -46,10 +46,7 @@ def setup_telemetry_metrics_data(openai_client, client_with_models, text_model_i
             break
         except Exception:
             pass
-        time.sleep(1)
+        time.sleep(0.1)
 
-    # Wait additional time to ensure all metrics are processed
-    time.sleep(5)
-
     # Return the token lists for use in tests
     return {"prompt_tokens": prompt_tokens, "completion_tokens": completion_tokens, "total_tokens": total_tokens}
```
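Here the cadence change lands inside a try/except retry loop, and the trailing five-second settling sleep goes away because the loop already gates on the data being readable. A sketch of that shape, with a hypothetical `fetch_metrics` standing in for the real query (the surrounding loop structure is inferred from the hunk's context lines):

```python
import time


class FlakyMetricsBackend:
    """Illustrative stub: fails twice to mimic a backend still processing."""

    def __init__(self):
        self.attempts = 0

    def fetch_metrics(self):
        self.attempts += 1
        if self.attempts < 3:
            raise RuntimeError("metrics not ready")
        return {"prompt_tokens": [10], "completion_tokens": [20], "total_tokens": [30]}


backend = FlakyMetricsBackend()
metrics = None
start_time = time.time()
while time.time() - start_time < 30:
    try:
        metrics = backend.fetch_metrics()
        break
    except Exception:
        pass  # transient failure: retry until the deadline
    time.sleep(0.1)

assert metrics is not None, "metrics never became available within 30s"
```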