chore(tests): normalize recording IDs and timestamps to reduce git diff noise (#3676)

IDs are now deterministic hashes based on request content, and
timestamps are normalized to constants, eliminating spurious changes
when re-recording tests.
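
A minimal sketch of the idea, assuming a content-hash scheme (the helper name, hash choice, and truncation length are illustrative, not the actual code in `inference_recorder.py`):

```python
import hashlib
import json

# Assumed normalization constant; the diff below shows recordings rewritten
# to the Unix epoch.
NORMALIZED_TIMESTAMP = "1970-01-01T00:00:00.000000Z"


def deterministic_id(request_body: dict, prefix: str = "rec") -> str:
    """Hypothetical helper: derive a stable ID from request content.

    Hashing the canonical JSON form of the request means the same request
    always yields the same ID, so re-recording does not churn IDs in git.
    """
    canonical = json.dumps(request_body, sort_keys=True, separators=(",", ":"))
    return f"{prefix}-{hashlib.sha256(canonical.encode()).hexdigest()[:24]}"
```

With IDs derived this way and timestamps pinned to a constant, re-recording the same request rewrites the file byte-for-byte, so only genuine changes to request or response content show up in the diff.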

## Changes
- Updated `inference_recorder.py` to normalize IDs and timestamps during
recording
- Added `scripts/normalize_recordings.py` utility to re-normalize existing
recordings (a sketch follows this list)
- Created documentation in `tests/integration/recordings/README.md`
- Normalized 350 existing recording files
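
A sketch of what that re-normalization pass might look like; the directory layout, chunk structure, and field names below are inferred from the diff further down, not copied from `scripts/normalize_recordings.py`:

```python
import json
from pathlib import Path

RECORDINGS_DIR = Path("tests/integration/recordings")  # assumed location
EPOCH = "1970-01-01T00:00:00.000000Z"
DURATION_FIELDS = ("total_duration", "load_duration",
                   "prompt_eval_duration", "eval_duration")


def normalize_file(path: Path) -> bool:
    """Rewrite one recording in place; return True if anything changed."""
    original = path.read_text()
    record = json.loads(original)
    # Hypothetical layout: a list of streamed response chunks, each wrapped
    # in {"__type__": ..., "__data__": {...}} as in the diff below.
    for chunk in record.get("chunks", []):
        data = chunk.get("__data__", {})
        if "created_at" in data:
            data["created_at"] = EPOCH
        for field in DURATION_FIELDS:
            if data.get(field):
                data[field] = 0
    updated = json.dumps(record, indent=2) + "\n"
    if updated != original:
        path.write_text(updated)
        return True
    return False


if __name__ == "__main__":
    changed = sum(normalize_file(p) for p in sorted(RECORDINGS_DIR.rglob("*.json")))
    print(f"normalized {changed} recording files")
```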

Authored by Ashwin Bharambe on 2025-10-03 17:26:11 -07:00, committed via GitHub
commit 3f36bfaeaa (parent 6bcd3e25f2)
348 changed files with 10154 additions and 8329 deletions

@@ -22,7 +22,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:11.873171882Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -40,7 +40,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:12.073738984Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -58,7 +58,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:12.272476639Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -76,7 +76,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:12.469220325Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -94,7 +94,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:12.665965955Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -112,7 +112,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:12.860442987Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -130,7 +130,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:13.055440385Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -148,7 +148,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:13.25612888Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -166,7 +166,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:13.454322876Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -184,7 +184,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:13.651445403Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -202,7 +202,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:13.851107226Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -220,7 +220,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:14.048095911Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -238,7 +238,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:14.250994986Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -256,7 +256,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:14.454971706Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -274,7 +274,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:14.654349738Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -292,7 +292,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:14.851507509Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -310,7 +310,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:15.044987002Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -328,7 +328,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:15.246563515Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -346,15 +346,15 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:15.447689838Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": true,
 "done_reason": "stop",
-"total_duration": 35945660492,
-"load_duration": 42881569,
+"total_duration": 0,
+"load_duration": 0,
 "prompt_eval_count": 386,
-"prompt_eval_duration": 32326727198,
+"prompt_eval_duration": 0,
 "eval_count": 19,
-"eval_duration": 3575452190,
+"eval_duration": 0,
 "response": "",
 "thinking": null,
 "context": null