chore(tests): normalize recording IDs and timestamps to reduce git diff noise (#3676)

Recording IDs are now deterministic hashes of the request content, and
timestamps and timing fields are normalized to fixed constants, eliminating
spurious diffs whenever tests are re-recorded.
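
To make the intent concrete, here is a minimal sketch of the two
normalization rules, assuming a SHA-256 hash over the canonicalized request
JSON and the fixed epoch timestamp seen in the diff below; the names
(`deterministic_id`, `FAKE_TIMESTAMP`) are illustrative, not the actual
`inference_recorder.py` API.

```python
import hashlib
import json

# Timestamps in recordings are pinned to a constant like this one instead of
# the wall-clock time of the recording run.
FAKE_TIMESTAMP = "1970-01-01T00:00:00.000000Z"


def deterministic_id(request: dict, prefix: str = "rec") -> str:
    """Derive a stable recording ID from the request content alone.

    The same request always hashes to the same ID, so re-recording a test
    does not churn IDs the way random or time-based IDs would.
    """
    canonical = json.dumps(request, sort_keys=True, separators=(",", ":"))
    digest = hashlib.sha256(canonical.encode("utf-8")).hexdigest()[:24]
    return f"{prefix}-{digest}"


# Identical requests produce identical IDs across recording runs.
req = {"model": "llama3.2:3b-instruct-fp16", "prompt": "Hello"}
assert deterministic_id(req) == deterministic_id(dict(req))
```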

## Changes
- Updated `inference_recorder.py` to normalize IDs and timestamps during
recording
- Added `scripts/normalize_recordings.py` utility to re-normalize
existing recordings (see the sketch after this list)
- Created documentation in `tests/integration/recordings/README.md`
- Normalized 350 existing recording files
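
For the re-normalization step, a hypothetical pass over existing recordings
could look like the following; the real `scripts/normalize_recordings.py`
may differ in structure and CLI, and the recordings path and JSON layout
here are assumptions inferred from the diff below.

```python
import json
from pathlib import Path

RECORDINGS_DIR = Path("tests/integration/recordings")  # assumed location
FAKE_TIMESTAMP = "1970-01-01T00:00:00.000000Z"
DURATION_FIELDS = ("total_duration", "load_duration", "prompt_eval_duration", "eval_duration")


def normalize_chunk(data: dict) -> None:
    """Pin volatile fields of one response chunk to fixed constants."""
    if "created_at" in data:
        data["created_at"] = FAKE_TIMESTAMP
    for field in DURATION_FIELDS:
        if data.get(field) is not None:
            data[field] = 0


def normalize_file(path: Path) -> bool:
    """Rewrite one recording in place; return True if the file was changed."""
    original = path.read_text()
    recording = json.loads(original)
    body = recording.get("response", {}).get("body", [])
    for chunk in body if isinstance(body, list) else [body]:
        normalize_chunk(chunk.get("__data__", {}))
    updated = json.dumps(recording, indent=2) + "\n"
    if updated == original:
        return False
    path.write_text(updated)
    return True


if __name__ == "__main__":
    changed = sum(normalize_file(p) for p in sorted(RECORDINGS_DIR.rglob("*.json")))
    print(f"normalized {changed} recording files")
```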

Commit 3f36bfaeaa (parent 6bcd3e25f2), authored by Ashwin Bharambe on
2025-10-03 17:26:11 -07:00 and committed by GitHub.

348 changed files with 10154 additions and 8329 deletions.

@@ -22,7 +22,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:34.037711241Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -40,7 +40,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:34.234670218Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -58,7 +58,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:34.430073402Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -76,7 +76,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:34.629562851Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -94,7 +94,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:34.828769603Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -112,7 +112,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:35.027101431Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -130,7 +130,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:35.228873906Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -148,7 +148,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:35.429147653Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -166,7 +166,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:35.626756664Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -184,7 +184,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:35.822847752Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -202,7 +202,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:36.021190515Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -220,7 +220,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:36.228035317Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -238,7 +238,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:36.424413535Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -256,7 +256,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:36.62756048Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -274,7 +274,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:36.828422414Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -292,7 +292,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:37.033389762Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -310,7 +310,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:37.239556153Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -328,7 +328,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:37.448526412Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -346,15 +346,15 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-10-01T01:36:37.648660737Z",
+"created_at": "1970-01-01T00:00:00.000000Z",
 "done": true,
 "done_reason": "stop",
-"total_duration": 6101960547,
-"load_duration": 42550477,
+"total_duration": 0,
+"load_duration": 0,
 "prompt_eval_count": 371,
-"prompt_eval_duration": 2446898261,
+"prompt_eval_duration": 0,
 "eval_count": 19,
-"eval_duration": 3611916940,
+"eval_duration": 0,
 "response": "",
 "thinking": null,
 "context": null