Add per-request AI logging, DB batch queue, WS entity updates, and UI polish

- log_thread.py: thread-safe ContextVar bridge so executor threads can log
  individual LLM calls and archive searches back to the event loop
- ai_log.py: init_thread_logging(), notify_entity_update(); WS now pushes
  entity_update messages when book data changes after any plugin or batch run
- batch.py: replace batch_pending.json with batch_queue SQLite table;
  run_batch_consumer() reads queue dynamically so new books can be added
  while batch is running; add_to_queue() deduplicates
- migrate.py: fix _migrate_v1 (clear-on-startup bug); add _migrate_v2 for
  batch_queue table
- _client.py / archive.py / identification.py: wrap each LLM API call and
  archive search with log_thread start/finish entries
- api.py: POST /api/batch returns {already_running, added}; notify_entity_update
  after identify pipeline
- models.default.yaml: strengthen ai_identify confidence-scoring instructions;
  warn against placeholder data
- detail-render.js: book log entries show clickable ID + spine thumbnail;
  book spine/title images open full-screen popup
- events.js: batch-start handles already_running+added; open-img-popup action
- init.js: entity_update WS handler; image popup close listeners
- overlays.css / index.html: full-screen image popup overlay
- eslint.config.js: add new globals; fix no-redeclare/no-unused-vars for
  multi-file global architecture; all lint errors resolved

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-03-11 12:10:54 +03:00
parent fd32be729f
commit b94f222c96
41 changed files with 2566 additions and 586 deletions

View File

@@ -26,6 +26,7 @@ from models import (
BoundaryDetectResult,
BookRow,
CandidateRecord,
IdentifyBlock,
PluginLookupResult,
TextRecognizeResult,
)
@@ -56,6 +57,7 @@ def _book(**kwargs: object) -> BookRow:
"analyzed_at": None,
"created_at": "2024-01-01T00:00:00",
"candidates": None,
"ai_blocks": None,
}
defaults.update(kwargs)
return BookRow(**defaults) # type: ignore[arg-type]
@@ -75,7 +77,7 @@ def seeded_db(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
c.execute("INSERT INTO cabinets VALUES (?,?,?,?,?,?,?,?)", ["c1", "r1", "Cabinet", None, None, None, 1, ts])
c.execute("INSERT INTO shelves VALUES (?,?,?,?,?,?,?,?)", ["s1", "c1", "Shelf", None, None, None, 1, ts])
c.execute(
"INSERT INTO books VALUES (?,?,0,NULL,'','','','','','','','','','','','','unidentified',0,NULL,?,NULL)",
"INSERT INTO books VALUES (?,?,0,NULL,'','','','','','','','','','','','','unidentified',0,NULL,?,NULL,NULL)",
["b1", "s1", ts],
)
c.commit()
@@ -93,6 +95,10 @@ class _BoundaryDetectorStub:
auto_queue = False
target = "books"
# Stub model name so code that logs the plugin's model string has a value to read.
@property
def model(self) -> str:
return "stub-model"
# Fixed max image dimension (px) for the stub; mirrors the real plugin's property.
@property
def max_image_px(self) -> int:
return 1600
@@ -109,6 +115,10 @@ class _BoundaryDetectorShelvesStub:
auto_queue = False
target = "shelves"
# Stub model name so code that logs the plugin's model string has a value to read.
@property
def model(self) -> str:
return "stub-model"
# Fixed max image dimension (px) for the stub; mirrors the real plugin's property.
@property
def max_image_px(self) -> int:
return 1600
@@ -124,6 +134,10 @@ class _TextRecognizerStub:
name = "Stub TR"
auto_queue = False
# Stub model name so code that logs the plugin's model string has a value to read.
@property
def model(self) -> str:
return "stub-model"
# Fixed max image dimension (px) for the stub; mirrors the real plugin's property.
@property
def max_image_px(self) -> int:
return 1600
@@ -139,19 +153,29 @@ class _BookIdentifierStub:
name = "Stub BI"
auto_queue = False
# Stub model name so code that logs the plugin's model string has a value to read.
@property
def model(self) -> str:
return "stub-model"
# Fixed max image dimension (px) for the stub; mirrors the real plugin's property.
@property
def max_image_px(self) -> int:
return 1600
# Acceptance cutoff used by the identify pipeline; 0.8 is below the stub's
# 0.9 score, so the stubbed result is accepted in tests.
@property
def confidence_threshold(self) -> float:
return 0.8
# NOTE(review): this appears to be the PRE-change identify() — the diff's
# '-' markers were stripped in extraction, so old and new versions coexist here.
# Returns a fixed AIIdentifyResult-shaped dict with a 0.9 confidence.
def identify(self, raw_text: str) -> AIIdentifyResult:
return {
"title": "Found Book",
"author": "Found Author",
"year": "2000",
"isbn": "",
"publisher": "",
"confidence": 0.9,
}
# Stub is text-only: advertises that no vision-language model path is used,
# so the pipeline should not pass images to identify().
@property
def is_vlm(self) -> bool:
return False
# Post-change identify(): new signature takes archive candidates and
# (mime, data) image tuples, returning a list of IdentifyBlock results.
# Ignores all inputs and yields one fixed block scored 0.9 (above the
# stub's 0.8 confidence_threshold, so it is accepted downstream).
def identify(
self,
raw_text: str,
archive_results: list[CandidateRecord],
images: list[tuple[str, str]],
) -> list[IdentifyBlock]:
return [IdentifyBlock(title="Found Book", author="Found Author", year="2000", score=0.9)]
class _ArchiveSearcherStub: