fix: use savepoints for row-by-row error isolation in Excel import

Prevents a single duplicate fall_id from rolling back the entire
import session. Each row insert now uses db.begin_nested() so
constraint violations are isolated to the offending row.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
CCS Admin 2026-02-24 09:01:42 +00:00
parent 47bd9c8a08
commit 0e4d19e8bc

View file

@@ -494,12 +494,22 @@ def import_abrechnung_sheet(
abrechnung_datum=abrechnung_datum,
import_source=f"Abrechnung_DAK.xlsx:{sheet_name}",
)
# Use savepoint so a single row failure doesn't break the session
nested = db.begin_nested()
try:
db.add(case)
imported += 1
# Flush in batches of 100 to catch constraint violations early
if imported % 100 == 0:
db.flush()
nested.commit()
imported += 1
except Exception as flush_err:
nested.rollback()
errors.append(f"Row {row_num} ({nachname}): {flush_err}")
logger.warning(
"Import error in sheet '%s' row %d: %s",
sheet_name, row_num, flush_err,
)
skipped += 1
continue
except Exception as e:
nachname_display = _str_or_none(_get(row, col_map, "nachname")) or "?"
@@ -509,10 +519,6 @@ def import_abrechnung_sheet(
sheet_name, row_num, e,
)
# Final flush
if imported > 0:
db.flush()
logger.info(
"Sheet '%s': %d imported, %d skipped, %d errors",
sheet_name, imported, skipped, len(errors),