cms.c2sgmbh/scripts/backup/backup-db.sh
Martin Porwoll a066539129 feat: add automated PostgreSQL backup system with S3 offsite storage
- Add backup-db.sh for daily automated backups via cron
- Add restore-db.sh for interactive database restoration
- Add setup-backup.sh for easy setup on new servers
- Support local and S3 (Hetzner Object Storage) backup locations
- 30-day retention with automatic cleanup
- Credentials stored securely in ~/.pgpass and ~/.s3cfg
- Comprehensive documentation with disaster recovery checklist

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-11 14:10:39 +00:00

347 lines
10 KiB
Bash
Executable file

#!/bin/bash
#
# Payload CMS - Tägliches PostgreSQL Backup-Skript
# Erstellt: 11.12.2025
# Aktualisiert: 11.12.2025 (S3 Offsite-Backup)
#
# Verwendung:
# ./backup-db.sh # Normales Backup
# ./backup-db.sh --verbose # Mit detaillierter Ausgabe
#
# Voraussetzungen:
# - ~/.pgpass mit korrekten Berechtigungen (chmod 600)
# - ~/.s3cfg mit korrekten Berechtigungen (chmod 600) für Offsite-Backup
#
set -euo pipefail

# ============================================================================
# Configuration
# ============================================================================

# Database connection (password is supplied via ~/.pgpass).
# Every value can be overridden through the standard PG* environment variables.
DB_HOST="${PGHOST:-10.10.181.101}"
DB_PORT="${PGPORT:-5432}"
DB_NAME="${PGDATABASE:-payload_db}"
DB_USER="${PGUSER:-payload}"
readonly DB_HOST DB_PORT DB_NAME DB_USER

# Directory that receives the compressed dump files.
readonly BACKUP_DIR="/home/payload/backups/postgres"

# Retention: number of days backups are kept (locally and on S3).
readonly RETENTION_DAYS=30

# Log file and maximum size before rotation (10 MiB).
readonly LOG_FILE="${BACKUP_DIR}/backup.log"
readonly LOG_MAX_SIZE=$((10 * 1024 * 1024))

# S3 offsite backup (credentials are supplied via ~/.s3cfg).
S3_ENABLED="${S3_BACKUP_ENABLED:-true}"
S3_BUCKET="${S3_BUCKET:-c2s}"
S3_PATH="${S3_PATH:-backups/postgres}"
readonly S3_ENABLED S3_BUCKET S3_PATH

# ============================================================================
# Runtime variables
# ============================================================================
TIMESTAMP=$(date +"%Y-%m-%d_%H-%M-%S")
readonly TIMESTAMP
readonly BACKUP_FILE="${BACKUP_DIR}/payload_db_${TIMESTAMP}.sql.gz"
# First CLI argument; "--verbose" mirrors log output to stdout.
readonly VERBOSE=${1:-""}
# ============================================================================
# Funktionen
# ============================================================================
# Append a timestamped message to the log file. When the script was
# invoked with --verbose, the same line is echoed to stdout as well.
log() {
  local stamp entry
  stamp=$(date '+%Y-%m-%d %H:%M:%S')
  entry="[${stamp}] $1"
  printf '%s\n' "$entry" >> "$LOG_FILE"
  if [[ "$VERBOSE" == "--verbose" ]]; then
    printf '%s\n' "$entry"
  fi
}
# Write a timestamped ERROR entry to the log file and mirror it on stderr.
log_error() {
  local entry
  entry="[$(date '+%Y-%m-%d %H:%M:%S')] ERROR: $1"
  # tee appends to the log and emits the line, which we route to stderr.
  printf '%s\n' "$entry" | tee -a "$LOG_FILE" >&2
}
# Trim the log file to its last 1000 lines once it exceeds LOG_MAX_SIZE.
# A missing log file is not an error (first run).
rotate_log() {
  [[ -f "$LOG_FILE" ]] || return 0
  local size
  size=$(stat -c%s "$LOG_FILE" 2>/dev/null || echo 0)
  if (( size > LOG_MAX_SIZE )); then
    local tmp="${LOG_FILE}.tmp"
    # Keep only the most recent 1000 lines.
    tail -1000 "$LOG_FILE" > "$tmp"
    mv "$tmp" "$LOG_FILE"
    log "Log-Datei rotiert (war ${size} Bytes)"
  fi
}
# Verify that ~/.pgpass exists and is restricted to mode 600.
# Aborts the whole script otherwise — without it pg_dump cannot
# authenticate non-interactively.
check_pgpass() {
  local cred_file="$HOME/.pgpass"
  local mode

  [[ -f "$cred_file" ]] || {
    log_error ".pgpass Datei nicht gefunden: $cred_file"
    log_error "Erstelle sie mit: echo 'host:port:database:user:password' > ~/.pgpass && chmod 600 ~/.pgpass"
    exit 1
  }

  mode=$(stat -c%a "$cred_file" 2>/dev/null || echo "000")
  [[ "$mode" == "600" ]] || {
    log_error ".pgpass hat unsichere Berechtigungen: $mode (erwartet: 600)"
    log_error "Behebe mit: chmod 600 ~/.pgpass"
    exit 1
  }
}
# Check the prerequisites for the S3 upload: ~/.s3cfg present with mode
# 600 and the s3cmd tool available. Returns 0 when uploads are possible,
# 1 when the S3 step should be skipped.
check_s3cfg() {
  local cfg="$HOME/.s3cfg"
  local mode

  if [[ ! -f "$cfg" ]]; then
    log "WARNUNG: .s3cfg nicht gefunden - S3-Upload wird übersprungen"
    return 1
  fi

  mode=$(stat -c%a "$cfg" 2>/dev/null || echo "000")
  if [[ "$mode" != "600" ]]; then
    log_error ".s3cfg hat unsichere Berechtigungen: $mode (erwartet: 600)"
    log_error "Behebe mit: chmod 600 ~/.s3cfg"
    return 1
  fi

  if ! command -v s3cmd > /dev/null 2>&1; then
    log "WARNUNG: s3cmd nicht installiert - S3-Upload wird übersprungen"
    return 1
  fi

  return 0
}
# Upload one backup archive to the configured S3 bucket/prefix.
# $1 - absolute path of the backup file.
# Returns 0 on success, 1 when s3cmd reported a failure.
upload_to_s3() {
  local source_file="$1"
  local name t0 elapsed
  name=$(basename "$source_file")

  log "--- S3 Offsite-Backup ---"
  log "Lade hoch nach s3://${S3_BUCKET}/${S3_PATH}/"

  t0=$(date +%s)
  if ! s3cmd put "$source_file" "s3://${S3_BUCKET}/${S3_PATH}/" 2>> "$LOG_FILE"; then
    log_error "S3-Upload fehlgeschlagen für: $name"
    return 1
  fi
  elapsed=$(( $(date +%s) - t0 ))
  log "S3-Upload erfolgreich: $name (${elapsed}s)"
  return 0
}
# Delete S3 backups older than RETENTION_DAYS days.
# Relies on the lexicographic ordering of the YYYY-MM-DD dates in the
# s3cmd listing; credentials come from ~/.s3cfg.
cleanup_s3_backups() {
  log "Prüfe S3 auf alte Backups..."
  local cutoff_date
  cutoff_date=$(date -d "-${RETENTION_DAYS} days" +"%Y-%m-%d")
  local deleted_count=0
  # Iterate over every backup object under the configured prefix.
  while IFS= read -r line; do
    # Listing format: 2025-12-11 09:54 42356 s3://c2s/backups/postgres/payload_db_...
    local file_date file_path
    file_date=$(echo "$line" | awk '{print $1}')
    file_path=$(echo "$line" | awk '{print $4}')
    if [[ -n "$file_path" && "$file_date" < "$cutoff_date" ]]; then
      if s3cmd del "$file_path" 2>> "$LOG_FILE"; then
        log "S3 gelöscht: $(basename "$file_path")"
        # BUGFIX: ((deleted_count++)) returns status 1 on the first
        # increment (post-increment of 0) and aborted the script under
        # `set -e`; plain arithmetic assignment always succeeds.
        deleted_count=$((deleted_count + 1))
      fi
    fi
  done < <(s3cmd ls "s3://${S3_BUCKET}/${S3_PATH}/" 2>/dev/null | grep "payload_db_.*\.sql\.gz")
  if [[ $deleted_count -gt 0 ]]; then
    log "S3: $deleted_count alte Backups gelöscht"
  else
    log "S3: Keine alten Backups zum Löschen"
  fi
}
# Log the number and cumulative size of the backups stored on S3.
# BUGFIX: `grep -c ... || echo 0` emitted "0" twice for an empty listing
# (grep -c already prints 0 on no match but exits 1, so the fallback also
# fired), and the awk sum printed an empty string instead of 0.
get_s3_stats() {
  local listing s3_count s3_size
  # Fetch the listing once for both metrics; `|| true` keeps set -e /
  # pipefail from aborting when no backups exist yet.
  listing=$(s3cmd ls "s3://${S3_BUCKET}/${S3_PATH}/" 2>/dev/null \
    | grep "payload_db_.*\.sql\.gz" || true)
  s3_count=$(printf '%s\n' "$listing" | grep -c . || true)
  # `sum+0` forces numeric output (0) even when the listing is empty.
  s3_size=$(printf '%s\n' "$listing" | awk '{sum+=$3} END {print sum+0}')
  # Convert bytes to a human-readable size (no bc needed).
  local s3_size_human
  if [[ $s3_size -gt 1073741824 ]]; then
    s3_size_human="$((s3_size / 1073741824))G"
  elif [[ $s3_size -gt 1048576 ]]; then
    s3_size_human="$((s3_size / 1048576))M"
  elif [[ $s3_size -gt 1024 ]]; then
    s3_size_human="$((s3_size / 1024))K"
  else
    s3_size_human="${s3_size}B"
  fi
  log "S3 Backups: $s3_count (${s3_size_human})"
}
# Delete local backups older than RETENTION_DAYS days.
cleanup_old_backups() {
  log "Lösche lokale Backups älter als ${RETENTION_DAYS} Tage..."
  local deleted_count=0
  # NUL-delimited find output handles arbitrary file names safely.
  while IFS= read -r -d '' file; do
    rm -f "$file"
    log "Gelöscht: $(basename "$file")"
    # BUGFIX: ((deleted_count++)) returns status 1 on the first increment
    # (post-increment of 0) and aborted the script under `set -e`;
    # plain arithmetic assignment always succeeds.
    deleted_count=$((deleted_count + 1))
  done < <(find "$BACKUP_DIR" -name "payload_db_*.sql.gz" -type f -mtime +"${RETENTION_DAYS}" -print0 2>/dev/null)
  if [[ $deleted_count -gt 0 ]]; then
    log "Lokal: $deleted_count alte Backups gelöscht"
  else
    log "Lokal: Keine alten Backups zum Löschen"
  fi
}
# Log count, total size, and oldest/newest file names of the local backups.
get_backup_stats() {
  local count total sorted oldest newest
  count=$(find "$BACKUP_DIR" -name "payload_db_*.sql.gz" -type f 2>/dev/null | wc -l)
  total=$(find "$BACKUP_DIR" -name "payload_db_*.sql.gz" -type f -exec du -ch {} + 2>/dev/null | tail -1 | cut -f1)
  # One mtime-sorted listing serves both the oldest and the newest lookup.
  sorted=$(find "$BACKUP_DIR" -name "payload_db_*.sql.gz" -type f -printf '%T+ %p\n' 2>/dev/null | sort)
  oldest=$(printf '%s\n' "$sorted" | head -1 | cut -d' ' -f2 | xargs basename 2>/dev/null || echo "N/A")
  newest=$(printf '%s\n' "$sorted" | tail -1 | cut -d' ' -f2 | xargs basename 2>/dev/null || echo "N/A")
  log "--- Backup-Statistik ---"
  log "Lokale Backups: $count (${total:-0})"
  log "Ältestes: $oldest"
  log "Neuestes: $newest"
}
# ============================================================================
# Hauptprogramm
# ============================================================================
# Main program: rotates the log, verifies prerequisites, dumps the
# database to a gzipped plain-SQL file, verifies the archive, optionally
# ships it to S3, prunes old backups and emits a one-line summary for cron.
main() {
# Rotate the log first so it cannot grow without bound.
rotate_log
log "=========================================="
log "Starte PostgreSQL Backup für $DB_NAME"
log "=========================================="
# Create the backup directory on first run.
if [[ ! -d "$BACKUP_DIR" ]]; then
mkdir -p "$BACKUP_DIR"
log "Backup-Verzeichnis erstellt: $BACKUP_DIR"
fi
# Abort early unless ~/.pgpass exists with safe permissions.
check_pgpass
# Verify database connectivity before attempting the dump.
log "Prüfe Datenbankverbindung..."
if ! psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c "SELECT 1" > /dev/null 2>&1; then
log_error "Datenbankverbindung fehlgeschlagen!"
log_error "Prüfe ~/.pgpass und Netzwerkverbindung zu $DB_HOST:$DB_PORT"
exit 1
fi
log "Datenbankverbindung erfolgreich"
# Create the backup (plain-SQL dump, gzipped on the fly).
log "Erstelle Backup: $(basename "$BACKUP_FILE")"
local start_time
start_time=$(date +%s)
# With `set -o pipefail` (set at the top of the script) a failing pg_dump
# fails the whole pipeline even though gzip itself succeeds, so the
# else branch below is taken.
if pg_dump \
-h "$DB_HOST" \
-p "$DB_PORT" \
-U "$DB_USER" \
-d "$DB_NAME" \
--format=plain \
--no-owner \
--no-privileges \
--clean \
--if-exists \
2>> "$LOG_FILE" | gzip > "$BACKUP_FILE"; then
local end_time
end_time=$(date +%s)
local duration=$((end_time - start_time))
local file_size
file_size=$(du -h "$BACKUP_FILE" | cut -f1)
log "Backup erfolgreich erstellt!"
log "Datei: $(basename "$BACKUP_FILE")"
log "Größe: $file_size"
log "Dauer: ${duration}s"
# Verify the archive is a readable gzip stream.
if gzip -t "$BACKUP_FILE" 2>/dev/null; then
log "Backup-Integrität: OK (gzip-Test bestanden)"
else
log_error "Backup-Integrität: FEHLER (gzip-Test fehlgeschlagen)"
exit 1
fi
else
log_error "Backup fehlgeschlagen!"
# Remove the partial dump so it is not mistaken for a valid backup.
rm -f "$BACKUP_FILE"
exit 1
fi
# S3 offsite backup (optional; skipped when config or s3cmd is missing).
local s3_success=false
if [[ "$S3_ENABLED" == "true" ]]; then
if check_s3cfg; then
if upload_to_s3 "$BACKUP_FILE"; then
s3_success=true
# Prune remote copies only after a successful upload.
cleanup_s3_backups
fi
else
log "S3-Backup übersprungen (Konfiguration fehlt)"
fi
else
log "S3-Backup deaktiviert (S3_BACKUP_ENABLED=false)"
fi
# Prune local backups older than the retention window.
cleanup_old_backups
# Log backup statistics (S3 stats only after a successful upload).
get_backup_stats
if [[ "$s3_success" == "true" ]]; then
get_s3_stats
fi
log "=========================================="
log "Backup abgeschlossen"
log "=========================================="
# One-line stdout summary for cron mail (non-verbose mode only;
# --verbose already echoed everything via log()).
if [[ "$VERBOSE" != "--verbose" ]]; then
if [[ "$s3_success" == "true" ]]; then
echo "Backup OK: $(basename "$BACKUP_FILE") ($file_size) + S3"
else
echo "Backup OK: $(basename "$BACKUP_FILE") ($file_size)"
fi
fi
}
# Run the script, forwarding all CLI arguments (e.g. --verbose).
main "$@"