Mirror of https://github.com/complexcaresolutions/cms.c2sgmbh.git (synced 2026-03-17 22:04:10 +00:00)
- Add backup-db.sh for daily automated backups via cron - Add restore-db.sh for interactive database restoration - Add setup-backup.sh for easy setup on new servers - Support local and S3 (Hetzner Object Storage) backup locations - 30-day retention with automatic cleanup - Credentials stored securely in ~/.pgpass and ~/.s3cfg - Comprehensive documentation with disaster recovery checklist 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
487 lines · 14 KiB · Bash · Executable file
#!/bin/bash
#
# Payload CMS - PostgreSQL restore script
# Restores the database from a backup.
#
# Usage:
#   ./restore-db.sh                          # interactive (lists available backups)
#   ./restore-db.sh <backup-file>            # direct restore from a local file
#   ./restore-db.sh --from-s3                # restore from S3 (interactive)
#   ./restore-db.sh --from-s3 <backup-name>  # restore from S3 (direct)
#   ./restore-db.sh --list                   # list local backups
#   ./restore-db.sh --list-s3                # list S3 backups
#
# Prerequisites:
#   - ~/.pgpass with correct permissions (chmod 600)
#   - ~/.s3cfg for S3 restore (chmod 600)
#

# Fail fast: abort on errors, unset variables, and failures inside pipelines.
set -euo pipefail
|
|
|
|
# ============================================================================
# Configuration
# ============================================================================

# Database connection (password is supplied via ~/.pgpass, never inline).
# Each value can be overridden through the standard PG* environment variables.
DB_HOST="${PGHOST:-10.10.181.101}"
DB_PORT="${PGPORT:-5432}"
DB_NAME="${PGDATABASE:-payload_db}"
DB_USER="${PGUSER:-payload}"

# Directory holding local backup files
BACKUP_DIR="/home/payload/backups/postgres"

# S3 (object storage) location, overridable via environment
S3_BUCKET="${S3_BUCKET:-c2s}"
S3_PATH="${S3_PATH:-backups/postgres}"

# Scratch directory for S3 downloads (removed on exit by cleanup_temp)
TEMP_DIR="/tmp/payload-restore"
|
|
|
|
# ============================================================================
# Colors and output helpers
# ============================================================================

RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # reset / no color

# Banner printed once at script start.
print_header() {
  echo ""
  echo -e "${BLUE}==============================================${NC}"
  echo -e "${BLUE} Payload CMS - Datenbank Restore${NC}"
  echo -e "${BLUE}==============================================${NC}"
  echo ""
}

# Timestamped progress line for a step in the restore flow.
print_step() {
  echo -e "${BLUE}[$(date '+%H:%M:%S')]${NC} $1"
}

# Green check mark for a completed step.
print_success() {
  echo -e "${GREEN}[✓]${NC} $1"
}

# Yellow marker for non-fatal conditions.
print_warning() {
  echo -e "${YELLOW}[!]${NC} $1"
}

# Red marker for errors; goes to stderr so captured stdout stays clean.
print_error() {
  echo -e "${RED}[✗]${NC} ERROR: $1" >&2
}
|
|
|
|
# Abort the script unless ~/.pgpass exists with strict 600 permissions.
check_pgpass() {
  local pgpass_file="$HOME/.pgpass"

  [[ -f "$pgpass_file" ]] || {
    print_error ".pgpass nicht gefunden: $pgpass_file"
    exit 1
  }

  # GNU stat; fall back to "000" so a stat failure is treated as insecure.
  local perms
  perms=$(stat -c%a "$pgpass_file" 2>/dev/null || echo "000")

  [[ "$perms" == "600" ]] || {
    print_error ".pgpass hat unsichere Berechtigungen: $perms (erwartet: 600)"
    exit 1
  }
}
|
|
|
|
# Verify that S3 restore is possible: ~/.s3cfg present and s3cmd installed.
# Returns non-zero (instead of exiting) so callers can degrade gracefully.
check_s3cfg() {
  if [[ ! -f "$HOME/.s3cfg" ]]; then
    print_error ".s3cfg nicht gefunden - S3-Restore nicht möglich"
    return 1
  fi

  if ! command -v s3cmd &> /dev/null; then
    print_error "s3cmd nicht installiert"
    return 1
  fi

  return 0
}
|
|
|
|
# Abort unless a trivial query succeeds against the configured database.
test_db_connection() {
  print_step "Teste Datenbankverbindung..."

  if psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c "SELECT 1" > /dev/null 2>&1; then
    print_success "Datenbankverbindung OK"
    return 0
  fi

  print_error "Datenbankverbindung fehlgeschlagen!"
  print_error "Prüfe ~/.pgpass und Netzwerkverbindung zu $DB_HOST:$DB_PORT"
  exit 1
}
|
|
|
|
# Print a numbered table of local backups (newest first).
# Returns 1 when the backup directory is missing or holds no backups.
list_local_backups() {
  echo ""
  echo -e "${BLUE}Lokale Backups in ${BACKUP_DIR}:${NC}"
  echo ""

  if [[ ! -d "$BACKUP_DIR" ]]; then
    print_warning "Backup-Verzeichnis existiert nicht"
    return 1
  fi

  local num=0
  local file
  while IFS= read -r file; do
    if [[ -n "$file" ]]; then
      local fsize fdate fname
      fsize=$(du -h "$file" | cut -f1)
      # stat %y includes fractional seconds; keep everything before the dot.
      fdate=$(stat -c%y "$file" | cut -d'.' -f1)
      fname=$(basename "$file")

      num=$((num + 1))
      printf " %2d) %-45s %6s %s\n" "$num" "$fname" "$fsize" "$fdate"
    fi
  done < <(find "$BACKUP_DIR" -name "payload_db_*.sql.gz" -type f | sort -r)

  if [[ $num -eq 0 ]]; then
    print_warning "Keine Backups gefunden"
    return 1
  fi

  echo ""
  return 0
}
|
|
|
|
# Print a numbered table of backups stored in S3 (newest first).
# Returns 1 when S3 is not configured/available or holds no backups.
list_s3_backups() {
  echo ""
  echo -e "${BLUE}S3-Backups in s3://${S3_BUCKET}/${S3_PATH}/:${NC}"
  echo ""

  check_s3cfg || return 1

  local num=0
  local line
  while IFS= read -r line; do
    if [[ -n "$line" ]]; then
      # s3cmd ls columns: date time size path
      local fdate fsize fpath fname
      fdate=$(echo "$line" | awk '{print $1, $2}')
      fsize=$(echo "$line" | awk '{print $3}')
      fpath=$(echo "$line" | awk '{print $4}')
      fname=$(basename "$fpath")

      # Convert raw byte count into a human-readable unit.
      if [[ $fsize -gt 1048576 ]]; then
        fsize="$((fsize / 1048576))M"
      elif [[ $fsize -gt 1024 ]]; then
        fsize="$((fsize / 1024))K"
      else
        fsize="${fsize}B"
      fi

      num=$((num + 1))
      printf " %2d) %-45s %6s %s\n" "$num" "$fname" "$fsize" "$fdate"
    fi
  done < <(s3cmd ls "s3://${S3_BUCKET}/${S3_PATH}/" 2>/dev/null | grep "payload_db_.*\.sql\.gz" | sort -r)

  if [[ $num -eq 0 ]]; then
    print_warning "Keine S3-Backups gefunden"
    return 1
  fi

  echo ""
  return 0
}
|
|
|
|
# Interactively choose a local backup; echoes ONLY the chosen file path to
# stdout. Callers capture stdout via $(...), so the listing and any prompts
# must go to stderr — otherwise the captured "path" would contain the whole
# table (this was a real bug: list_local_backups previously wrote to stdout).
select_local_backup() {
  local backups=()
  local file

  while IFS= read -r file; do
    if [[ -n "$file" ]]; then
      backups+=("$file")
    fi
  done < <(find "$BACKUP_DIR" -name "payload_db_*.sql.gz" -type f | sort -r)

  if [[ ${#backups[@]} -eq 0 ]]; then
    print_error "Keine lokalen Backups gefunden"
    exit 1
  fi

  # Listing goes to stderr so it does not pollute the captured path.
  list_local_backups >&2

  # read -p writes its prompt to stderr, so this is already capture-safe.
  read -p "Backup-Nummer auswählen (1-${#backups[@]}): " selection

  if ! [[ "$selection" =~ ^[0-9]+$ ]] || [[ $selection -lt 1 ]] || [[ $selection -gt ${#backups[@]} ]]; then
    print_error "Ungültige Auswahl"
    exit 1
  fi

  echo "${backups[$((selection-1))]}"
}
|
|
|
|
# Interactively choose an S3 backup; echoes ONLY the chosen s3:// path to
# stdout. As with select_local_backup, callers capture stdout via $(...), so
# the listing must be redirected to stderr (previously it leaked into the
# captured value and corrupted the selected path).
select_s3_backup() {
  local backups=()
  local line

  while IFS= read -r line; do
    if [[ -n "$line" ]]; then
      local path
      path=$(echo "$line" | awk '{print $4}')
      backups+=("$path")
    fi
  done < <(s3cmd ls "s3://${S3_BUCKET}/${S3_PATH}/" 2>/dev/null | grep "payload_db_.*\.sql\.gz" | sort -r)

  if [[ ${#backups[@]} -eq 0 ]]; then
    print_error "Keine S3-Backups gefunden"
    exit 1
  fi

  # Listing goes to stderr so it does not pollute the captured path.
  list_s3_backups >&2

  # read -p writes its prompt to stderr, so this is already capture-safe.
  read -p "Backup-Nummer auswählen (1-${#backups[@]}): " selection

  if ! [[ "$selection" =~ ^[0-9]+$ ]] || [[ $selection -lt 1 ]] || [[ $selection -gt ${#backups[@]} ]]; then
    print_error "Ungültige Auswahl"
    exit 1
  fi

  echo "${backups[$((selection-1))]}"
}
|
|
|
|
# Download a backup from S3 into TEMP_DIR and echo ONLY the local path to
# stdout. Two fixes over the previous version:
#   1. The local path was built with a garbled "$(unknown)" — it must be
#      the basename of the S3 object.
#   2. Status messages went to stdout, polluting the path captured by
#      backup_file=$(download_from_s3 ...); they now go to stderr.
download_from_s3() {
  local s3_path="$1"
  local filename
  filename=$(basename "$s3_path")
  local local_path="${TEMP_DIR}/${filename}"

  mkdir -p "$TEMP_DIR"

  print_step "Lade Backup von S3 herunter: $filename" >&2

  if s3cmd get "$s3_path" "$local_path" 2>/dev/null; then
    print_success "Download abgeschlossen" >&2
    echo "$local_path"
  else
    print_error "Download fehlgeschlagen"
    exit 1
  fi
}
|
|
|
|
# Fail fast if the given .sql.gz file does not pass gzip's integrity test.
verify_backup() {
  local backup_file="$1"

  print_step "Verifiziere Backup-Integrität..."

  if gzip -t "$backup_file" 2>/dev/null; then
    print_success "Backup-Integrität OK"
    return 0
  fi

  print_error "Backup-Datei ist beschädigt (gzip-Test fehlgeschlagen)"
  exit 1
}
|
|
|
|
# Emit a one-line summary "Tabellen: N, Zeilen: M" for the target database.
# On query failure the counts default to 0 rather than aborting.
get_db_stats() {
  local tables rows

  tables=$(psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -t -c \
    "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = 'public';" 2>/dev/null | tr -d ' ')

  # n_live_tup is an estimate from the stats collector, good enough here.
  rows=$(psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -t -c \
    "SELECT SUM(n_live_tup) FROM pg_stat_user_tables;" 2>/dev/null | tr -d ' ')

  echo "Tabellen: ${tables:-0}, Zeilen: ${rows:-0}"
}
|
|
|
|
# Stop the CMS (and queue worker) via PM2 before the restore touches the DB.
# Without PM2, the operator must stop the application manually and confirm.
stop_application() {
  print_step "Stoppe Payload CMS..."

  if ! command -v pm2 &> /dev/null; then
    print_warning "PM2 nicht verfügbar - bitte Anwendung manuell stoppen!"
    read -p "Weiter wenn Anwendung gestoppt? [Enter] " -r
    return 0
  fi

  if pm2 list 2>/dev/null | grep -q "payload"; then
    pm2 stop payload 2>/dev/null || true
    print_success "Payload gestoppt"
  else
    print_warning "Payload läuft nicht unter PM2"
  fi

  if pm2 list 2>/dev/null | grep -q "queue-worker"; then
    pm2 stop queue-worker 2>/dev/null || true
    print_success "Queue-Worker gestoppt"
  fi
}
|
|
|
|
# Restart the CMS and the queue worker via PM2 after the restore completed.
start_application() {
  print_step "Starte Payload CMS..."

  if ! command -v pm2 &> /dev/null; then
    print_warning "PM2 nicht verfügbar - bitte Anwendung manuell starten!"
    return 0
  fi

  # Best-effort: a process that is already running must not abort the script.
  pm2 start payload 2>/dev/null || true
  pm2 start queue-worker 2>/dev/null || true
  print_success "Anwendung gestartet"
}
|
|
|
|
# Restore the database from a verified backup file.
# Flow: explicit confirmation -> stop app -> psql restore -> stats -> start app.
# Fix: psql now runs with ON_ERROR_STOP=1. Without it, psql exits 0 even when
# individual restore statements fail, so a broken restore was reported as a
# success.
perform_restore() {
  local backup_file="$1"
  local filename
  filename=$(basename "$backup_file")

  echo ""
  echo -e "${YELLOW}╔════════════════════════════════════════════════════════════╗${NC}"
  echo -e "${YELLOW}║   WARNUNG: Datenbank-Restore überschreibt alle Daten!      ║${NC}"
  echo -e "${YELLOW}╚════════════════════════════════════════════════════════════╝${NC}"
  echo ""
  echo " Backup:  $filename"
  echo " Ziel:    $DB_NAME @ $DB_HOST:$DB_PORT"
  echo " Aktuell: $(get_db_stats)"
  echo ""

  # Destructive action: require the literal word RESTORE, not just y/yes.
  read -p "Restore durchführen? Tippe 'RESTORE' zur Bestätigung: " confirmation

  if [[ "$confirmation" != "RESTORE" ]]; then
    print_warning "Restore abgebrochen"
    exit 0
  fi

  echo ""

  # Stop the application so no writes race against the restore.
  stop_application

  print_step "Führe Restore durch..."

  local start_time
  start_time=$(date +%s)

  # ON_ERROR_STOP=1 makes psql exit non-zero on the first failing statement.
  # Error details are suppressed here; the exit code drives the outcome.
  if gunzip -c "$backup_file" | psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -q -v ON_ERROR_STOP=1 2>/dev/null; then
    local end_time duration
    end_time=$(date +%s)
    duration=$((end_time - start_time))

    print_success "Restore abgeschlossen (${duration}s)"
  else
    print_error "Restore fehlgeschlagen!"
    # Bring the app back up so the operator can investigate; the non-zero
    # exit code still signals the failure.
    start_application
    exit 1
  fi

  # Show post-restore statistics as a sanity check.
  print_step "Verifiziere Restore..."
  echo " Neue Statistik: $(get_db_stats)"

  start_application

  echo ""
  echo -e "${GREEN}╔════════════════════════════════════════════════════════════╗${NC}"
  echo -e "${GREEN}║   Restore erfolgreich abgeschlossen!                       ║${NC}"
  echo -e "${GREEN}╚════════════════════════════════════════════════════════════╝${NC}"
  echo ""
}
|
|
|
|
# Remove the temporary S3 download directory, if it exists.
# Registered as an EXIT trap in main, so it runs on every exit path.
cleanup_temp() {
  if [[ -d "$TEMP_DIR" ]]; then
    rm -rf -- "$TEMP_DIR"
  fi
}
|
|
|
|
# Print the usage/help text for this script.
show_usage() {
  cat <<EOF
Verwendung: $0 [OPTION] [BACKUP-DATEI]

Optionen:
  (keine)            Interaktive Auswahl aus lokalen Backups
  <backup-datei>     Direktes Restore aus lokaler Datei
  --from-s3          Interaktive Auswahl aus S3-Backups
  --from-s3 <name>   Direktes Restore aus S3 (Dateiname)
  --list             Lokale Backups auflisten
  --list-s3          S3-Backups auflisten
  --help             Diese Hilfe anzeigen

Beispiele:
  $0                              # Interaktiv
  $0 /path/to/backup.sql.gz       # Direkt lokal
  $0 --from-s3                    # Interaktiv von S3
  $0 --from-s3 payload_db_2025-12-11_03-00-00.sql.gz

EOF
}
|
|
|
|
# ============================================================================
# Main program
# ============================================================================

main() {
  # Clean up temporary downloads on every exit path.
  trap cleanup_temp EXIT

  # Help
  if [[ "${1:-}" == "--help" || "${1:-}" == "-h" ]]; then
    show_usage
    exit 0
  fi

  # Listing-only modes
  if [[ "${1:-}" == "--list" ]]; then
    list_local_backups
    exit 0
  fi

  if [[ "${1:-}" == "--list-s3" ]]; then
    list_s3_backups
    exit 0
  fi

  print_header

  # Preconditions for any restore
  check_pgpass
  test_db_connection

  local backup_file=""

  if [[ "${1:-}" == "--from-s3" ]]; then
    # Restore from S3: direct object name or interactive selection
    check_s3cfg || exit 1

    local s3_path
    if [[ -n "${2:-}" ]]; then
      s3_path="s3://${S3_BUCKET}/${S3_PATH}/${2}"
    else
      s3_path=$(select_s3_backup)
    fi

    backup_file=$(download_from_s3 "$s3_path")
  elif [[ -n "${1:-}" ]]; then
    # Restore from an explicitly named local file
    backup_file="$1"

    if [[ ! -f "$backup_file" ]]; then
      print_error "Backup-Datei nicht gefunden: $backup_file"
      exit 1
    fi
  else
    # Interactive selection from local backups
    backup_file=$(select_local_backup)
  fi

  verify_backup "$backup_file"
  perform_restore "$backup_file"
}

main "$@"
|