mirror of
https://github.com/complexcaresolutions/cms.c2sgmbh.git
synced 2026-03-17 16:14:12 +00:00
feat: add automated PostgreSQL backup system with S3 offsite storage
- Add backup-db.sh for daily automated backups via cron - Add restore-db.sh for interactive database restoration - Add setup-backup.sh for easy setup on new servers - Support local and S3 (Hetzner Object Storage) backup locations - 30-day retention with automatic cleanup - Credentials stored securely in ~/.pgpass and ~/.s3cfg - Comprehensive documentation with disaster recovery checklist 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
parent
411f1a040e
commit
a066539129
6 changed files with 1521 additions and 11 deletions
54
CLAUDE.md
54
CLAUDE.md
|
|
@ -92,7 +92,12 @@ Internet → 37.24.237.181 → Caddy (443) → Payload (3000)
|
||||||
│ ├── unit/security/ # Security Unit Tests
|
│ ├── unit/security/ # Security Unit Tests
|
||||||
│ └── int/ # Integration Tests
|
│ └── int/ # Integration Tests
|
||||||
├── scripts/
|
├── scripts/
|
||||||
│ └── run-queue-worker.ts # Queue Worker Starter
|
│ ├── run-queue-worker.ts # Queue Worker Starter
|
||||||
|
│ └── backup/ # Backup-System
|
||||||
|
│ ├── backup-db.sh # PostgreSQL Backup-Skript
|
||||||
|
│ ├── restore-db.sh # PostgreSQL Restore-Skript
|
||||||
|
│ ├── setup-backup.sh # Interaktives Setup
|
||||||
|
│ └── README.md # Backup-Dokumentation
|
||||||
├── .env # Umgebungsvariablen
|
├── .env # Umgebungsvariablen
|
||||||
├── ecosystem.config.cjs # PM2 Config
|
├── ecosystem.config.cjs # PM2 Config
|
||||||
└── .next/ # Build Output
|
└── .next/ # Build Output
|
||||||
|
|
@ -169,6 +174,10 @@ pnpm test:coverage # Mit Coverage-Report
|
||||||
|
|
||||||
# Datenbank prüfen
|
# Datenbank prüfen
|
||||||
PGPASSWORD=<REDACTED> psql -h 10.10.181.101 -U payload -d payload_db   # Passwort aus ~/.pgpass, nicht im Repo dokumentieren
|
PGPASSWORD=<REDACTED> psql -h 10.10.181.101 -U payload -d payload_db   # Passwort aus ~/.pgpass, nicht im Repo dokumentieren
|
||||||
|
|
||||||
|
# Backup
|
||||||
|
/home/payload/backups/postgres/backup-db.sh --verbose # Manuelles Backup
|
||||||
|
scripts/backup/setup-backup.sh # Backup-System einrichten
|
||||||
```
|
```
|
||||||
|
|
||||||
## Workflow nach Code-Änderungen
|
## Workflow nach Code-Änderungen
|
||||||
|
|
@ -425,6 +434,46 @@ const cached = await redis.get('key')
|
||||||
await redis.keys('posts:*').then(keys => keys.length && redis.del(...keys))
|
await redis.keys('posts:*').then(keys => keys.length && redis.del(...keys))
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Backup-System
|
||||||
|
|
||||||
|
Automatisches tägliches PostgreSQL-Backup mit lokalem Speicher und S3-Offsite-Backup.
|
||||||
|
|
||||||
|
**Setup auf neuem Server:**
|
||||||
|
```bash
|
||||||
|
cd /home/payload/payload-cms/scripts/backup
|
||||||
|
./setup-backup.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
**Konfigurationsdateien (nicht im Repo):**
|
||||||
|
- `~/.pgpass` - PostgreSQL-Credentials (chmod 600)
|
||||||
|
- `~/.s3cfg` - S3-Credentials (chmod 600)
|
||||||
|
|
||||||
|
**Backup-Speicherorte:**
|
||||||
|
| Ort | Pfad | Retention |
|
||||||
|
|-----|------|-----------|
|
||||||
|
| Lokal | `/home/payload/backups/postgres/` | 30 Tage |
|
||||||
|
| S3 | `s3://c2s/backups/postgres/` | 30 Tage |
|
||||||
|
|
||||||
|
**Cron-Job:** Täglich um 03:00 Uhr
|
||||||
|
|
||||||
|
**Manuelles Backup:**
|
||||||
|
```bash
|
||||||
|
/home/payload/backups/postgres/backup-db.sh --verbose
|
||||||
|
```
|
||||||
|
|
||||||
|
**Restore aus Backup:**
|
||||||
|
```bash
|
||||||
|
# Lokal
|
||||||
|
gunzip -c /home/payload/backups/postgres/payload_db_YYYY-MM-DD_HH-MM-SS.sql.gz | \
|
||||||
|
psql -h 10.10.181.101 -U payload -d payload_db
|
||||||
|
|
||||||
|
# Aus S3
|
||||||
|
s3cmd get s3://c2s/backups/postgres/payload_db_YYYY-MM-DD_HH-MM-SS.sql.gz
|
||||||
|
gunzip -c payload_db_*.sql.gz | psql -h 10.10.181.101 -U payload -d payload_db
|
||||||
|
```
|
||||||
|
|
||||||
|
Dokumentation: `scripts/backup/README.md`
|
||||||
|
|
||||||
## Datenbank-Direktzugriff
|
## Datenbank-Direktzugriff
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
|
|
@ -517,5 +566,6 @@ pnpm test:coverage
|
||||||
- `docs/INFRASTRUCTURE.md` - Server-Architektur & Deployment
|
- `docs/INFRASTRUCTURE.md` - Server-Architektur & Deployment
|
||||||
- `docs/anleitungen/TODO.md` - Task-Liste & Roadmap
|
- `docs/anleitungen/TODO.md` - Task-Liste & Roadmap
|
||||||
- `docs/anleitungen/SECURITY.md` - Sicherheitsrichtlinien
|
- `docs/anleitungen/SECURITY.md` - Sicherheitsrichtlinien
|
||||||
|
- `scripts/backup/README.md` - Backup-System Dokumentation
|
||||||
|
|
||||||
*Letzte Aktualisierung: 10.12.2025*
|
*Letzte Aktualisierung: 11.12.2025*
|
||||||
|
|
|
||||||
|
|
@ -184,10 +184,10 @@
|
||||||
- [x] Manuelle Datenbank-Backups (pg_dump)
|
- [x] Manuelle Datenbank-Backups (pg_dump)
|
||||||
- [x] SQL-Dateien in .gitignore
|
- [x] SQL-Dateien in .gitignore
|
||||||
- [x] Backup via Git (temporär für Migration)
|
- [x] Backup via Git (temporär für Migration)
|
||||||
- [ ] Automatische Datenbank-Backups (Cron)
|
- [x] Automatische Datenbank-Backups (Cron) (Erledigt: 11.12.2025)
|
||||||
|
- [x] Backup-Rotation (30 Tage Retention) - im Skript integriert
|
||||||
- [ ] Media-Backup zu S3/MinIO
|
- [ ] Media-Backup zu S3/MinIO
|
||||||
- [ ] Disaster Recovery Plan
|
- [ ] Disaster Recovery Plan
|
||||||
- [ ] Backup-Rotation (30 Tage Retention)
|
|
||||||
|
|
||||||
- [ ] **Monitoring & Logging** (→ siehe Phase 4: Produktionsreife)
|
- [ ] **Monitoring & Logging** (→ siehe Phase 4: Produktionsreife)
|
||||||
- Sentry Error Tracking
|
- Sentry Error Tracking
|
||||||
|
|
@ -219,11 +219,21 @@
|
||||||
- [x] Statistik-Endpoint (letzte 24h/7d/30d) - `/api/email-logs/stats`
|
- [x] Statistik-Endpoint (letzte 24h/7d/30d) - `/api/email-logs/stats`
|
||||||
|
|
||||||
#### Backup & Recovery
|
#### Backup & Recovery
|
||||||
- [ ] **Automatisierte Datenbank-Backups**
|
- [x] **Automatisierte Datenbank-Backups** (Erledigt: 11.12.2025)
|
||||||
- [ ] Cron-Job für tägliche pg_dump
|
- [x] Cron-Job für tägliche pg_dump
|
||||||
- [ ] Offsite-Storage (S3/MinIO)
|
- Skript: `/home/payload/backups/postgres/backup-db.sh`
|
||||||
- [ ] Backup-Rotation (30 Tage Retention)
|
- Cron: Täglich um 03:00 Uhr
|
||||||
- [ ] Dokumentierter Restore-Prozess
|
- Log: `/home/payload/logs/backup-cron.log`
|
||||||
|
- [x] Backup-Rotation (30 Tage Retention) - lokal und S3
|
||||||
|
- [x] Offsite-Storage (Hetzner Object Storage)
|
||||||
|
- Endpoint: `fsn1.your-objectstorage.com`
|
||||||
|
- Bucket: `s3://c2s/backups/postgres/`
|
||||||
|
- Credentials: `~/.s3cfg` (chmod 600)
|
||||||
|
- [x] Dokumentierter Restore-Prozess (Erledigt: 11.12.2025)
|
||||||
|
- Restore-Skript: `scripts/backup/restore-db.sh`
|
||||||
|
- Interaktive Auswahl aus lokalen/S3-Backups
|
||||||
|
- Automatisches Stoppen/Starten der Anwendung
|
||||||
|
- Disaster Recovery Checkliste in README
|
||||||
- [ ] **Media-Backup**
|
- [ ] **Media-Backup**
|
||||||
- [ ] S3/MinIO Integration für Media-Uploads
|
- [ ] S3/MinIO Integration für Media-Uploads
|
||||||
- [ ] Versionierung aktivieren
|
- [ ] Versionierung aktivieren
|
||||||
|
|
@ -506,7 +516,7 @@
|
||||||
### Nächste Schritte (Priorisiert)
|
### Nächste Schritte (Priorisiert)
|
||||||
|
|
||||||
1. ~~**[KRITISCH]** AuditLogs Collection implementieren~~ ✅ Erledigt
|
1. ~~**[KRITISCH]** AuditLogs Collection implementieren~~ ✅ Erledigt
|
||||||
2. **[KRITISCH]** Automatisierte Backups einrichten
|
2. ~~**[KRITISCH]** Automatisierte Backups einrichten~~ ✅ Erledigt (11.12.2025)
|
||||||
3. ~~**[HOCH]** Full-Text-Search aktivieren (USE_FTS=true)~~ ✅ Erledigt
|
3. ~~**[HOCH]** Full-Text-Search aktivieren (USE_FTS=true)~~ ✅ Erledigt
|
||||||
4. **[HOCH]** Rate-Limits auf Redis migrieren (In-Memory-Fallback funktioniert)
|
4. **[HOCH]** Rate-Limits auf Redis migrieren (In-Memory-Fallback funktioniert)
|
||||||
5. ~~**[MITTEL]** CI/CD Pipeline mit GitHub Actions~~ ✅ security.yml erstellt
|
5. ~~**[MITTEL]** CI/CD Pipeline mit GitHub Actions~~ ✅ security.yml erstellt
|
||||||
|
|
@ -523,12 +533,32 @@
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
*Letzte Aktualisierung: 10.12.2025*
|
*Letzte Aktualisierung: 11.12.2025*
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Changelog
|
## Changelog
|
||||||
|
|
||||||
|
### 11.12.2025
|
||||||
|
- **Automatisierte Datenbank-Backups:** Cron-Job für tägliche pg_dump eingerichtet
|
||||||
|
- Backup-Skript: `/home/payload/backups/postgres/backup-db.sh`
|
||||||
|
- Tägliche Ausführung um 03:00 Uhr via Cron
|
||||||
|
- Automatische Rotation: Backups älter als 30 Tage werden gelöscht
|
||||||
|
- Komprimierte Backups mit gzip (~42KB pro Backup)
|
||||||
|
- Integritätsprüfung nach jedem Backup
|
||||||
|
- Detaillierte Logs in `/home/payload/backups/postgres/backup.log`
|
||||||
|
- **Offsite-Backup zu Hetzner Object Storage:**
|
||||||
|
- s3cmd installiert und konfiguriert (`~/.s3cfg`, chmod 600)
|
||||||
|
- Automatischer Upload nach jedem Backup zu `s3://c2s/backups/postgres/`
|
||||||
|
- 30-Tage-Retention auch auf S3 (alte Backups werden automatisch gelöscht)
|
||||||
|
- Endpoint: `fsn1.your-objectstorage.com`
|
||||||
|
- **Dokumentierter Restore-Prozess:**
|
||||||
|
- Interaktives Restore-Skript: `scripts/backup/restore-db.sh`
|
||||||
|
- Unterstützt lokale und S3-Backups
|
||||||
|
- Automatisches Stoppen/Starten von PM2
|
||||||
|
- Backup-Verifizierung vor Restore
|
||||||
|
- Disaster Recovery Checkliste in `scripts/backup/README.md`
|
||||||
|
|
||||||
### 09.12.2025
|
### 09.12.2025
|
||||||
- **Admin Login Fix:** Custom Login-Route unterstützt nun `_payload` JSON-Feld aus multipart/form-data (Payload Admin Panel Format)
|
- **Admin Login Fix:** Custom Login-Route unterstützt nun `_payload` JSON-Feld aus multipart/form-data (Payload Admin Panel Format)
|
||||||
- **Dokumentation bereinigt:** Obsolete PROMPT_*.md Instruktionsdateien gelöscht
|
- **Dokumentation bereinigt:** Obsolete PROMPT_*.md Instruktionsdateien gelöscht
|
||||||
|
|
|
||||||
303
scripts/backup/README.md
Normal file
303
scripts/backup/README.md
Normal file
|
|
@ -0,0 +1,303 @@
|
||||||
|
# Backup-System
|
||||||
|
|
||||||
|
Automatisches PostgreSQL-Backup mit lokalem Speicher und optionalem S3-Offsite-Backup.
|
||||||
|
|
||||||
|
## Schnellstart
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Auf neuem Server einrichten
|
||||||
|
cd /home/payload/payload-cms/scripts/backup
|
||||||
|
./setup-backup.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
Das Setup-Skript führt interaktiv durch:
|
||||||
|
1. PostgreSQL-Authentifizierung (`.pgpass`)
|
||||||
|
2. S3-Konfiguration (`.s3cfg`) - optional
|
||||||
|
3. Installation des Backup-Skripts
|
||||||
|
4. Einrichtung des Cron-Jobs
|
||||||
|
5. Test-Backup
|
||||||
|
|
||||||
|
## Manuelle Einrichtung
|
||||||
|
|
||||||
|
### 1. PostgreSQL-Authentifizierung
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# .pgpass erstellen
|
||||||
|
echo "10.10.181.101:5432:payload_db:payload:DEIN_PASSWORT" > ~/.pgpass
|
||||||
|
chmod 600 ~/.pgpass
|
||||||
|
|
||||||
|
# Verbindung testen
|
||||||
|
psql -h 10.10.181.101 -U payload -d payload_db -c "SELECT 1"
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. S3-Konfiguration (optional)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# s3cmd installieren
|
||||||
|
sudo apt install s3cmd
|
||||||
|
|
||||||
|
# .s3cfg erstellen
|
||||||
|
cat > ~/.s3cfg << 'EOF'
|
||||||
|
[default]
|
||||||
|
access_key = DEIN_ACCESS_KEY
|
||||||
|
secret_key = DEIN_SECRET_KEY
|
||||||
|
host_base = fsn1.your-objectstorage.com
|
||||||
|
host_bucket = %(bucket)s.fsn1.your-objectstorage.com
|
||||||
|
use_https = True
|
||||||
|
signature_v2 = False
|
||||||
|
EOF
|
||||||
|
chmod 600 ~/.s3cfg
|
||||||
|
|
||||||
|
# Verbindung testen
|
||||||
|
s3cmd ls
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Backup-Skript installieren
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Verzeichnis erstellen
|
||||||
|
mkdir -p /home/payload/backups/postgres
|
||||||
|
|
||||||
|
# Skript kopieren
|
||||||
|
cp scripts/backup/backup-db.sh /home/payload/backups/postgres/
|
||||||
|
chmod +x /home/payload/backups/postgres/backup-db.sh
|
||||||
|
|
||||||
|
# Test
|
||||||
|
/home/payload/backups/postgres/backup-db.sh --verbose
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. Cron-Job einrichten
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Crontab bearbeiten
|
||||||
|
crontab -e
|
||||||
|
|
||||||
|
# Zeile hinzufügen (täglich um 03:00 Uhr)
|
||||||
|
0 3 * * * /home/payload/backups/postgres/backup-db.sh >> /home/payload/logs/backup-cron.log 2>&1
|
||||||
|
```
|
||||||
|
|
||||||
|
## Backup-Skript Optionen
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Normales Backup (für Cron)
|
||||||
|
./backup-db.sh
|
||||||
|
|
||||||
|
# Mit detaillierter Ausgabe
|
||||||
|
./backup-db.sh --verbose
|
||||||
|
```
|
||||||
|
|
||||||
|
### Umgebungsvariablen
|
||||||
|
|
||||||
|
| Variable | Default | Beschreibung |
|
||||||
|
|----------|---------|--------------|
|
||||||
|
| `PGHOST` | 10.10.181.101 | PostgreSQL Host |
|
||||||
|
| `PGPORT` | 5432 | PostgreSQL Port |
|
||||||
|
| `PGDATABASE` | payload_db | Datenbank-Name |
|
||||||
|
| `PGUSER` | payload | Datenbank-User |
|
||||||
|
| `S3_BACKUP_ENABLED` | true | S3-Upload aktivieren |
|
||||||
|
| `S3_BUCKET` | c2s | S3-Bucket-Name |
|
||||||
|
| `S3_PATH` | backups/postgres | Pfad im Bucket |
|
||||||
|
|
||||||
|
## Backup-Speicherorte
|
||||||
|
|
||||||
|
| Ort | Pfad | Retention |
|
||||||
|
|-----|------|-----------|
|
||||||
|
| Lokal | `/home/payload/backups/postgres/` | 30 Tage |
|
||||||
|
| S3 | `s3://c2s/backups/postgres/` | 30 Tage |
|
||||||
|
|
||||||
|
## Log-Dateien
|
||||||
|
|
||||||
|
| Datei | Beschreibung |
|
||||||
|
|-------|--------------|
|
||||||
|
| `/home/payload/backups/postgres/backup.log` | Detailliertes Backup-Log |
|
||||||
|
| `/home/payload/logs/backup-cron.log` | Cron-Ausgabe |
|
||||||
|
|
||||||
|
Das Backup-Log wird automatisch rotiert (max. 10MB, behält letzte 1000 Zeilen).
|
||||||
|
|
||||||
|
## Restore
|
||||||
|
|
||||||
|
### Restore-Skript (empfohlen)
|
||||||
|
|
||||||
|
Das interaktive Restore-Skript führt sicher durch den Wiederherstellungsprozess:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Interaktive Auswahl aus lokalen Backups
|
||||||
|
./restore-db.sh
|
||||||
|
|
||||||
|
# Interaktive Auswahl aus S3-Backups
|
||||||
|
./restore-db.sh --from-s3
|
||||||
|
|
||||||
|
# Direktes Restore aus lokaler Datei
|
||||||
|
./restore-db.sh /home/payload/backups/postgres/payload_db_2025-12-11_03-00-00.sql.gz
|
||||||
|
|
||||||
|
# Direktes Restore aus S3
|
||||||
|
./restore-db.sh --from-s3 payload_db_2025-12-11_03-00-00.sql.gz
|
||||||
|
|
||||||
|
# Backups auflisten
|
||||||
|
./restore-db.sh --list # Lokale Backups
|
||||||
|
./restore-db.sh --list-s3 # S3-Backups
|
||||||
|
```
|
||||||
|
|
||||||
|
**Das Restore-Skript führt automatisch durch:**
|
||||||
|
1. Prüfung der Datenbankverbindung
|
||||||
|
2. Auflistung verfügbarer Backups
|
||||||
|
3. Download von S3 (falls gewählt)
|
||||||
|
4. Verifizierung der Backup-Integrität
|
||||||
|
5. Stoppen von Payload CMS und Queue-Worker
|
||||||
|
6. Durchführung des Restores
|
||||||
|
7. Verifizierung der wiederhergestellten Daten
|
||||||
|
8. Neustart der Anwendung
|
||||||
|
|
||||||
|
### Manueller Restore-Prozess
|
||||||
|
|
||||||
|
Falls das Skript nicht verfügbar ist:
|
||||||
|
|
||||||
|
#### Schritt 1: Anwendung stoppen
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pm2 stop payload
|
||||||
|
pm2 stop queue-worker
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Schritt 2: Backup auswählen
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Lokale Backups anzeigen
|
||||||
|
ls -lah /home/payload/backups/postgres/*.sql.gz
|
||||||
|
|
||||||
|
# S3-Backups anzeigen
|
||||||
|
s3cmd ls s3://c2s/backups/postgres/
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Schritt 3: Backup herunterladen (nur bei S3)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
s3cmd get s3://c2s/backups/postgres/payload_db_YYYY-MM-DD_HH-MM-SS.sql.gz /tmp/
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Schritt 4: Backup verifizieren
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Integrität prüfen
|
||||||
|
gzip -t /tmp/payload_db_YYYY-MM-DD_HH-MM-SS.sql.gz && echo "OK" || echo "BESCHÄDIGT"
|
||||||
|
|
||||||
|
# Inhalt inspizieren (erste 50 Zeilen)
|
||||||
|
gunzip -c /tmp/payload_db_YYYY-MM-DD_HH-MM-SS.sql.gz | head -50
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Schritt 5: Restore durchführen
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Aus lokalem Backup
|
||||||
|
gunzip -c /home/payload/backups/postgres/payload_db_YYYY-MM-DD_HH-MM-SS.sql.gz | \
|
||||||
|
psql -h 10.10.181.101 -U payload -d payload_db
|
||||||
|
|
||||||
|
# Aus heruntergeladenem S3-Backup
|
||||||
|
gunzip -c /tmp/payload_db_YYYY-MM-DD_HH-MM-SS.sql.gz | \
|
||||||
|
psql -h 10.10.181.101 -U payload -d payload_db
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Schritt 6: Restore verifizieren
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Tabellen zählen
|
||||||
|
psql -h 10.10.181.101 -U payload -d payload_db -c \
|
||||||
|
"SELECT COUNT(*) as tables FROM information_schema.tables WHERE table_schema = 'public';"
|
||||||
|
|
||||||
|
# Zeilen in wichtigen Tabellen prüfen
|
||||||
|
psql -h 10.10.181.101 -U payload -d payload_db -c \
|
||||||
|
"SELECT 'users' as table, COUNT(*) as rows FROM users
|
||||||
|
UNION ALL SELECT 'tenants', COUNT(*) FROM tenants
|
||||||
|
UNION ALL SELECT 'posts', COUNT(*) FROM posts
|
||||||
|
UNION ALL SELECT 'pages', COUNT(*) FROM pages;"
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Schritt 7: Anwendung starten
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pm2 start payload
|
||||||
|
pm2 start queue-worker
|
||||||
|
pm2 status
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Schritt 8: Funktionalität testen
|
||||||
|
|
||||||
|
1. Admin-Panel öffnen: https://pl.c2sgmbh.de/admin
|
||||||
|
2. Login testen
|
||||||
|
3. Stichprobenartig Inhalte prüfen
|
||||||
|
|
||||||
|
### Disaster Recovery Checkliste
|
||||||
|
|
||||||
|
Bei vollständigem Systemausfall:
|
||||||
|
|
||||||
|
```
|
||||||
|
□ 1. Neuen Server bereitstellen
|
||||||
|
□ 2. Repository klonen: git clone https://github.com/c2s-admin/cms.c2sgmbh.git
|
||||||
|
□ 3. Dependencies installieren: pnpm install
|
||||||
|
□ 4. .env konfigurieren (aus Dokumentation oder Backup)
|
||||||
|
□ 5. Backup-System einrichten: ./scripts/backup/setup-backup.sh
|
||||||
|
□ 6. Neuestes Backup von S3 holen: s3cmd ls s3://c2s/backups/postgres/
|
||||||
|
□ 7. Restore durchführen: ./scripts/backup/restore-db.sh --from-s3
|
||||||
|
□ 8. Build erstellen: pnpm build
|
||||||
|
□ 9. Anwendung starten: pm2 start ecosystem.config.cjs
|
||||||
|
□ 10. DNS/Reverse Proxy konfigurieren
|
||||||
|
□ 11. SSL-Zertifikate einrichten (Caddy automatisch)
|
||||||
|
□ 12. Funktionalität testen
|
||||||
|
```
|
||||||
|
|
||||||
|
### Point-in-Time Recovery
|
||||||
|
|
||||||
|
Für Recovery zu einem bestimmten Zeitpunkt:
|
||||||
|
|
||||||
|
1. Backup VOR dem gewünschten Zeitpunkt identifizieren
|
||||||
|
2. Restore durchführen
|
||||||
|
3. Manuelle Nacharbeit für Daten zwischen Backup und Zielzeitpunkt
|
||||||
|
|
||||||
|
**Hinweis:** Für echtes Point-in-Time Recovery wäre PostgreSQL WAL-Archivierung erforderlich (nicht implementiert).
|
||||||
|
|
||||||
|
## Sicherheit
|
||||||
|
|
||||||
|
- **Keine Credentials im Skript**: Passwörter werden aus `~/.pgpass` und `~/.s3cfg` gelesen
|
||||||
|
- **Berechtigungen**: Beide Dateien müssen `chmod 600` haben
|
||||||
|
- **Credential-Dateien nicht committen**: `.pgpass` und `.s3cfg` sind in `.gitignore`
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Backup fehlgeschlagen
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Log prüfen
|
||||||
|
tail -50 /home/payload/backups/postgres/backup.log
|
||||||
|
|
||||||
|
# Datenbankverbindung testen
|
||||||
|
psql -h 10.10.181.101 -U payload -d payload_db -c "SELECT 1"
|
||||||
|
|
||||||
|
# .pgpass Berechtigungen prüfen
|
||||||
|
ls -la ~/.pgpass # Muss 600 sein
|
||||||
|
```
|
||||||
|
|
||||||
|
### S3-Upload fehlgeschlagen
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# S3-Verbindung testen
|
||||||
|
s3cmd ls
|
||||||
|
|
||||||
|
# .s3cfg Berechtigungen prüfen
|
||||||
|
ls -la ~/.s3cfg # Muss 600 sein
|
||||||
|
|
||||||
|
# S3 manuell testen
|
||||||
|
s3cmd put testfile.txt s3://c2s/test/
|
||||||
|
```
|
||||||
|
|
||||||
|
### Cron läuft nicht
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Crontab prüfen
|
||||||
|
crontab -l
|
||||||
|
|
||||||
|
# Cron-Log prüfen
|
||||||
|
tail -50 /home/payload/logs/backup-cron.log
|
||||||
|
|
||||||
|
# Cron-Service Status
|
||||||
|
systemctl status cron
|
||||||
|
```
|
||||||
347
scripts/backup/backup-db.sh
Executable file
347
scripts/backup/backup-db.sh
Executable file
|
|
@ -0,0 +1,347 @@
|
||||||
|
#!/bin/bash
#
# Payload CMS - daily PostgreSQL backup script
# Created: 11.12.2025
# Updated: 11.12.2025 (S3 offsite backup)
#
# Usage:
#   ./backup-db.sh            # normal backup (for cron)
#   ./backup-db.sh --verbose  # with detailed output
#
# Prerequisites:
#   - ~/.pgpass with correct permissions (chmod 600)
#   - ~/.s3cfg with correct permissions (chmod 600) for offsite backup
#

set -euo pipefail

# ============================================================================
# Configuration (all values overridable via environment variables)
# ============================================================================

# Database connection (password is read from ~/.pgpass, never stored here)
DB_HOST="${PGHOST:-10.10.181.101}"
DB_PORT="${PGPORT:-5432}"
DB_NAME="${PGDATABASE:-payload_db}"
DB_USER="${PGUSER:-payload}"

# Local backup directory
BACKUP_DIR="/home/payload/backups/postgres"

# Retention: number of days backups are kept (locally and on S3)
RETENTION_DAYS=30

# Log file and maximum size before rotation (10 MB)
LOG_FILE="${BACKUP_DIR}/backup.log"
LOG_MAX_SIZE=$((10 * 1024 * 1024))

# S3 offsite backup (credentials via ~/.s3cfg)
S3_ENABLED="${S3_BACKUP_ENABLED:-true}"
S3_BUCKET="${S3_BUCKET:-c2s}"
S3_PATH="${S3_PATH:-backups/postgres}"

# ============================================================================
# Derived variables
# ============================================================================

TIMESTAMP=$(date +"%Y-%m-%d_%H-%M-%S")
BACKUP_FILE="${BACKUP_DIR}/payload_db_${TIMESTAMP}.sql.gz"
# First CLI argument; "--verbose" additionally echoes log lines to stdout
VERBOSE=${1:-""}
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Funktionen
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
# Append a timestamped message to $LOG_FILE; echo it to stdout when the
# script was started with --verbose.
log() {
    local entry
    entry="[$(date '+%Y-%m-%d %H:%M:%S')] $1"
    echo "$entry" >> "$LOG_FILE"
    # guard with || true so the function always returns 0 under `set -e`
    [[ "$VERBOSE" == "--verbose" ]] && echo "$entry" || true
}
|
||||||
|
|
||||||
|
# Log an error message both to the log file and to stderr (so cron mails it).
log_error() {
    local stamped="[$(date '+%Y-%m-%d %H:%M:%S')] ERROR: $1"
    printf '%s\n' "$stamped" >> "$LOG_FILE"
    printf '%s\n' "$stamped" >&2
}
|
||||||
|
|
||||||
|
# Truncate $LOG_FILE to its most recent 1000 lines once it grows past
# LOG_MAX_SIZE bytes. No-op when the file is missing or still small.
rotate_log() {
    [[ -f "$LOG_FILE" ]] || return 0

    local size
    size=$(stat -c%s "$LOG_FILE" 2>/dev/null || echo 0)
    [[ $size -gt $LOG_MAX_SIZE ]] || return 0

    # keep only the last 1000 lines
    tail -1000 "$LOG_FILE" > "${LOG_FILE}.tmp"
    mv "${LOG_FILE}.tmp" "$LOG_FILE"
    log "Log-Datei rotiert (war ${size} Bytes)"
}
|
||||||
|
|
||||||
|
# Abort (exit 1) unless ~/.pgpass exists and has mode 600.
# psql/pg_dump read the database password from this file, so it must be
# present and locked down before any database access is attempted.
check_pgpass() {
    local pgpass_file="$HOME/.pgpass"

    if [[ ! -f "$pgpass_file" ]]; then
        log_error ".pgpass Datei nicht gefunden: $pgpass_file"
        log_error "Erstelle sie mit: echo 'host:port:database:user:password' > ~/.pgpass && chmod 600 ~/.pgpass"
        exit 1
    fi

    # stat -c%a prints the octal permission bits (GNU coreutils / Linux)
    local perms
    perms=$(stat -c%a "$pgpass_file" 2>/dev/null || echo "000")

    if [[ "$perms" != "600" ]]; then
        log_error ".pgpass hat unsichere Berechtigungen: $perms (erwartet: 600)"
        log_error "Behebe mit: chmod 600 ~/.pgpass"
        exit 1
    fi
}
|
||||||
|
|
||||||
|
# Return 0 when S3 uploads are possible: ~/.s3cfg present, mode 600, and
# s3cmd installed. A missing config or missing tool only logs a warning and
# returns 1 (S3 is optional); insecure permissions are logged as an error.
check_s3cfg() {
    local s3cfg_file="$HOME/.s3cfg"

    if [[ ! -f "$s3cfg_file" ]]; then
        log "WARNUNG: .s3cfg nicht gefunden - S3-Upload wird übersprungen"
        return 1
    fi

    # stat -c%a prints the octal permission bits (GNU coreutils / Linux)
    local perms
    perms=$(stat -c%a "$s3cfg_file" 2>/dev/null || echo "000")

    if [[ "$perms" != "600" ]]; then
        log_error ".s3cfg hat unsichere Berechtigungen: $perms (erwartet: 600)"
        log_error "Behebe mit: chmod 600 ~/.s3cfg"
        return 1
    fi

    if ! command -v s3cmd &> /dev/null; then
        log "WARNUNG: s3cmd nicht installiert - S3-Upload wird übersprungen"
        return 1
    fi

    return 0
}
|
||||||
|
|
||||||
|
# Upload one backup file to s3://S3_BUCKET/S3_PATH/ via s3cmd.
#   $1: absolute path of the local backup file.
# Returns 0 on success, 1 on failure (failure is logged, not fatal —
# the local backup already exists at this point).
upload_to_s3() {
    local backup_file="$1"
    local filename
    filename=$(basename "$backup_file")

    log "--- S3 Offsite-Backup ---"
    log "Lade hoch nach s3://${S3_BUCKET}/${S3_PATH}/"

    local start_time
    start_time=$(date +%s)

    # s3cmd stderr goes to the log file so cron output stays quiet
    if s3cmd put "$backup_file" "s3://${S3_BUCKET}/${S3_PATH}/" 2>> "$LOG_FILE"; then
        local end_time
        end_time=$(date +%s)
        local duration=$((end_time - start_time))

        log "S3-Upload erfolgreich: $filename (${duration}s)"
        return 0
    else
        log_error "S3-Upload fehlgeschlagen für: $filename"
        return 1
    fi
}
|
||||||
|
|
||||||
|
# Delete S3 backups older than RETENTION_DAYS.
# Parses s3cmd's listing format ("DATE TIME SIZE s3://bucket/path");
# ISO dates compare correctly as strings, so a lexicographic "<" suffices.
cleanup_s3_backups() {
    log "Prüfe S3 auf alte Backups..."

    local cutoff_date
    cutoff_date=$(date -d "-${RETENTION_DAYS} days" +"%Y-%m-%d")

    local deleted_count=0

    # List all backups in the S3 bucket
    while IFS= read -r line; do
        # Format: 2025-12-11 09:54  42356  s3://c2s/backups/postgres/payload_db_...
        local file_date file_path
        file_date=$(echo "$line" | awk '{print $1}')
        file_path=$(echo "$line" | awk '{print $4}')

        if [[ -n "$file_path" && "$file_date" < "$cutoff_date" ]]; then
            if s3cmd del "$file_path" 2>> "$LOG_FILE"; then
                log "S3 gelöscht: $(basename "$file_path")"
                # BUG FIX: ((deleted_count++)) returns exit status 1 when the
                # pre-increment value is 0, which aborts the whole script
                # under `set -e` right after the first deletion. Use an
                # assignment, which always succeeds.
                deleted_count=$((deleted_count + 1))
            fi
        fi
    done < <(s3cmd ls "s3://${S3_BUCKET}/${S3_PATH}/" 2>/dev/null | grep "payload_db_.*\.sql\.gz")

    if [[ $deleted_count -gt 0 ]]; then
        log "S3: $deleted_count alte Backups gelöscht"
    else
        log "S3: Keine alten Backups zum Löschen"
    fi
}
|
||||||
|
|
||||||
|
# Log how many backups live on S3 and their combined size.
get_s3_stats() {
    local listing s3_count s3_size

    # Capture the listing once; grep exits 1 on "no match", which must not
    # kill the script under `set -e`, hence the `|| true`.
    listing=$(s3cmd ls "s3://${S3_BUCKET}/${S3_PATH}/" 2>/dev/null | grep "payload_db_.*\.sql\.gz" || true)

    # BUG FIX: the old `grep -c ... || echo 0` emitted "0" twice when nothing
    # matched (grep -c prints 0 AND exits 1), corrupting $s3_count; and the
    # awk sum was empty for an empty listing, breaking the -gt comparisons
    # below. `print sum+0` forces a numeric 0 instead.
    s3_count=$(printf '%s\n' "$listing" | grep -c "payload_db_.*\.sql\.gz" || true)
    s3_size=$(printf '%s\n' "$listing" | awk '{sum+=$3} END {print sum+0}')

    # Convert bytes to a human-readable size (no bc dependency)
    local s3_size_human
    if [[ $s3_size -gt 1073741824 ]]; then
        s3_size_human="$((s3_size / 1073741824))G"
    elif [[ $s3_size -gt 1048576 ]]; then
        s3_size_human="$((s3_size / 1048576))M"
    elif [[ $s3_size -gt 1024 ]]; then
        s3_size_human="$((s3_size / 1024))K"
    else
        s3_size_human="${s3_size}B"
    fi

    log "S3 Backups: $s3_count (${s3_size_human})"
}
|
||||||
|
|
||||||
|
# Delete local backups older than RETENTION_DAYS days (by file mtime).
cleanup_old_backups() {
    log "Lösche lokale Backups älter als ${RETENTION_DAYS} Tage..."

    local deleted_count=0
    # -print0 / read -d '' handles any filename safely
    while IFS= read -r -d '' file; do
        rm -f "$file"
        log "Gelöscht: $(basename "$file")"
        # BUG FIX: ((deleted_count++)) returns exit status 1 when the
        # pre-increment value is 0, aborting the script under `set -e`
        # right after the first deletion. Use an assignment instead.
        deleted_count=$((deleted_count + 1))
    done < <(find "$BACKUP_DIR" -name "payload_db_*.sql.gz" -type f -mtime +${RETENTION_DAYS} -print0 2>/dev/null)

    if [[ $deleted_count -gt 0 ]]; then
        log "Lokal: $deleted_count alte Backups gelöscht"
    else
        log "Lokal: Keine alten Backups zum Löschen"
    fi
}
|
||||||
|
|
||||||
|
# Log summary statistics for local backups: count, total size, oldest
# and newest file name. All values degrade gracefully to 0 / "N/A" when
# no backups exist.
get_backup_stats() {
    local backup_count
    local total_size
    local oldest_backup
    local newest_backup

    backup_count=$(find "$BACKUP_DIR" -name "payload_db_*.sql.gz" -type f 2>/dev/null | wc -l)
    # du -ch prints a grand-total line last; take its size column
    total_size=$(find "$BACKUP_DIR" -name "payload_db_*.sql.gz" -type f -exec du -ch {} + 2>/dev/null | tail -1 | cut -f1)
    # find %T+ prefixes each path with a lexicographically sortable timestamp
    oldest_backup=$(find "$BACKUP_DIR" -name "payload_db_*.sql.gz" -type f -printf '%T+ %p\n' 2>/dev/null | sort | head -1 | cut -d' ' -f2 | xargs basename 2>/dev/null || echo "N/A")
    newest_backup=$(find "$BACKUP_DIR" -name "payload_db_*.sql.gz" -type f -printf '%T+ %p\n' 2>/dev/null | sort -r | head -1 | cut -d' ' -f2 | xargs basename 2>/dev/null || echo "N/A")

    log "--- Backup-Statistik ---"
    log "Lokale Backups: $backup_count (${total_size:-0})"
    log "Ältestes: $oldest_backup"
    log "Neuestes: $newest_backup"
}
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Hauptprogramm
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
# Orchestrates one backup run: rotate the log, verify prerequisites, dump
# the database, verify the archive, optionally ship it to S3, prune old
# backups, and print a one-line summary for cron. Exits non-zero on any
# fatal failure (connection, dump, integrity check).
main() {
    # Rotate the log first so a long-running install never grows unbounded
    rotate_log

    log "=========================================="
    log "Starte PostgreSQL Backup für $DB_NAME"
    log "=========================================="

    # Ensure the backup directory exists
    if [[ ! -d "$BACKUP_DIR" ]]; then
        mkdir -p "$BACKUP_DIR"
        log "Backup-Verzeichnis erstellt: $BACKUP_DIR"
    fi

    # Verify ~/.pgpass exists with safe permissions (exits on failure)
    check_pgpass

    # Verify database connectivity before attempting the dump
    log "Prüfe Datenbankverbindung..."
    if ! psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c "SELECT 1" > /dev/null 2>&1; then
        log_error "Datenbankverbindung fehlgeschlagen!"
        log_error "Prüfe ~/.pgpass und Netzwerkverbindung zu $DB_HOST:$DB_PORT"
        exit 1
    fi
    log "Datenbankverbindung erfolgreich"

    # Create the compressed dump
    log "Erstelle Backup: $(basename "$BACKUP_FILE")"

    local start_time
    start_time=$(date +%s)

    # Plain-format dump piped through gzip; --clean/--if-exists makes the
    # resulting SQL safe to restore over an existing schema. With `pipefail`
    # a pg_dump failure is caught even though gzip itself succeeds.
    if pg_dump \
        -h "$DB_HOST" \
        -p "$DB_PORT" \
        -U "$DB_USER" \
        -d "$DB_NAME" \
        --format=plain \
        --no-owner \
        --no-privileges \
        --clean \
        --if-exists \
        2>> "$LOG_FILE" | gzip > "$BACKUP_FILE"; then

        local end_time
        end_time=$(date +%s)
        local duration=$((end_time - start_time))
        local file_size
        file_size=$(du -h "$BACKUP_FILE" | cut -f1)

        log "Backup erfolgreich erstellt!"
        log "Datei: $(basename "$BACKUP_FILE")"
        log "Größe: $file_size"
        log "Dauer: ${duration}s"

        # Verify archive integrity before trusting the backup
        if gzip -t "$BACKUP_FILE" 2>/dev/null; then
            log "Backup-Integrität: OK (gzip-Test bestanden)"
        else
            log_error "Backup-Integrität: FEHLER (gzip-Test fehlgeschlagen)"
            exit 1
        fi
    else
        log_error "Backup fehlgeschlagen!"
        # Remove the partial file so it is never mistaken for a good backup
        rm -f "$BACKUP_FILE"
        exit 1
    fi

    # S3 offsite backup (best effort; the local backup already succeeded)
    local s3_success=false
    if [[ "$S3_ENABLED" == "true" ]]; then
        if check_s3cfg; then
            if upload_to_s3 "$BACKUP_FILE"; then
                s3_success=true
                cleanup_s3_backups
            fi
        else
            log "S3-Backup übersprungen (Konfiguration fehlt)"
        fi
    else
        log "S3-Backup deaktiviert (S3_BACKUP_ENABLED=false)"
    fi

    # Prune old local backups
    cleanup_old_backups

    # Log statistics
    get_backup_stats
    if [[ "$s3_success" == "true" ]]; then
        get_s3_stats
    fi

    log "=========================================="
    log "Backup abgeschlossen"
    log "=========================================="

    # Single-line stdout summary for cron (only in non-verbose mode;
    # verbose mode already echoed every log line to stdout)
    if [[ "$VERBOSE" != "--verbose" ]]; then
        if [[ "$s3_success" == "true" ]]; then
            echo "Backup OK: $(basename "$BACKUP_FILE") ($file_size) + S3"
        else
            echo "Backup OK: $(basename "$BACKUP_FILE") ($file_size)"
        fi
    fi
}

# Run the script
main "$@"
|
||||||
487
scripts/backup/restore-db.sh
Executable file
487
scripts/backup/restore-db.sh
Executable file
|
|
@ -0,0 +1,487 @@
|
||||||
|
#!/bin/bash
#
# Payload CMS - PostgreSQL restore script
# Restores the database from a backup.
#
# Usage:
#   ./restore-db.sh                          # Interactive (lists available backups)
#   ./restore-db.sh <backup-datei>           # Direct restore
#   ./restore-db.sh --from-s3                # Restore from S3 (interactive)
#   ./restore-db.sh --from-s3 <backup-name>  # Restore from S3 (direct)
#   ./restore-db.sh --list                   # List local backups
#   ./restore-db.sh --list-s3                # List S3 backups
#
# Prerequisites:
#   - ~/.pgpass with correct permissions (chmod 600)
#   - ~/.s3cfg for S3 restore (chmod 600)
#

set -euo pipefail

# ============================================================================
# Konfiguration
# ============================================================================

# Database connection (password supplied via ~/.pgpass; env vars override)
DB_HOST="${PGHOST:-10.10.181.101}"
DB_PORT="${PGPORT:-5432}"
DB_NAME="${PGDATABASE:-payload_db}"
DB_USER="${PGUSER:-payload}"

# Directory holding local backups
BACKUP_DIR="/home/payload/backups/postgres"

# S3 configuration (bucket and key prefix for offsite backups)
S3_BUCKET="${S3_BUCKET:-c2s}"
S3_PATH="${S3_PATH:-backups/postgres}"

# Scratch directory for S3 downloads (removed by the EXIT trap)
TEMP_DIR="/tmp/payload-restore"

# ============================================================================
# Farben und Formatierung
# ============================================================================

RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Funktionen
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
print_header() {
    # Banner printed at the start of an interactive run.
    echo ""
    echo -e "${BLUE}==============================================${NC}"
    echo -e "${BLUE} Payload CMS - Datenbank Restore${NC}"
    echo -e "${BLUE}==============================================${NC}"
    echo ""
}
|
||||||
|
|
||||||
|
print_step() {
    # Timestamped progress line (blue tag, stdout).
    echo -e "${BLUE}[$(date '+%H:%M:%S')]${NC} $1"
}
|
||||||
|
|
||||||
|
print_success() {
    # Green check-mark status line (stdout).
    echo -e "${GREEN}[✓]${NC} $1"
}
|
||||||
|
|
||||||
|
print_warning() {
    # Yellow warning line (stdout).
    echo -e "${YELLOW}[!]${NC} $1"
}
|
||||||
|
|
||||||
|
print_error() {
    # Red error line; written to stderr so it survives stdout capture
    # (several callers run inside command substitution).
    echo -e "${RED}[✗]${NC} ERROR: $1" >&2
}
|
||||||
|
|
||||||
|
check_pgpass() {
    # Refuse to run without a properly secured ~/.pgpass; psql reads it for
    # authentication, and libpq itself ignores it unless the mode is 600.
    local pgpass="$HOME/.pgpass"

    if [[ ! -f "$pgpass" ]]; then
        print_error ".pgpass nicht gefunden: $pgpass"
        exit 1
    fi

    # stat -c is GNU coreutils; the "000" fallback makes a failed stat also
    # fail the permission check below.
    local mode
    mode=$(stat -c%a "$pgpass" 2>/dev/null || echo "000")

    if [[ "$mode" != "600" ]]; then
        print_error ".pgpass hat unsichere Berechtigungen: $mode (erwartet: 600)"
        exit 1
    fi
}
|
||||||
|
|
||||||
|
check_s3cfg() {
    # Returns 0 only when both ~/.s3cfg and the s3cmd binary are available;
    # callers treat a non-zero return as "skip S3".
    local cfg="$HOME/.s3cfg"

    if [[ ! -f "$cfg" ]]; then
        print_error ".s3cfg nicht gefunden - S3-Restore nicht möglich"
        return 1
    fi

    if ! command -v s3cmd &> /dev/null; then
        print_error "s3cmd nicht installiert"
        return 1
    fi

    return 0
}
|
||||||
|
|
||||||
|
test_db_connection() {
    # Fail fast when the database is unreachable; credentials come from
    # ~/.pgpass (checked beforehand by check_pgpass).
    print_step "Teste Datenbankverbindung..."

    if psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c "SELECT 1" > /dev/null 2>&1; then
        print_success "Datenbankverbindung OK"
    else
        print_error "Datenbankverbindung fehlgeschlagen!"
        print_error "Prüfe ~/.pgpass und Netzwerkverbindung zu $DB_HOST:$DB_PORT"
        exit 1
    fi
}
|
||||||
|
|
||||||
|
list_local_backups() {
    # Print a numbered table of local backups, newest first.
    # Returns 1 when the backup directory is missing or empty.
    echo ""
    echo -e "${BLUE}Lokale Backups in ${BACKUP_DIR}:${NC}"
    echo ""

    if [[ ! -d "$BACKUP_DIR" ]]; then
        print_warning "Backup-Verzeichnis existiert nicht"
        return 1
    fi

    local idx=0
    local f sz ts name
    while IFS= read -r f; do
        [[ -z "$f" ]] && continue
        sz=$(du -h "$f" | cut -f1)
        # stat -c%y includes fractional seconds; keep only up to whole seconds
        ts=$(stat -c%y "$f" | cut -d'.' -f1)
        name=$(basename "$f")
        idx=$((idx + 1))
        printf " %2d) %-45s %6s %s\n" "$idx" "$name" "$sz" "$ts"
    done < <(find "$BACKUP_DIR" -name "payload_db_*.sql.gz" -type f | sort -r)

    if [[ $idx -eq 0 ]]; then
        print_warning "Keine Backups gefunden"
        return 1
    fi

    echo ""
    return 0
}
|
||||||
|
|
||||||
|
list_s3_backups() {
    # Print a numbered table of backups stored in S3, newest first.
    # Returns 1 when S3 is not configured or no backups exist.
    echo ""
    echo -e "${BLUE}S3-Backups in s3://${S3_BUCKET}/${S3_PATH}/:${NC}"
    echo ""

    check_s3cfg || return 1

    local idx=0
    local entry ts sz key name
    while IFS= read -r entry; do
        [[ -z "$entry" ]] && continue
        # s3cmd ls columns: date time size path
        ts=$(echo "$entry" | awk '{print $1, $2}')
        sz=$(echo "$entry" | awk '{print $3}')
        key=$(echo "$entry" | awk '{print $4}')
        name=$(basename "$key")

        # Make the byte count human readable
        if [[ $sz -gt 1048576 ]]; then
            sz="$((sz / 1048576))M"
        elif [[ $sz -gt 1024 ]]; then
            sz="$((sz / 1024))K"
        else
            sz="${sz}B"
        fi

        idx=$((idx + 1))
        printf " %2d) %-45s %6s %s\n" "$idx" "$name" "$sz" "$ts"
    done < <(s3cmd ls "s3://${S3_BUCKET}/${S3_PATH}/" 2>/dev/null | grep "payload_db_.*\.sql\.gz" | sort -r)

    if [[ $idx -eq 0 ]]; then
        print_warning "Keine S3-Backups gefunden"
        return 1
    fi

    echo ""
    return 0
}
|
||||||
|
|
||||||
|
select_local_backup() {
    # Interactively pick a local backup. Prints ONLY the chosen path on
    # stdout (callers capture it via command substitution); all UI output
    # goes to stderr.
    local backups=()
    local f

    while IFS= read -r f; do
        if [[ -n "$f" ]]; then
            backups+=("$f")
        fi
    done < <(find "$BACKUP_DIR" -name "payload_db_*.sql.gz" -type f | sort -r)

    if [[ ${#backups[@]} -eq 0 ]]; then
        print_error "Keine lokalen Backups gefunden"
        exit 1
    fi

    # BUGFIX: the listing must go to stderr, otherwise it is swallowed by
    # the caller's $(...) capture and the "returned path" contains the
    # whole table instead of a single file path.
    list_local_backups >&2

    # read -p writes its prompt to stderr, so it is safe inside $(...)
    read -p "Backup-Nummer auswählen (1-${#backups[@]}): " selection

    if ! [[ "$selection" =~ ^[0-9]+$ ]] || [[ $selection -lt 1 ]] || [[ $selection -gt ${#backups[@]} ]]; then
        print_error "Ungültige Auswahl"
        exit 1
    fi

    echo "${backups[$((selection-1))]}"
}
|
||||||
|
|
||||||
|
select_s3_backup() {
    # Interactively pick a backup stored in S3. Prints ONLY the chosen
    # s3:// path on stdout (captured by the caller); all UI goes to stderr.
    local backups=()
    local line path

    while IFS= read -r line; do
        if [[ -n "$line" ]]; then
            path=$(echo "$line" | awk '{print $4}')
            backups+=("$path")
        fi
    done < <(s3cmd ls "s3://${S3_BUCKET}/${S3_PATH}/" 2>/dev/null | grep "payload_db_.*\.sql\.gz" | sort -r)

    if [[ ${#backups[@]} -eq 0 ]]; then
        print_error "Keine S3-Backups gefunden"
        exit 1
    fi

    # BUGFIX: the listing must go to stderr, otherwise it pollutes the
    # caller's command-substitution capture of the selected path.
    list_s3_backups >&2

    read -p "Backup-Nummer auswählen (1-${#backups[@]}): " selection

    if ! [[ "$selection" =~ ^[0-9]+$ ]] || [[ $selection -lt 1 ]] || [[ $selection -gt ${#backups[@]} ]]; then
        print_error "Ungültige Auswahl"
        exit 1
    fi

    echo "${backups[$((selection-1))]}"
}
|
||||||
|
|
||||||
|
download_from_s3() {
    # Download one backup object from S3 into TEMP_DIR and print the local
    # path on stdout. All progress output is redirected to stderr because
    # the caller captures stdout via command substitution.
    local s3_path="$1"
    local filename
    filename=$(basename "$s3_path")
    # BUGFIX: the local file must be named after the S3 object; the
    # original contained a broken substitution here.
    local local_path="${TEMP_DIR}/${filename}"

    mkdir -p "$TEMP_DIR"

    print_step "Lade Backup von S3 herunter: $filename" >&2

    # s3cmd progress goes to stdout; route it to stderr so the only stdout
    # of this function is the final path.
    if s3cmd get "$s3_path" "$local_path" 2>/dev/null 1>&2; then
        print_success "Download abgeschlossen" >&2
        echo "$local_path"
    else
        print_error "Download fehlgeschlagen"
        exit 1
    fi
}
|
||||||
|
|
||||||
|
verify_backup() {
    # Abort unless the gzip container of the given backup file is intact.
    local file="$1"

    print_step "Verifiziere Backup-Integrität..."

    if gzip -t "$file" 2>/dev/null; then
        print_success "Backup-Integrität OK"
    else
        print_error "Backup-Datei ist beschädigt (gzip-Test fehlgeschlagen)"
        exit 1
    fi
}
|
||||||
|
|
||||||
|
get_db_stats() {
    # One-line summary of the target database: table count in the public
    # schema plus the live-row estimate from pg_stat_user_tables.
    local tables rows
    local psql_base=(psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -t -c)

    tables=$("${psql_base[@]}" \
        "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = 'public';" 2>/dev/null | tr -d ' ')

    rows=$("${psql_base[@]}" \
        "SELECT SUM(n_live_tup) FROM pg_stat_user_tables;" 2>/dev/null | tr -d ' ')

    # Empty results (e.g. connection hiccup) fall back to 0
    echo "Tabellen: ${tables:-0}, Zeilen: ${rows:-0}"
}
|
||||||
|
|
||||||
|
stop_application() {
    # Stop the CMS and its queue worker so nothing writes to the database
    # during the restore. Falls back to a manual prompt without PM2.
    print_step "Stoppe Payload CMS..."

    if ! command -v pm2 &> /dev/null; then
        print_warning "PM2 nicht verfügbar - bitte Anwendung manuell stoppen!"
        read -p "Weiter wenn Anwendung gestoppt? [Enter] " -r
        return
    fi

    if pm2 list 2>/dev/null | grep -q "payload"; then
        pm2 stop payload 2>/dev/null || true
        print_success "Payload gestoppt"
    else
        print_warning "Payload läuft nicht unter PM2"
    fi

    if pm2 list 2>/dev/null | grep -q "queue-worker"; then
        pm2 stop queue-worker 2>/dev/null || true
        print_success "Queue-Worker gestoppt"
    fi
}
|
||||||
|
|
||||||
|
start_application() {
    # Bring the CMS and queue worker back up after a restore. Best effort:
    # pm2 errors are ignored so a failed start does not abort the script.
    print_step "Starte Payload CMS..."

    if ! command -v pm2 &> /dev/null; then
        print_warning "PM2 nicht verfügbar - bitte Anwendung manuell starten!"
        return
    fi

    local proc
    for proc in payload queue-worker; do
        pm2 start "$proc" 2>/dev/null || true
    done
    print_success "Anwendung gestartet"
}
|
||||||
|
|
||||||
|
perform_restore() {
    # Destructive, interactively confirmed restore of $DB_NAME from a
    # .sql.gz dump: stop the app, replay the dump, verify, restart.
    local backup_file="$1"
    local filename
    filename=$(basename "$backup_file")

    echo ""
    echo -e "${YELLOW}╔════════════════════════════════════════════════════════════╗${NC}"
    echo -e "${YELLOW}║ WARNUNG: Datenbank-Restore überschreibt alle Daten! ║${NC}"
    echo -e "${YELLOW}╚════════════════════════════════════════════════════════════╝${NC}"
    echo ""
    echo " Backup: $filename"
    echo " Ziel: $DB_NAME @ $DB_HOST:$DB_PORT"
    echo " Aktuell: $(get_db_stats)"
    echo ""

    # Require the literal word RESTORE, not just y/N, for this operation
    read -p "Restore durchführen? Tippe 'RESTORE' zur Bestätigung: " confirmation

    if [[ "$confirmation" != "RESTORE" ]]; then
        print_warning "Restore abgebrochen"
        exit 0
    fi

    echo ""

    # Stop the application so no writes race against psql
    stop_application

    print_step "Führe Restore durch..."

    local start_time
    start_time=$(date +%s)

    # BUGFIX: without ON_ERROR_STOP psql exits 0 even when individual SQL
    # statements fail, silently reporting a partial restore as success.
    if gunzip -c "$backup_file" | psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -q -v ON_ERROR_STOP=1 2>/dev/null; then
        local end_time
        end_time=$(date +%s)
        local duration=$((end_time - start_time))

        print_success "Restore abgeschlossen (${duration}s)"
    else
        print_error "Restore fehlgeschlagen!"
        start_application
        exit 1
    fi

    # Post-restore sanity check
    print_step "Verifiziere Restore..."
    echo " Neue Statistik: $(get_db_stats)"

    # Bring the application back up
    start_application

    echo ""
    echo -e "${GREEN}╔════════════════════════════════════════════════════════════╗${NC}"
    echo -e "${GREEN}║ Restore erfolgreich abgeschlossen! ║${NC}"
    echo -e "${GREEN}╚════════════════════════════════════════════════════════════╝${NC}"
    echo ""
}
|
||||||
|
|
||||||
|
cleanup_temp() {
    # EXIT trap: remove backups downloaded from S3 into the scratch dir.
    if [[ -d "$TEMP_DIR" ]]; then
        rm -rf "$TEMP_DIR"
    fi
}
|
||||||
|
|
||||||
|
show_usage() {
    # CLI help. The heredoc delimiter is unquoted so $0 expands, exactly as
    # it did in the original echo calls.
    cat << EOF
Verwendung: $0 [OPTION] [BACKUP-DATEI]

Optionen:
 (keine) Interaktive Auswahl aus lokalen Backups
 <backup-datei> Direktes Restore aus lokaler Datei
 --from-s3 Interaktive Auswahl aus S3-Backups
 --from-s3 <name> Direktes Restore aus S3 (Dateiname)
 --list Lokale Backups auflisten
 --list-s3 S3-Backups auflisten
 --help Diese Hilfe anzeigen

Beispiele:
 $0 # Interaktiv
 $0 /path/to/backup.sql.gz # Direkt lokal
 $0 --from-s3 # Interaktiv von S3
 $0 --from-s3 payload_db_2025-12-11_03-00-00.sql.gz

EOF
}
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Hauptprogramm
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
main() {
    # Entry point: dispatch CLI options, determine the backup file
    # (local file, interactive selection, or S3 download), then restore.
    trap cleanup_temp EXIT

    # Options that need no database connection are handled first
    case "${1:-}" in
        --help|-h)
            show_usage
            exit 0
            ;;
        --list)
            list_local_backups
            exit 0
            ;;
        --list-s3)
            list_s3_backups
            exit 0
            ;;
    esac

    print_header

    # Preconditions
    check_pgpass
    test_db_connection

    local backup_file=""

    if [[ "${1:-}" == "--from-s3" ]]; then
        # Restore from S3: direct object name or interactive selection
        if ! check_s3cfg; then
            exit 1
        fi

        local s3_path
        if [[ -n "${2:-}" ]]; then
            s3_path="s3://${S3_BUCKET}/${S3_PATH}/${2}"
        else
            s3_path=$(select_s3_backup)
        fi

        backup_file=$(download_from_s3 "$s3_path")
    elif [[ -n "${1:-}" ]]; then
        # Local restore from an explicitly named file
        backup_file="$1"
        if [[ ! -f "$backup_file" ]]; then
            print_error "Backup-Datei nicht gefunden: $backup_file"
            exit 1
        fi
    else
        # Interactive selection from local backups
        backup_file=$(select_local_backup)
    fi

    # Verify integrity, then restore
    verify_backup "$backup_file"
    perform_restore "$backup_file"
}

main "$@"
|
||||||
293
scripts/backup/setup-backup.sh
Executable file
293
scripts/backup/setup-backup.sh
Executable file
|
|
@ -0,0 +1,293 @@
|
||||||
|
#!/bin/bash
#
# Payload CMS - Backup system setup
# Sets up the automated backup system on a new server.
#
# Usage:
#   ./setup-backup.sh
#
# Prerequisites:
#   - PostgreSQL client (psql, pg_dump) installed
#   - Access to the database
#

set -euo pipefail

# ============================================================================
# Konfiguration
# ============================================================================

# Directory of this script (used to find backup-db.sh next to it)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Where local dumps and the installed backup script live
BACKUP_DIR="/home/payload/backups/postgres"
# Cron output log directory
LOG_DIR="/home/payload/logs"
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Funktionen
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
print_header() {
    # Plain-text banner (this script intentionally uses no colors).
    local rule="=============================================="
    echo ""
    echo "$rule"
    echo " Payload CMS - Backup-System Setup"
    echo "$rule"
    echo ""
}
|
||||||
|
|
||||||
|
print_step() {
    # Timestamped progress line (stdout).
    echo "[$(date '+%H:%M:%S')] $1"
}
|
||||||
|
|
||||||
|
print_success() {
    # Success marker line (stdout).
    echo "[✓] $1"
}
|
||||||
|
|
||||||
|
print_error() {
    # Error marker line; written to stderr.
    echo "[✗] ERROR: $1" >&2
}
|
||||||
|
|
||||||
|
check_prerequisites() {
    # Both psql and pg_dump must be on PATH before anything else happens.
    print_step "Prüfe Voraussetzungen..."

    local tool
    for tool in psql pg_dump; do
        if ! command -v "$tool" &> /dev/null; then
            print_error "$tool nicht gefunden. Installiere mit: apt install postgresql-client"
            exit 1
        fi
    done

    print_success "PostgreSQL-Client verfügbar"
}
|
||||||
|
|
||||||
|
setup_directories() {
    # Create the backup and log directories (idempotent).
    print_step "Erstelle Verzeichnisse..."
    mkdir -p "$BACKUP_DIR" "$LOG_DIR"
    print_success "Verzeichnisse erstellt"
}
|
||||||
|
|
||||||
|
setup_pgpass() {
    # Interactively write ~/.pgpass (host:port:db:user:password) and verify
    # the entered credentials with a test connection. Exits on failure.
    local pgpass_file="$HOME/.pgpass"

    print_step "Konfiguriere PostgreSQL-Authentifizierung..."

    if [[ -f "$pgpass_file" ]]; then
        echo " .pgpass existiert bereits."
        read -p " Überschreiben? [y/N] " -n 1 -r
        echo
        if [[ ! $REPLY =~ ^[Yy]$ ]]; then
            print_success ".pgpass beibehalten"
            return 0
        fi
    fi

    echo ""
    echo " PostgreSQL-Verbindungsdaten eingeben:"
    read -p " Host [10.10.181.101]: " db_host
    db_host=${db_host:-10.10.181.101}

    read -p " Port [5432]: " db_port
    db_port=${db_port:-5432}

    read -p " Datenbank [payload_db]: " db_name
    db_name=${db_name:-payload_db}

    read -p " Benutzer [payload]: " db_user
    db_user=${db_user:-payload}

    # -s: no echo while typing the password
    read -sp " Passwort: " db_pass
    echo ""

    if [[ -z "$db_pass" ]]; then
        print_error "Passwort darf nicht leer sein"
        exit 1
    fi

    # BUGFIX: create the file with mode 600 BEFORE writing the password so
    # it is never world-readable, even briefly (the original chmod'ed only
    # after writing, leaving a window at the default umask).
    rm -f "$pgpass_file"
    touch "$pgpass_file"
    chmod 600 "$pgpass_file"
    echo "${db_host}:${db_port}:${db_name}:${db_user}:${db_pass}" > "$pgpass_file"

    print_success ".pgpass erstellt (chmod 600)"

    # Verify the credentials actually work before continuing
    print_step "Teste Datenbankverbindung..."
    if psql -h "$db_host" -p "$db_port" -U "$db_user" -d "$db_name" -c "SELECT 1" > /dev/null 2>&1; then
        print_success "Datenbankverbindung erfolgreich"
    else
        print_error "Datenbankverbindung fehlgeschlagen"
        exit 1
    fi
}
|
||||||
|
|
||||||
|
setup_s3() {
    # Interactively configure s3cmd (~/.s3cfg) for offsite backups and run a
    # connection smoke test. Optional: the user may skip; returns 1 on
    # failure so the caller can decide how to proceed.
    local s3cfg_file="$HOME/.s3cfg"

    print_step "Konfiguriere S3 Offsite-Backup..."

    read -p " S3 Offsite-Backup aktivieren? [Y/n] " -n 1 -r
    echo
    if [[ $REPLY =~ ^[Nn]$ ]]; then
        print_success "S3-Backup übersprungen"
        return 0
    fi

    # Install s3cmd when missing (Debian/Ubuntu only; otherwise bail out)
    if ! command -v s3cmd &> /dev/null; then
        echo " s3cmd nicht gefunden. Installiere..."
        if command -v apt &> /dev/null; then
            sudo apt update && sudo apt install -y s3cmd
        else
            print_error "Bitte s3cmd manuell installieren"
            return 1
        fi
    fi

    if [[ -f "$s3cfg_file" ]]; then
        echo " .s3cfg existiert bereits."
        read -p " Überschreiben? [y/N] " -n 1 -r
        echo
        if [[ ! $REPLY =~ ^[Yy]$ ]]; then
            print_success ".s3cfg beibehalten"
            return 0
        fi
    fi

    echo ""
    echo " S3-Verbindungsdaten eingeben:"
    read -p " S3 Host (z.B. fsn1.your-objectstorage.com): " s3_host

    if [[ -z "$s3_host" ]]; then
        print_error "S3 Host darf nicht leer sein"
        return 1
    fi

    read -p " Access Key: " s3_access_key
    read -sp " Secret Key: " s3_secret_key
    echo ""

    if [[ -z "$s3_access_key" || -z "$s3_secret_key" ]]; then
        print_error "Access Key und Secret Key dürfen nicht leer sein"
        return 1
    fi

    # BUGFIX: create the file with mode 600 BEFORE writing the secret key so
    # the credentials are never world-readable (original chmod'ed after).
    rm -f "$s3cfg_file"
    touch "$s3cfg_file"
    chmod 600 "$s3cfg_file"
    cat > "$s3cfg_file" << EOF
[default]
access_key = ${s3_access_key}
secret_key = ${s3_secret_key}
host_base = ${s3_host}
host_bucket = %(bucket)s.${s3_host}
use_https = True
signature_v2 = False
EOF

    print_success ".s3cfg erstellt (chmod 600)"

    # Smoke test: list buckets with the new credentials
    print_step "Teste S3-Verbindung..."
    if s3cmd ls > /dev/null 2>&1; then
        print_success "S3-Verbindung erfolgreich"
        s3cmd ls
    else
        print_error "S3-Verbindung fehlgeschlagen"
        return 1
    fi
}
|
||||||
|
|
||||||
|
install_backup_script() {
    # Copy backup-db.sh from the repo (next to this script) into BACKUP_DIR
    # and make it executable — cron runs it from there.
    print_step "Installiere Backup-Skript..."

    local src="${SCRIPT_DIR}/backup-db.sh"
    local dst="${BACKUP_DIR}/backup-db.sh"

    if [[ ! -f "$src" ]]; then
        print_error "Backup-Skript nicht gefunden: $src"
        exit 1
    fi

    cp "$src" "$dst"
    chmod +x "$dst"

    print_success "Backup-Skript installiert: $dst"
}
|
||||||
|
|
||||||
|
setup_cron() {
    # Install a daily 03:00 cron job for the backup script; its output is
    # appended to the cron log. Re-running replaces the previous entry.
    print_step "Konfiguriere Cron-Job..."

    local cron_comment="# Payload CMS - Tägliches PostgreSQL Backup"
    local cron_entry="0 3 * * * ${BACKUP_DIR}/backup-db.sh >> ${LOG_DIR}/backup-cron.log 2>&1"

    if crontab -l 2>/dev/null | grep -q "backup-db.sh"; then
        echo " Cron-Job existiert bereits."
        read -p " Überschreiben? [y/N] " -n 1 -r
        echo
        if [[ ! $REPLY =~ ^[Yy]$ ]]; then
            print_success "Cron-Job beibehalten"
            return 0
        fi
        # BUGFIX: also remove the marker comment, otherwise a duplicate
        # comment line accumulates on every re-run. The "|| true" keeps
        # pipefail from aborting when the filtered crontab becomes empty
        # (grep -v exits 1 when it outputs nothing).
        { crontab -l 2>/dev/null | grep -v "backup-db.sh" | grep -v -F "$cron_comment" || true; } | crontab -
    fi

    # Append the (comment, entry) pair to whatever remains
    (crontab -l 2>/dev/null; echo "$cron_comment"; echo "$cron_entry") | crontab -

    print_success "Cron-Job eingerichtet (täglich um 03:00 Uhr)"
}
|
||||||
|
|
||||||
|
test_backup() {
    # Offer to run the freshly installed backup script once, verbosely,
    # to prove the whole chain works end to end.
    print_step "Teste Backup..."

    read -p " Test-Backup jetzt ausführen? [Y/n] " -n 1 -r
    echo
    if [[ $REPLY =~ ^[Nn]$ ]]; then
        print_success "Test übersprungen"
        return 0
    fi

    if ! "${BACKUP_DIR}/backup-db.sh" --verbose; then
        print_error "Test-Backup fehlgeschlagen"
        exit 1
    fi
    print_success "Test-Backup erfolgreich"
}
|
||||||
|
|
||||||
|
print_summary() {
    # Final overview of installed paths and schedule. The unquoted heredoc
    # delimiter lets ${BACKUP_DIR}/${LOG_DIR} expand as before.
    cat << EOF

==============================================
 Setup abgeschlossen!
==============================================

 Backup-Skript: ${BACKUP_DIR}/backup-db.sh
 Lokale Backups: ${BACKUP_DIR}/
 Log-Dateien: ${BACKUP_DIR}/backup.log
 ${LOG_DIR}/backup-cron.log

 Cron-Job: Täglich um 03:00 Uhr

 Manuelle Ausführung:
 ${BACKUP_DIR}/backup-db.sh --verbose

EOF
}
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Hauptprogramm
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
main() {
    # Run every setup phase in order; each phase exits the script (or
    # returns non-zero, which set -e escalates) on fatal errors.
    print_header
    check_prerequisites
    setup_directories
    setup_pgpass
    setup_s3
    install_backup_script
    setup_cron
    test_backup
    print_summary
}

main "$@"
|
||||||
Loading…
Reference in a new issue