feat(monitoring): add database migration for 4 monitoring collections

Creates tables, enums, and indexes for monitoring_snapshots,
monitoring_logs, monitoring_alert_rules, and monitoring_alert_history.
Includes hasMany select tables and the critical
payload_locked_documents_rels columns.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Martin Porwoll 2026-02-15 00:19:35 +00:00
parent 34becc8f49
commit ee981c32bc
2 changed files with 259 additions and 0 deletions

View file

@@ -0,0 +1,253 @@
import { MigrateUpArgs, MigrateDownArgs, sql } from '@payloadcms/db-postgres'
/**
* Migration: Add Monitoring Collections
*
* Creates 4 monitoring collections:
* - monitoring_snapshots (historical system metrics)
* - monitoring_logs (structured business event logs)
* - monitoring_alert_rules (configurable alert rules)
* - monitoring_alert_history (alert log, WORM)
*
* Also creates required enum types and array tables for hasMany select fields.
*/
/**
 * Applies the monitoring schema.
 *
 * Creates the 4 collection tables, their enum types, the hasMany-select
 * child tables, the recipients-emails array table, and the
 * payload_locked_documents_rels columns Payload requires for document
 * locking of the new collections.
 *
 * All DDL is idempotent so the migration can be safely re-run after a
 * partial failure: tables and indexes use IF NOT EXISTS, and the
 * statements Postgres offers no IF NOT EXISTS form for (CREATE TYPE,
 * ALTER TABLE … ADD CONSTRAINT) are wrapped in DO blocks that swallow
 * duplicate_object.
 */
export async function up({ db }: MigrateUpArgs): Promise<void> {
  // Step 1: Create enum types for select fields.
  // CREATE TYPE cannot take IF NOT EXISTS, so each statement is guarded
  // with a duplicate_object handler — consistent with the IF NOT EXISTS
  // guards used for every table and index below.
  await db.execute(sql`
    DO $$ BEGIN CREATE TYPE "public"."enum_monitoring_logs_level" AS ENUM('debug', 'info', 'warn', 'error', 'fatal'); EXCEPTION WHEN duplicate_object THEN NULL; END $$;
    DO $$ BEGIN CREATE TYPE "public"."enum_monitoring_logs_source" AS ENUM('payload', 'queue-worker', 'cron', 'email', 'oauth', 'sync'); EXCEPTION WHEN duplicate_object THEN NULL; END $$;
    DO $$ BEGIN CREATE TYPE "public"."enum_monitoring_alert_rules_condition" AS ENUM('gt', 'lt', 'eq', 'gte', 'lte'); EXCEPTION WHEN duplicate_object THEN NULL; END $$;
    DO $$ BEGIN CREATE TYPE "public"."enum_monitoring_alert_rules_severity" AS ENUM('warning', 'error', 'critical'); EXCEPTION WHEN duplicate_object THEN NULL; END $$;
    DO $$ BEGIN CREATE TYPE "public"."enum_monitoring_alert_rules_channels" AS ENUM('email', 'slack', 'discord'); EXCEPTION WHEN duplicate_object THEN NULL; END $$;
    DO $$ BEGIN CREATE TYPE "public"."enum_monitoring_alert_history_severity" AS ENUM('warning', 'error', 'critical'); EXCEPTION WHEN duplicate_object THEN NULL; END $$;
    DO $$ BEGIN CREATE TYPE "public"."enum_monitoring_alert_history_channels_sent" AS ENUM('email', 'slack', 'discord'); EXCEPTION WHEN duplicate_object THEN NULL; END $$;
  `)

  // Step 2: monitoring_snapshots — point-in-time system/service metrics.
  // Column names follow Payload's snake_case flattening of nested groups
  // (e.g. system.memoryUsedMB -> system_memory_used_m_b).
  await db.execute(sql`
    CREATE TABLE IF NOT EXISTS "monitoring_snapshots" (
      "id" serial PRIMARY KEY NOT NULL,
      "timestamp" timestamp(3) with time zone NOT NULL,
      "system_cpu_usage_percent" numeric,
      "system_memory_used_m_b" numeric,
      "system_memory_total_m_b" numeric,
      "system_memory_usage_percent" numeric,
      "system_disk_used_g_b" numeric,
      "system_disk_total_g_b" numeric,
      "system_disk_usage_percent" numeric,
      "system_load_avg1" numeric,
      "system_load_avg5" numeric,
      "system_uptime" numeric,
      "services_payload" jsonb,
      "services_queue_worker" jsonb,
      "services_postgresql" jsonb,
      "services_pgbouncer" jsonb,
      "services_redis" jsonb,
      "external_smtp" jsonb,
      "external_meta_o_auth" jsonb,
      "external_youtube_o_auth" jsonb,
      "external_cron_jobs" jsonb,
      "performance_avg_response_time_ms" numeric,
      "performance_p95_response_time_ms" numeric,
      "performance_p99_response_time_ms" numeric,
      "performance_error_rate" numeric,
      "performance_requests_per_minute" numeric,
      "updated_at" timestamp(3) with time zone DEFAULT now() NOT NULL,
      "created_at" timestamp(3) with time zone DEFAULT now() NOT NULL
    );
    CREATE INDEX IF NOT EXISTS "monitoring_snapshots_timestamp_idx" ON "monitoring_snapshots" USING btree ("timestamp");
    CREATE INDEX IF NOT EXISTS "monitoring_snapshots_updated_at_idx" ON "monitoring_snapshots" USING btree ("updated_at");
    CREATE INDEX IF NOT EXISTS "monitoring_snapshots_created_at_idx" ON "monitoring_snapshots" USING btree ("created_at");
  `)

  // Step 3: monitoring_logs — structured business-event log entries.
  // user_id_id is Payload's column name for a relationship field named
  // "userId"; FKs use SET NULL so deleting a user/tenant keeps the log.
  await db.execute(sql`
    CREATE TABLE IF NOT EXISTS "monitoring_logs" (
      "id" serial PRIMARY KEY NOT NULL,
      "level" "enum_monitoring_logs_level" NOT NULL,
      "source" "enum_monitoring_logs_source" NOT NULL,
      "message" varchar NOT NULL,
      "context" jsonb,
      "request_id" varchar,
      "user_id_id" integer REFERENCES users(id) ON DELETE SET NULL,
      "tenant_id" integer REFERENCES tenants(id) ON DELETE SET NULL,
      "duration" numeric,
      "updated_at" timestamp(3) with time zone DEFAULT now() NOT NULL,
      "created_at" timestamp(3) with time zone DEFAULT now() NOT NULL
    );
    CREATE INDEX IF NOT EXISTS "monitoring_logs_level_idx" ON "monitoring_logs" USING btree ("level");
    CREATE INDEX IF NOT EXISTS "monitoring_logs_source_idx" ON "monitoring_logs" USING btree ("source");
    CREATE INDEX IF NOT EXISTS "monitoring_logs_user_id_idx" ON "monitoring_logs" USING btree ("user_id_id");
    CREATE INDEX IF NOT EXISTS "monitoring_logs_tenant_idx" ON "monitoring_logs" USING btree ("tenant_id");
    CREATE INDEX IF NOT EXISTS "monitoring_logs_updated_at_idx" ON "monitoring_logs" USING btree ("updated_at");
    CREATE INDEX IF NOT EXISTS "monitoring_logs_created_at_idx" ON "monitoring_logs" USING btree ("created_at");
  `)

  // Step 4: monitoring_alert_rules — configurable alert definitions.
  await db.execute(sql`
    CREATE TABLE IF NOT EXISTS "monitoring_alert_rules" (
      "id" serial PRIMARY KEY NOT NULL,
      "name" varchar NOT NULL,
      "metric" varchar NOT NULL,
      "condition" "enum_monitoring_alert_rules_condition" NOT NULL,
      "threshold" numeric NOT NULL,
      "severity" "enum_monitoring_alert_rules_severity" NOT NULL,
      "recipients_slack_webhook" varchar,
      "recipients_discord_webhook" varchar,
      "cooldown_minutes" numeric DEFAULT 15,
      "enabled" boolean DEFAULT true,
      "tenant_id" integer REFERENCES tenants(id) ON DELETE SET NULL,
      "updated_at" timestamp(3) with time zone DEFAULT now() NOT NULL,
      "created_at" timestamp(3) with time zone DEFAULT now() NOT NULL
    );
    CREATE INDEX IF NOT EXISTS "monitoring_alert_rules_tenant_idx" ON "monitoring_alert_rules" USING btree ("tenant_id");
    CREATE INDEX IF NOT EXISTS "monitoring_alert_rules_updated_at_idx" ON "monitoring_alert_rules" USING btree ("updated_at");
    CREATE INDEX IF NOT EXISTS "monitoring_alert_rules_created_at_idx" ON "monitoring_alert_rules" USING btree ("created_at");
  `)

  // Step 5: hasMany-select child table for alertRules.channels.
  // ADD CONSTRAINT has no IF NOT EXISTS, so it is guarded like the enums
  // above to keep the step re-runnable.
  await db.execute(sql`
    CREATE TABLE IF NOT EXISTS "monitoring_alert_rules_channels" (
      "order" integer NOT NULL,
      "parent_id" integer NOT NULL,
      "value" "enum_monitoring_alert_rules_channels",
      "id" serial PRIMARY KEY NOT NULL
    );
    CREATE INDEX IF NOT EXISTS "monitoring_alert_rules_channels_order_idx" ON "monitoring_alert_rules_channels" USING btree ("order");
    CREATE INDEX IF NOT EXISTS "monitoring_alert_rules_channels_parent_idx" ON "monitoring_alert_rules_channels" USING btree ("parent_id");
    DO $$ BEGIN
      ALTER TABLE "monitoring_alert_rules_channels"
        ADD CONSTRAINT "monitoring_alert_rules_channels_parent_fk"
        FOREIGN KEY ("parent_id") REFERENCES "public"."monitoring_alert_rules"("id") ON DELETE CASCADE ON UPDATE NO ACTION;
    EXCEPTION WHEN duplicate_object THEN NULL; END $$;
  `)

  // Step 6: array-field child table for alertRules.recipients.emails
  // (note Payload's _order/_parent_id naming for array tables, vs the
  // unprefixed order/parent_id used by hasMany-select tables).
  await db.execute(sql`
    CREATE TABLE IF NOT EXISTS "monitoring_alert_rules_recipients_emails" (
      "id" serial PRIMARY KEY NOT NULL,
      "_order" integer NOT NULL,
      "_parent_id" integer NOT NULL REFERENCES monitoring_alert_rules(id) ON DELETE CASCADE,
      "email" varchar NOT NULL
    );
    CREATE INDEX IF NOT EXISTS "monitoring_alert_rules_recipients_emails_order_idx" ON "monitoring_alert_rules_recipients_emails" USING btree ("_order");
    CREATE INDEX IF NOT EXISTS "monitoring_alert_rules_recipients_emails_parent_idx" ON "monitoring_alert_rules_recipients_emails" USING btree ("_parent_id");
  `)

  // Step 7: monitoring_alert_history — write-once alert log. rule_id uses
  // SET NULL so deleting a rule preserves its historical alerts.
  await db.execute(sql`
    CREATE TABLE IF NOT EXISTS "monitoring_alert_history" (
      "id" serial PRIMARY KEY NOT NULL,
      "rule_id" integer REFERENCES monitoring_alert_rules(id) ON DELETE SET NULL,
      "metric" varchar NOT NULL,
      "value" numeric NOT NULL,
      "threshold" numeric NOT NULL,
      "severity" "enum_monitoring_alert_history_severity" NOT NULL,
      "message" varchar NOT NULL,
      "resolved_at" timestamp(3) with time zone,
      "acknowledged_by_id" integer REFERENCES users(id) ON DELETE SET NULL,
      "updated_at" timestamp(3) with time zone DEFAULT now() NOT NULL,
      "created_at" timestamp(3) with time zone DEFAULT now() NOT NULL
    );
    CREATE INDEX IF NOT EXISTS "monitoring_alert_history_rule_idx" ON "monitoring_alert_history" USING btree ("rule_id");
    CREATE INDEX IF NOT EXISTS "monitoring_alert_history_acknowledged_by_idx" ON "monitoring_alert_history" USING btree ("acknowledged_by_id");
    CREATE INDEX IF NOT EXISTS "monitoring_alert_history_updated_at_idx" ON "monitoring_alert_history" USING btree ("updated_at");
    CREATE INDEX IF NOT EXISTS "monitoring_alert_history_created_at_idx" ON "monitoring_alert_history" USING btree ("created_at");
  `)

  // Step 8: hasMany-select child table for alertHistory.channelsSent,
  // with the same duplicate_object guard as step 5.
  await db.execute(sql`
    CREATE TABLE IF NOT EXISTS "monitoring_alert_history_channels_sent" (
      "order" integer NOT NULL,
      "parent_id" integer NOT NULL,
      "value" "enum_monitoring_alert_history_channels_sent",
      "id" serial PRIMARY KEY NOT NULL
    );
    CREATE INDEX IF NOT EXISTS "monitoring_alert_history_channels_sent_order_idx" ON "monitoring_alert_history_channels_sent" USING btree ("order");
    CREATE INDEX IF NOT EXISTS "monitoring_alert_history_channels_sent_parent_idx" ON "monitoring_alert_history_channels_sent" USING btree ("parent_id");
    DO $$ BEGIN
      ALTER TABLE "monitoring_alert_history_channels_sent"
        ADD CONSTRAINT "monitoring_alert_history_channels_sent_parent_fk"
        FOREIGN KEY ("parent_id") REFERENCES "public"."monitoring_alert_history"("id") ON DELETE CASCADE ON UPDATE NO ACTION;
    EXCEPTION WHEN duplicate_object THEN NULL; END $$;
  `)

  // Step 9: CRITICAL — register the new collections in Payload's
  // payload_locked_documents_rels system table; without these columns the
  // admin UI's document-locking queries fail for the new collections.
  await db.execute(sql`
    ALTER TABLE "payload_locked_documents_rels"
      ADD COLUMN IF NOT EXISTS "monitoring_snapshots_id" integer REFERENCES monitoring_snapshots(id) ON DELETE CASCADE;
    ALTER TABLE "payload_locked_documents_rels"
      ADD COLUMN IF NOT EXISTS "monitoring_logs_id" integer REFERENCES monitoring_logs(id) ON DELETE CASCADE;
    ALTER TABLE "payload_locked_documents_rels"
      ADD COLUMN IF NOT EXISTS "monitoring_alert_rules_id" integer REFERENCES monitoring_alert_rules(id) ON DELETE CASCADE;
    ALTER TABLE "payload_locked_documents_rels"
      ADD COLUMN IF NOT EXISTS "monitoring_alert_history_id" integer REFERENCES monitoring_alert_history(id) ON DELETE CASCADE;
    CREATE INDEX IF NOT EXISTS "payload_locked_documents_rels_monitoring_snapshots_idx"
      ON "payload_locked_documents_rels" USING btree ("monitoring_snapshots_id");
    CREATE INDEX IF NOT EXISTS "payload_locked_documents_rels_monitoring_logs_idx"
      ON "payload_locked_documents_rels" USING btree ("monitoring_logs_id");
    CREATE INDEX IF NOT EXISTS "payload_locked_documents_rels_monitoring_alert_rules_idx"
      ON "payload_locked_documents_rels" USING btree ("monitoring_alert_rules_id");
    CREATE INDEX IF NOT EXISTS "payload_locked_documents_rels_monitoring_alert_history_idx"
      ON "payload_locked_documents_rels" USING btree ("monitoring_alert_history_id");
  `)
}
/**
 * Reverses the monitoring migration.
 *
 * Drop order is significant: first the payload_locked_documents_rels
 * columns that reference the collection tables, then the child
 * (hasMany-select and array) tables, then the main tables in reverse
 * dependency order, and finally the enum types — which can only be
 * dropped once no column uses them. Every statement is IF EXISTS-guarded,
 * so the whole teardown is batched into a single execute call.
 */
export async function down({ db }: MigrateDownArgs): Promise<void> {
  await db.execute(sql`
    -- Detach the collections from Payload's document-locking system table.
    DROP INDEX IF EXISTS "payload_locked_documents_rels_monitoring_alert_history_idx";
    DROP INDEX IF EXISTS "payload_locked_documents_rels_monitoring_alert_rules_idx";
    DROP INDEX IF EXISTS "payload_locked_documents_rels_monitoring_logs_idx";
    DROP INDEX IF EXISTS "payload_locked_documents_rels_monitoring_snapshots_idx";
    ALTER TABLE "payload_locked_documents_rels" DROP COLUMN IF EXISTS "monitoring_alert_history_id";
    ALTER TABLE "payload_locked_documents_rels" DROP COLUMN IF EXISTS "monitoring_alert_rules_id";
    ALTER TABLE "payload_locked_documents_rels" DROP COLUMN IF EXISTS "monitoring_logs_id";
    ALTER TABLE "payload_locked_documents_rels" DROP COLUMN IF EXISTS "monitoring_snapshots_id";

    -- hasMany-select child tables.
    DROP TABLE IF EXISTS "monitoring_alert_history_channels_sent" CASCADE;
    DROP TABLE IF EXISTS "monitoring_alert_rules_channels" CASCADE;

    -- Array-field child table.
    DROP TABLE IF EXISTS "monitoring_alert_rules_recipients_emails" CASCADE;

    -- Main collection tables, reverse dependency order.
    DROP TABLE IF EXISTS "monitoring_alert_history" CASCADE;
    DROP TABLE IF EXISTS "monitoring_alert_rules" CASCADE;
    DROP TABLE IF EXISTS "monitoring_logs" CASCADE;
    DROP TABLE IF EXISTS "monitoring_snapshots" CASCADE;

    -- Enum types, now unreferenced.
    DROP TYPE IF EXISTS "public"."enum_monitoring_alert_history_channels_sent";
    DROP TYPE IF EXISTS "public"."enum_monitoring_alert_history_severity";
    DROP TYPE IF EXISTS "public"."enum_monitoring_alert_rules_channels";
    DROP TYPE IF EXISTS "public"."enum_monitoring_alert_rules_severity";
    DROP TYPE IF EXISTS "public"."enum_monitoring_alert_rules_condition";
    DROP TYPE IF EXISTS "public"."enum_monitoring_logs_source";
    DROP TYPE IF EXISTS "public"."enum_monitoring_logs_level";
  `)
}

View file

@@ -35,6 +35,7 @@ import * as migration_20260113_180000_add_community_phase1 from './20260113_1800
import * as migration_20260114_200000_fix_community_role_enum from './20260114_200000_fix_community_role_enum';
import * as migration_20260116_100000_add_token_notification_fields from './20260116_100000_add_token_notification_fields';
import * as migration_20260116_120000_add_report_schedules from './20260116_120000_add_report_schedules';
import * as migration_20260215_120000_add_monitoring_collections from './20260215_120000_add_monitoring_collections';
export const migrations = [
{
@@ -222,4 +223,9 @@ export const migrations = [
down: migration_20260116_120000_add_report_schedules.down,
name: '20260116_120000_add_report_schedules'
},
{
up: migration_20260215_120000_add_monitoring_collections.up,
down: migration_20260215_120000_add_monitoring_collections.down,
name: '20260215_120000_add_monitoring_collections'
},
];