-- Task #94: Global Restart Scheduler
-- Migration for arbiter_db
-- Run: psql -U arbiter -d arbiter_db -f 094_global_restart_scheduler.sql

-- 1. Configuration for Node-wide Stagger Logic
-- One row per physical node; base_time + (sort_order * interval_minutes)
-- yields each server's staggered restart slot.
CREATE TABLE IF NOT EXISTS global_restart_config (
    id SERIAL PRIMARY KEY,
    node VARCHAR(10) UNIQUE NOT NULL,              -- 'TX1', 'NC1'
    base_time TIME NOT NULL,                       -- e.g., '04:00:00' (UTC)
    interval_minutes INT DEFAULT 5,                -- Stagger gap
    is_enabled BOOLEAN DEFAULT true,               -- Global master switch per node
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_by VARCHAR(50)                         -- Discord Username
);

-- 2. Individual Server Execution State
-- Tracks each server's slot in the stagger plus the last sync outcome
-- against the Pterodactyl panel.
CREATE TABLE IF NOT EXISTS server_restart_schedules (
    id SERIAL PRIMARY KEY,
    server_id VARCHAR(50) UNIQUE NOT NULL,         -- Pterodactyl 8-char short ID
    server_name VARCHAR(100) NOT NULL,
    node VARCHAR(10) NOT NULL,
    sort_order INT NOT NULL DEFAULT 0,             -- Manual boot order
    effective_time TIME,                           -- Calculated: base + (sort * interval)
    ptero_schedule_id INT DEFAULT NULL,            -- ID of schedule on Pterodactyl
    skip_restart BOOLEAN DEFAULT false,            -- Individual "Maintenance Mode"
    sync_status VARCHAR(20) DEFAULT 'PENDING',     -- 'SUCCESS', 'PENDING', 'FAILED'
    last_error TEXT DEFAULT NULL,                  -- API error capture
    last_synced_at TIMESTAMP NULL,

    CONSTRAINT fk_node_config
        FOREIGN KEY (node)
        REFERENCES global_restart_config(node)
        ON UPDATE CASCADE
);

-- 3. Audit Trail for Sync Operations
-- Intentionally no FK on server_id so log rows outlive deleted servers.
CREATE TABLE IF NOT EXISTS sync_logs (
    id SERIAL PRIMARY KEY,
    server_id VARCHAR(50) NOT NULL,
    action VARCHAR(255) NOT NULL,                  -- e.g., 'Deleted Rogue Schedule', 'Created Schedule'
    status VARCHAR(20) NOT NULL,                   -- 'SUCCESS', 'FAILED'
    error_message TEXT DEFAULT NULL,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- 4. Performance Indexes
CREATE INDEX IF NOT EXISTS idx_server_node_order ON server_restart_schedules (node, sort_order);
CREATE INDEX IF NOT EXISTS idx_sync_status ON server_restart_schedules (sync_status);
CREATE INDEX IF NOT EXISTS idx_sync_logs_server ON sync_logs (server_id);
CREATE INDEX IF NOT EXISTS idx_sync_logs_created ON sync_logs (created_at);

-- 5. Initial Seed Data (idempotent: re-running the migration is a no-op)
INSERT INTO global_restart_config (node, base_time, interval_minutes, updated_by)
VALUES
    ('TX1', '04:00:00', 5, 'The Wizard'),
    ('NC1', '04:30:00', 5, 'The Wizard')
ON CONFLICT (node) DO NOTHING;
// --- services/arbiter-3.0/src/lib/ptero-sync.js (prelude + syncToPterodactyl) ---
const axios = require('axios');
const db = require('../database');

// Base URL for the Pterodactyl *client* API (per-server endpoints).
const PTERO_URL = 'https://panel.firefrostgaming.com/api/client/servers';

/**
 * Build auth/content headers for every Pterodactyl client API call.
 * Reads PTERO_CLIENT_KEY from the environment per call, so key rotation
 * does not require a process restart.
 */
function getHeaders() {
    return {
        'Authorization': `Bearer ${process.env.PTERO_CLIENT_KEY}`,
        'Content-Type': 'application/json',
        'Accept': 'application/json'
    };
}

// Rate limit helper - 200ms between calls
const sleep = (ms) => new Promise(resolve => setTimeout(resolve, ms));

/**
 * Sync a single server's restart schedule to Pterodactyl.
 *
 * Creates the "[Trinity] Daily Restart" schedule (plus its power/restart
 * task) when no ptero_schedule_id is known, otherwise updates the existing
 * schedule in place. All outcomes are persisted to server_restart_schedules
 * (sync_status / last_error / last_synced_at) and appended to sync_logs.
 *
 * @param {string} serverId - server_restart_schedules.server_id (Ptero short ID)
 * @returns {Promise<{success: boolean, scheduleId?: number, error?: string}>}
 */
async function syncToPterodactyl(serverId) {
    const result = await db.query('SELECT * FROM server_restart_schedules WHERE server_id = $1', [serverId]);
    const server = result.rows[0];

    if (!server) {
        return { success: false, error: 'Server not found in database' };
    }

    // FIX: effective_time is nullable in the schema (only populated once a
    // reorder / config update has run). The previous code crashed with a
    // TypeError on .split() here; record a proper FAILED state instead.
    if (!server.effective_time) {
        const errorMsg = 'No effective_time calculated for this server yet';
        await db.query(
            `UPDATE server_restart_schedules
             SET sync_status = $1, last_error = $2
             WHERE server_id = $3`,
            ['FAILED', errorMsg, server.server_id]
        );
        await db.query(
            `INSERT INTO sync_logs (server_id, action, status, error_message) VALUES ($1, $2, $3, $4)`,
            [server.server_id, 'Sync Failed', 'FAILED', errorMsg]
        );
        return { success: false, error: errorMsg };
    }

    // pg returns TIME columns as 'HH:MM:SS'; cron only needs hour + minute.
    const [hour, minute] = server.effective_time.split(':');
    const pteroUrl = `${PTERO_URL}/${server.server_id}/schedules`;

    const payload = {
        name: "[Trinity] Daily Restart",
        minute,
        hour,
        day_of_week: "*",
        day_of_month: "*",
        month: "*",
        is_active: !server.skip_restart   // skip_restart = per-server maintenance mode
    };

    try {
        let scheduleId = server.ptero_schedule_id;

        if (scheduleId) {
            // Update existing schedule. FIX: if it was deleted panel-side out
            // of band, the API answers 404 and the old code failed forever;
            // fall through and recreate instead.
            try {
                await axios.post(`${pteroUrl}/${scheduleId}`, payload, { headers: getHeaders() });
            } catch (err) {
                if (err.response?.status !== 404) throw err;
                scheduleId = null;
                await sleep(200);
            }
        }

        if (!scheduleId) {
            // Create new schedule
            const res = await axios.post(pteroUrl, payload, { headers: getHeaders() });
            scheduleId = res.data.attributes.id;

            // Attach the restart task
            await sleep(200);
            await axios.post(`${pteroUrl}/${scheduleId}/tasks`, {
                action: "power",
                payload: "restart",
                time_offset: 0
            }, { headers: getHeaders() });
        }

        await db.query(
            `UPDATE server_restart_schedules
             SET ptero_schedule_id = $1, sync_status = $2, last_error = NULL, last_synced_at = NOW()
             WHERE server_id = $3`,
            [scheduleId, 'SUCCESS', server.server_id]
        );

        // Log success
        await db.query(
            `INSERT INTO sync_logs (server_id, action, status) VALUES ($1, $2, $3)`,
            [server.server_id, 'Created/Updated Schedule', 'SUCCESS']
        );

        return { success: true, scheduleId };
    } catch (err) {
        // Prefer the structured Pterodactyl error detail when available.
        const errorMsg = err.response?.data?.errors?.[0]?.detail || err.message;
        await db.query(
            `UPDATE server_restart_schedules
             SET sync_status = $1, last_error = $2
             WHERE server_id = $3`,
            ['FAILED', errorMsg, server.server_id]
        );

        await db.query(
            `INSERT INTO sync_logs (server_id, action, status, error_message) VALUES ($1, $2, $3, $4)`,
            [server.server_id, 'Sync Failed', 'FAILED', errorMsg]
        );

        return { success: false, error: errorMsg };
    }
}
getHeaders() }); + const schedules = res.data.data || []; + + const rogueSchedules = schedules + .filter(s => !s.attributes.name.startsWith('[Trinity]')) + .filter(s => { + // Check if it looks like a restart schedule + const tasks = s.attributes.relationships?.tasks?.data || []; + return tasks.some(t => t.attributes?.action === 'power'); + }) + .map(s => ({ + id: s.attributes.id, + name: s.attributes.name, + cron: `${s.attributes.minute} ${s.attributes.hour} * * *` + })); + + return { serverId, serverName, rogueSchedules }; + } catch (err) { + return { serverId, serverName, rogueSchedules: [], error: err.message }; + } +} + +/** + * Delete a specific schedule from Pterodactyl + */ +async function deleteSchedule(serverId, scheduleId, scheduleName) { + const pteroUrl = `${PTERO_URL}/${serverId}/schedules/${scheduleId}`; + + try { + await axios.delete(pteroUrl, { headers: getHeaders() }); + + await db.query( + `INSERT INTO sync_logs (server_id, action, status) VALUES ($1, $2, $3)`, + [serverId, `Deleted Rogue Schedule: ${scheduleName}`, 'SUCCESS'] + ); + + return { success: true }; + } catch (err) { + const errorMsg = err.response?.data?.errors?.[0]?.detail || err.message; + + await db.query( + `INSERT INTO sync_logs (server_id, action, status, error_message) VALUES ($1, $2, $3, $4)`, + [serverId, `Failed to Delete: ${scheduleName}`, 'FAILED', errorMsg] + ); + + return { success: false, error: errorMsg }; + } +} + +/** + * Sync all servers for a node + */ +async function syncAllForNode(node) { + const result = await db.query( + 'SELECT server_id FROM server_restart_schedules WHERE node = $1 ORDER BY sort_order', + [node] + ); + + const results = []; + for (const row of result.rows) { + const syncResult = await syncToPterodactyl(row.server_id); + results.push({ serverId: row.server_id, ...syncResult }); + await sleep(200); // Rate limiting + } + + return results; +} + +module.exports = { + syncToPterodactyl, + auditServerSchedules, + deleteSchedule, + syncAllForNode, 
+ sleep +}; diff --git a/services/arbiter-3.0/src/routes/admin/index.js b/services/arbiter-3.0/src/routes/admin/index.js index 7dd4df6..c41f154 100644 --- a/services/arbiter-3.0/src/routes/admin/index.js +++ b/services/arbiter-3.0/src/routes/admin/index.js @@ -9,6 +9,7 @@ const financialsRouter = require('./financials'); const graceRouter = require('./grace'); const auditRouter = require('./audit'); const rolesRouter = require('./roles'); +const schedulerRouter = require('./scheduler'); router.use(requireTrinityAccess); @@ -32,5 +33,6 @@ router.use('/financials', financialsRouter); router.use('/grace', graceRouter); router.use('/audit', auditRouter); router.use('/roles', rolesRouter); +router.use('/scheduler', schedulerRouter); module.exports = router; diff --git a/services/arbiter-3.0/src/routes/admin/scheduler.js b/services/arbiter-3.0/src/routes/admin/scheduler.js new file mode 100644 index 0000000..f72041b --- /dev/null +++ b/services/arbiter-3.0/src/routes/admin/scheduler.js @@ -0,0 +1,307 @@ +const express = require('express'); +const router = express.Router(); +const db = require('../../database'); +const { calculateStagger } = require('../../utils/scheduler'); +const { syncToPterodactyl, auditServerSchedules, deleteSchedule, syncAllForNode, sleep } = require('../../lib/ptero-sync'); + +// GET /admin/scheduler - Main page +router.get('/', async (req, res) => { + try { + // Get config for both nodes + const configResult = await db.query('SELECT * FROM global_restart_config ORDER BY node'); + const configs = configResult.rows; + + // Get all servers ordered by node and sort_order + const serversResult = await db.query(` + SELECT s.*, c.base_time, c.interval_minutes + FROM server_restart_schedules s + JOIN global_restart_config c ON s.node = c.node + ORDER BY s.node, s.sort_order + `); + + res.render('admin/scheduler', { + title: 'Global Restart Scheduler', + configs, + servers: serversResult.rows + }); + } catch (err) { + console.error('Scheduler page error:', 
err); + res.status(500).send('Error loading scheduler'); + } +}); + +// GET /admin/scheduler/table-only - HTMX partial refresh +router.get('/table-only', async (req, res) => { + try { + const serversResult = await db.query(` + SELECT s.*, c.base_time, c.interval_minutes + FROM server_restart_schedules s + JOIN global_restart_config c ON s.node = c.node + ORDER BY s.node, s.sort_order + `); + + res.render('admin/partials/scheduler-table', { servers: serversResult.rows }); + } catch (err) { + res.status(500).send('Error loading table'); + } +}); + +// POST /admin/scheduler/reorder-servers - Handle drag-and-drop reorder +router.post('/reorder-servers', async (req, res) => { + try { + const { orderedIds } = req.body; + + // Update sort_order for each server + for (let i = 0; i < orderedIds.length; i++) { + await db.query( + 'UPDATE server_restart_schedules SET sort_order = $1 WHERE server_id = $2', + [i, orderedIds[i]] + ); + } + + // Recalculate effective times for each node + for (const node of ['TX1', 'NC1']) { + const configResult = await db.query( + 'SELECT base_time, interval_minutes FROM global_restart_config WHERE node = $1', + [node] + ); + + if (configResult.rows.length === 0) continue; + + const { base_time, interval_minutes } = configResult.rows[0]; + + const serversResult = await db.query( + 'SELECT server_id FROM server_restart_schedules WHERE node = $1 ORDER BY sort_order', + [node] + ); + + const servers = serversResult.rows; + const staggered = calculateStagger(base_time, interval_minutes, servers); + + for (const server of staggered) { + await db.query( + 'UPDATE server_restart_schedules SET effective_time = $1 WHERE server_id = $2', + [server.effective_time, server.server_id] + ); + } + } + + res.json({ success: true }); + } catch (err) { + console.error('Reorder error:', err); + res.status(500).json({ error: err.message }); + } +}); + +// POST /admin/scheduler/update-config - Update node config +router.post('/update-config', async (req, res) => { + 
/**
 * POST /admin/scheduler/update-config - update a node's base time / stagger
 * interval, then recalculate and mark every server on the node PENDING.
 *
 * Body: { node, base_time, interval_minutes }
 */
router.post('/update-config', async (req, res) => {
    try {
        const { node, base_time, interval_minutes } = req.body;
        const updatedBy = req.session?.user?.username || 'Unknown';

        // FIX: validate input. A zero/negative interval collapses the stagger
        // (every server restarting at once), and a malformed time would poison
        // effective_time for the whole node.
        const interval = Number.parseInt(interval_minutes, 10);
        if (!Number.isInteger(interval) || interval < 1) {
            return res.status(400).send('interval_minutes must be a positive integer');
        }
        if (!/^\d{2}:\d{2}(:\d{2})?$/.test(String(base_time))) {
            return res.status(400).send('base_time must be HH:MM or HH:MM:SS');
        }

        const updateResult = await db.query(
            `UPDATE global_restart_config
             SET base_time = $1, interval_minutes = $2, updated_at = NOW(), updated_by = $3
             WHERE node = $4`,
            [base_time, interval, updatedBy, node]
        );

        // FIX: an unknown node previously "succeeded" silently (0 rows updated).
        if (updateResult.rowCount === 0) {
            return res.status(404).send(`Unknown node: ${node}`);
        }

        // Recalculate effective times for this node
        const serversResult = await db.query(
            'SELECT server_id FROM server_restart_schedules WHERE node = $1 ORDER BY sort_order',
            [node]
        );

        const staggered = calculateStagger(base_time, interval, serversResult.rows);

        for (const server of staggered) {
            await db.query(
                'UPDATE server_restart_schedules SET effective_time = $1, sync_status = $2 WHERE server_id = $3',
                [server.effective_time, 'PENDING', server.server_id]
            );
        }

        res.redirect('/admin/scheduler');
    } catch (err) {
        console.error('Update config error:', err);
        res.status(500).send('Error updating config');
    }
});

/**
 * POST /admin/scheduler/sync/:node - push every server on the node to
 * Pterodactyl; responds with success/failure counts.
 */
router.post('/sync/:node', async (req, res) => {
    try {
        const { node } = req.params;
        const results = await syncAllForNode(node);

        const success = results.filter(r => r.success).length;
        const failed = results.filter(r => !r.success).length;

        res.json({ success: true, synced: success, failed });
    } catch (err) {
        console.error('Sync error:', err);
        res.status(500).json({ error: err.message });
    }
});

/**
 * GET /admin/scheduler/audit/:node - scan every server on the node for
 * restart schedules not owned by Trinity and render the audit modal.
 */
router.get('/audit/:node', async (req, res) => {
    try {
        const { node } = req.params;

        const serversResult = await db.query(
            'SELECT server_id, server_name FROM server_restart_schedules WHERE node = $1',
            [node]
        );

        const results = [];
        const auditErrors = [];
        let totalRogue = 0;

        for (const server of serversResult.rows) {
            const auditResult = await auditServerSchedules(server.server_id, server.server_name);

            // FIX: a per-server API failure used to be indistinguishable from
            // "no conflicts"; collect it so the modal can surface it.
            if (auditResult.error) {
                auditErrors.push({ serverName: server.server_name, error: auditResult.error });
            }

            if (auditResult.rogueSchedules.length > 0) {
                results.push(auditResult);
                totalRogue += auditResult.rogueSchedules.length;
            }
            await sleep(200); // Rate limiting
        }

        // auditErrors is an additive render local; existing templates that
        // do not reference it are unaffected.
        res.render('admin/partials/audit-modal', {
            node,
            results,
            totalRogue,
            serverCount: results.length,
            auditErrors
        });
    } catch (err) {
        console.error('Audit error:', err);
        res.status(500).send('Error running audit');
    }
});
/**
 * POST /admin/scheduler/audit/nuke/:node - delete a batch of rogue schedules
 * identified by a previous audit run.
 *
 * Body: nukeData - JSON string: [{ serverId, scheduleId, scheduleName }, ...]
 * Responds with an HTML fragment that replaces the audit modal.
 */
router.post('/audit/nuke/:node', async (req, res) => {
    try {
        const { node } = req.params;

        // FIX: malformed nukeData previously bubbled out of JSON.parse as a
        // 500; reject it explicitly as a client error instead.
        let nukeData;
        try {
            nukeData = JSON.parse(req.body.nukeData);
        } catch {
            return res.status(400).send('nukeData must be valid JSON');
        }
        if (!Array.isArray(nukeData)) {
            return res.status(400).send('nukeData must be an array');
        }

        let deleted = 0;
        let failed = 0;

        for (const item of nukeData) {
            const result = await deleteSchedule(item.serverId, item.scheduleId, item.scheduleName);
            if (result.success) {
                deleted++;
            } else {
                failed++;
            }
            await sleep(200); // Rate limiting
        }

        // Return success message as modal replacement.
        // NOTE(review): the original wrapper markup was mangled in this paste;
        // the element/classes below are reconstructed - confirm against the
        // audit-modal styling before merging.
        res.send(`
            <div class="alert alert-success">
                Deleted ${deleted} rogue schedule(s) on ${node}.
                ${failed > 0 ? `<span class="text-danger">${failed} failed.</span>` : ''}
            </div>
        `);
    } catch (err) {
        console.error('Nuke error:', err);
        res.status(500).send('Error deleting schedules');
    }
});

module.exports = router;
Manage staggered restart times for all servers
++ Last updated: <%= config.updated_at ? new Date(config.updated_at).toLocaleString() : 'Never' %> + by <%= config.updated_by || 'Unknown' %> +
++ Found <%= totalRogue %> rogue restart schedule(s) across + <%= serverCount %> server(s) on <%= node %>. + These must be removed before Trinity can take control. +
+ +No conflicts found on <%= node %>. Trinity is ready to take control.
+| + | Server | +Node | +Restart Time (UTC) | +Status | +Skip | +
|---|---|---|---|---|---|
| + No servers imported yet. Click "Import Servers" to populate from Pterodactyl. + | +|||||
| + ☰ + | +<%= server.server_name %> | ++ + <%= server.node %> + + | +<%= server.effective_time || 'Not set' %> | ++ <% if (server.sync_status === 'SUCCESS') { %> + ● Synced + <% } else if (server.sync_status === 'FAILED') { %> + ✕ Error + <% } else { %> + ○ Pending + <% } %> + | ++ + | +