Foundation lib for v1.12.0 cron rewrite skill-tree backup/restore.
Zero dependencies. Uses fs.cpSync (recursive + preserveTimestamps) without
dereference (Node 22.17.x regression) and without filter (Windows symlink-
type bug).
- backupDir(srcDir, backupRoot, opts) → {backupPath, retentionDays, restore()}
- Backup-id format YYYY-MM-DDTHH-MM-SS (filesystem-safe; no colons)
- .backup-meta.json sentinel written as first action inside backupPath
- restore() writes .rollback-in-progress at backupRoot BEFORE rmSync+cpSync
so a crashed restore leaves the sentinel for the next run to detect
- detectStaleRollback(backupRoot) — boolean predicate over sentinel
- cleanupOldBackups(backupRoot, retentionDays) — 3-step age resolution:
meta.created_at → dir mtime → skip-with-warning (never delete a dir
whose age cannot be established)
12/12 tests pass: timestamp format, content round-trip, sentinel lifecycle,
retention, mtime fallback, unparseable-meta skip, missing-root no-op.
Co-Authored-By: Claude Opus 4.7 <noreply@anthropic.com>
202 lines
6.1 KiB
JavaScript
// backup.mjs — Backup + sentinel-guarded rollback for skills/-tree.
|
|
// Zero dependencies. Uses fs.cpSync (recursive + preserveTimestamps) without
|
|
// dereference (Node 22.17.x regression) and without filter (Windows symlink-
|
|
// type bug). Rollback writes a .rollback-in-progress sentinel at backupRoot
|
|
// BEFORE destructive operations and removes it on success — a crash mid-
|
|
// restore leaves the sentinel behind so detectStaleRollback() can flag it.
|
|
|
|
import {
|
|
cpSync,
|
|
rmSync,
|
|
statSync,
|
|
readdirSync,
|
|
readFileSync,
|
|
existsSync,
|
|
unlinkSync,
|
|
mkdirSync,
|
|
} from 'node:fs';
|
|
import { join } from 'node:path';
|
|
import { atomicWriteJson } from './atomic-write.mjs';
|
|
|
|
// Meta file written inside each backup dir; carries created_at used by retention.
const META_FILENAME = '.backup-meta.json';
// Crash sentinel written at backupRoot before a destructive restore; removed on success.
const SENTINEL_FILENAME = '.rollback-in-progress';
// Default age threshold (days) for cleanupOldBackups().
const DEFAULT_RETENTION_DAYS = 7;
|
|
|
|
/**
 * Produce a filesystem-safe ISO-ish timestamp: YYYY-MM-DDTHH-MM-SS.
 * No colons, no fractional seconds, no Z — safe as a directory name on
 * Windows and POSIX alike.
 * @param {Date} [now] — clock override for testing; defaults to the current time
 * @returns {string}
 */
export function backupTimestamp(now = new Date()) {
  // toISOString() is always UTC and zero-padded; the first 19 chars are
  // "YYYY-MM-DDTHH:MM:SS". Swap colons for dashes to stay filesystem-safe.
  const isoHead = now.toISOString().slice(0, 19);
  return isoHead.replaceAll(':', '-');
}
|
|
|
|
/**
 * Read the backup meta file in `dir` and return its created_at as epoch ms.
 * Returns null on any failure: missing file, unparseable JSON, missing or
 * non-string created_at, or an unparseable date string.
 * @param {string} dir
 * @returns {number | null}
 */
function readMetaCreatedAt(dir) {
  try {
    const raw = readFileSync(join(dir, META_FILENAME), 'utf8');
    const meta = JSON.parse(raw);
    const createdAt = meta?.created_at;
    if (typeof createdAt !== 'string') return null;
    const epochMs = Date.parse(createdAt);
    return Number.isFinite(epochMs) ? epochMs : null;
  } catch {
    // Deliberately swallowed: callers treat "no resolvable meta" as null and
    // fall back to other age sources.
    return null;
  }
}
|
|
|
|
/**
 * Back up srcDir into backupRoot/<timestamp>/. Writes a meta sentinel inside
 * the new backup dir as the first post-copy action.
 *
 * @param {string} srcDir — directory to back up (must exist)
 * @param {string} backupRoot — parent dir for backup-id subdirs
 * @param {object} [opts]
 * @param {number} [opts.retentionDays] — default 7
 * @param {Date} [opts.now] — override clock for testing
 * @returns {{backupPath: string, retentionDays: number, restore: () => void}}
 * @throws {Error} on missing/non-string srcDir or backupRoot, or if srcDir
 *   does not exist on disk
 */
export function backupDir(srcDir, backupRoot, opts = {}) {
  if (!srcDir || typeof srcDir !== 'string') {
    throw new Error('backupDir: srcDir is required');
  }
  if (!backupRoot || typeof backupRoot !== 'string') {
    throw new Error('backupDir: backupRoot is required');
  }
  if (!existsSync(srcDir)) {
    throw new Error(`backupDir: srcDir does not exist: ${srcDir}`);
  }

  const retentionDays = opts.retentionDays ?? DEFAULT_RETENTION_DAYS;
  const clock = opts.now ?? new Date();

  mkdirSync(backupRoot, { recursive: true });
  const backupPath = join(backupRoot, backupTimestamp(clock));

  // Same copy options both directions: recursive, overwrite, keep timestamps.
  const copyOpts = { recursive: true, force: true, preserveTimestamps: true };
  cpSync(srcDir, backupPath, copyOpts);

  // First action inside backupPath after cpSync — write meta sentinel.
  atomicWriteJson(join(backupPath, META_FILENAME), {
    created_at: clock.toISOString(),
    src_dir: srcDir,
    schema_version: 1,
  });

  // Sentinel-guarded rollback: the .rollback-in-progress file is written
  // BEFORE the destructive rmSync+cpSync and only removed on the success
  // path — any throw in between leaves it behind for detectStaleRollback().
  const restore = () => {
    const sentinelPath = join(backupRoot, SENTINEL_FILENAME);
    atomicWriteJson(sentinelPath, {
      backup_path: backupPath,
      src_dir: srcDir,
      started_at: new Date().toISOString(),
      schema_version: 1,
    });

    rmSync(srcDir, { recursive: true, force: true, maxRetries: 3, retryDelay: 200 });
    cpSync(backupPath, srcDir, copyOpts);

    // Remove the meta file we copied back into srcDir so srcDir is clean.
    const restoredMeta = join(srcDir, META_FILENAME);
    if (existsSync(restoredMeta)) {
      try {
        unlinkSync(restoredMeta);
      } catch {
        // best-effort
      }
    }

    // Success path only: clear the sentinel.
    try {
      unlinkSync(sentinelPath);
    } catch {
      // best-effort
    }
  };

  return { backupPath, retentionDays, restore };
}
|
|
|
|
/**
 * True if a stale rollback sentinel exists at backupRoot — i.e. a previous
 * restore() wrote .rollback-in-progress and never reached its success path.
 * Non-string or empty input is treated as "no sentinel".
 * @param {string} backupRoot
 * @returns {boolean}
 */
export function detectStaleRollback(backupRoot) {
  if (typeof backupRoot !== 'string' || backupRoot.length === 0) {
    return false;
  }
  const sentinelPath = join(backupRoot, SENTINEL_FILENAME);
  return existsSync(sentinelPath);
}
|
|
|
|
/**
 * Resolve the effective creation time (epoch ms) of a backup dir.
 * Resolution order: meta.created_at → dir mtime → null.
 * A null result means "age unknown" — the caller skips (with a warning)
 * rather than deleting a dir whose age cannot be established.
 * @param {string} dir
 * @returns {number | null}
 */
function resolveBackupAge(dir) {
  const metaMs = readMetaCreatedAt(dir);
  if (metaMs !== null) return metaMs;
  try {
    const { mtimeMs } = statSync(dir);
    return mtimeMs;
  } catch {
    // statSync failed (dir vanished, permissions, …) — age is unresolvable.
    return null;
  }
}
|
|
|
|
/**
 * Delete backup directories under backupRoot older than retentionDays.
 * Skips dirs with unresolvable age (logs a warning) rather than deleting them.
 *
 * @param {string} backupRoot
 * @param {number} [retentionDays] — default 7
 * @param {object} [opts]
 * @param {(msg: string) => void} [opts.warn] — default console.warn
 * @param {Date} [opts.now] — override clock for testing
 * @returns {{kept: string[], deleted: string[], skipped: string[]}}
 */
export function cleanupOldBackups(backupRoot, retentionDays = DEFAULT_RETENTION_DAYS, opts = {}) {
  const result = { kept: [], deleted: [], skipped: [] };
  // Missing root is a no-op, not an error.
  if (!backupRoot || !existsSync(backupRoot)) return result;

  const warn = opts.warn ?? ((m) => console.warn(m));
  const clock = opts.now ?? new Date();
  const cutoffMs = clock.getTime() - retentionDays * 24 * 60 * 60 * 1000;

  let entries;
  try {
    entries = readdirSync(backupRoot, { withFileTypes: true });
  } catch (err) {
    warn(`cleanupOldBackups: cannot read ${backupRoot}: ${err.message}`);
    return result;
  }

  for (const entry of entries) {
    if (!entry.isDirectory()) continue;
    const dirPath = join(backupRoot, entry.name);

    // Creation timestamp in epoch ms, or null when unresolvable.
    const createdMs = resolveBackupAge(dirPath);
    if (createdMs == null) {
      warn(`cleanupOldBackups: skipping ${dirPath} — cannot resolve age`);
      result.skipped.push(dirPath);
      continue;
    }

    if (createdMs >= cutoffMs) {
      result.kept.push(dirPath);
      continue;
    }

    try {
      rmSync(dirPath, { recursive: true, force: true, maxRetries: 3, retryDelay: 200 });
      result.deleted.push(dirPath);
    } catch (err) {
      // A failed delete is recorded as skipped so the next run retries it.
      warn(`cleanupOldBackups: failed to delete ${dirPath}: ${err.message}`);
      result.skipped.push(dirPath);
    }
  }

  return result;
}
|