Reconcile README/CLAUDE.md/commands/agents to filesystem truth ahead of v5.0.0 release. Self-audit --check-readme now passes (counts: scanners 12, commands 18, tests 635, knowledge 8, agents 6, hooks 4). Self-audit (scanners/self-audit.mjs): - Exclude plugin-health-scanner.mjs from countScannerShape (it is a "standalone" scanner per README/CLAUDE.md taxonomy; orchestrated scanners stay at 12) - countTestCases: spawn `node --test` and parse the `tests N` line so the badge reflects test cases (635), not test files (36). countTestFiles kept as fallback when subprocess fails. README.md: - Badges: scanners 9→12, commands 17→18, tests 543→635 - Body counts updated: 8 quality scanners → 12 deterministic scanners; 8 quality areas → 10 (incl. Plugin Hygiene from N6); 9 Node.js scanners → 12 - Scanner table extended with CPS / DIS / COL rows; TOK row reflects the v5 Pattern E/F/N1 expansion (sonnet-era removed) - CLI table adds manifest, whats-active, --accurate-tokens, --with-telemetry-recipe - Knowledge table adds opus-4.7-patterns.md and cache-telemetry-recipe.md - Scanner Lib table notes WEIGHTS export, severity-weighted scoring, tokenizer-api - Action Engines table adds manifest.mjs, whats-active.mjs, token-hotspots-cli.mjs - Test count text 486→635, file count 27→36 (12 lib + 23 scanner + 1 hook) - Tokens command: 4-pattern phrasing → 6 patterns + --accurate-tokens - Adds /config-audit manifest and /config-audit whats-active to command tables CLAUDE.md: - Posture row: 8 → 10 quality areas - Tokens row: 4 patterns (incl. 
sonnet-era) → 6 patterns + --accurate-tokens - Adds /config-audit manifest entry - Scanner table: TOK description rewritten; CPS, DIS, COL rows added - Scanner Lib table: tokenizer-api.mjs added; v5 annotations on severity, output, scoring, active-config-reader - Action Engines table: manifest.mjs added; token-hotspots-cli.mjs flags expanded - Knowledge table: cache-telemetry-recipe.md added; configuration-best-practices notes Opus-4.7 cache-stability rewrite - Finding ID examples extended with CA-TOK-005, CA-CPS-001, CA-DIS-001, CA-COL-001 - Test count text 543→635, file count 31→36 commands/help.md: tokens/manifest added to Core commands/posture.md: 8 → 10 quality areas commands/config-audit.md: argument-hint adds tokens/manifest; router adds tokens and manifest; "Running 8 configuration scanners" → 12 agents/feature-gap-agent.md: 8 → 10 quality areas No production code paths changed beyond self-audit's badge-counting heuristic. All 635 tests still green. Co-Authored-By: Claude Opus 4.7 <noreply@anthropic.com>
346 lines
12 KiB
JavaScript
346 lines
12 KiB
JavaScript
#!/usr/bin/env node
|
||
|
||
/**
|
||
* Config-Audit Self-Audit
|
||
* Runs the plugin's own scanners on its own configuration.
|
||
* CLI: node self-audit.mjs [--json] [--fix]
|
||
* Exit codes: 0=PASS (no critical/high), 1=WARN (high findings), 2=FAIL (critical findings)
|
||
* Zero external dependencies.
|
||
*/
|
||
|
||
import { resolve, dirname, join } from 'node:path';
|
||
import { fileURLToPath } from 'node:url';
|
||
import { readdir, readFile, stat } from 'node:fs/promises';
|
||
import { execFile } from 'node:child_process';
|
||
import { promisify } from 'node:util';
|
||
import { runAllScanners } from './scan-orchestrator.mjs';
|
||
import { scan as scanPluginHealth } from './plugin-health-scanner.mjs';
|
||
import { scoreByArea } from './lib/scoring.mjs';
|
||
import { gradeFromPassRate } from './lib/severity.mjs';
|
||
import { loadSuppressions, applySuppressions } from './lib/suppression.mjs';
|
||
import { parseJson } from './lib/yaml-parser.mjs';
|
||
|
||
// Promisified execFile so subprocess runs can be awaited (used by countTestCases).
const execFileAsync = promisify(execFile);

// ESM has no built-in __dirname; derive it from import.meta.url.
const __dirname = dirname(fileURLToPath(import.meta.url));
// Plugin root is one level above scanners/ (this file lives in scanners/).
const PLUGIN_ROOT = resolve(__dirname, '..');
|
||
|
||
// Scanner-shape detection: files in scanners/ that export `scan` and are not
// support modules. Matches the detection rule from v5 plan Step 16.
//
// `plugin-health-scanner.mjs` is excluded from the main scanner count: it has
// `export async function scan` but it runs standalone (not via scan-orchestrator)
// and is documented under "Standalone Scanner" in README/CLAUDE.md. The badge
// `scanners-12` reflects the orchestrated scanners that contribute to posture
// scoring.
const SCANNER_EXCLUDES = new Set([
  'scan-orchestrator.mjs',     // orchestration layer, not a scanner itself
  'self-audit.mjs',            // this file
  'whats-active.mjs',          // action engine, not a scanner
  'plugin-health-scanner.mjs', // standalone scanner — counted separately, see above
]);
|
||
|
||
// A file "looks like" an orchestrated scanner when it is an .mjs module,
// is not a known support module (exclude list, -cli, -engine), and its
// source exports an async `scan` function.
function isScannerShape(name, content) {
  const isModule = name.endsWith('.mjs');
  const isSupportModule =
    SCANNER_EXCLUDES.has(name) ||
    /-cli\.mjs$/.test(name) ||
    /-engine\.mjs$/.test(name);
  if (!isModule || isSupportModule) return false;
  return /export\s+async\s+function\s+scan\b/.test(content);
}
|
||
|
||
// List directory entries as Dirent objects, treating a missing or
// unreadable directory as empty instead of throwing.
async function safeListDir(path) {
  let entries;
  try {
    entries = await readdir(path, { withFileTypes: true });
  } catch {
    entries = [];
  }
  return entries;
}
|
||
|
||
// Count orchestrated scanner modules under scannersDir via shape detection
// (see isScannerShape). Files that cannot be read are skipped silently.
async function countScannerShape(scannersDir) {
  let total = 0;
  for (const entry of await safeListDir(scannersDir)) {
    const isCandidate = entry.isFile() && entry.name.endsWith('.mjs');
    if (!isCandidate) continue;
    let source;
    try {
      source = await readFile(join(scannersDir, entry.name), 'utf-8');
    } catch {
      continue; // unreadable — not counted
    }
    if (isScannerShape(entry.name, source)) total += 1;
  }
  return total;
}
|
||
|
||
// Count top-level .md files in `dir` (non-recursive).
async function countMdFiles(dir) {
  const entries = await safeListDir(dir);
  return entries.filter((e) => e.isFile() && e.name.endsWith('.md')).length;
}
|
||
|
||
// Recursively count *.test.mjs files under testsRoot. This is the fallback
// badge metric when countTestCases cannot produce a canonical case count.
async function countTestFiles(testsRoot) {
  async function walk(dir) {
    let subtotal = 0;
    for (const entry of await safeListDir(dir)) {
      const fullPath = join(dir, entry.name);
      if (entry.isDirectory()) {
        subtotal += await walk(fullPath);
      } else if (entry.isFile() && entry.name.endsWith('.test.mjs')) {
        subtotal += 1;
      }
    }
    return subtotal;
  }
  return walk(testsRoot);
}
|
||
|
||
// Run the test suite in a subprocess and parse the `ℹ tests N` summary line
// emitted by node:test. Used for badge accuracy under --check-readme. Slow
// (~15s on the full plugin) but produces the canonical case count rather
// than an approximation. Returns null on failure so the caller can fall
// back to countTestFiles without crashing the audit.

// Extract the case count from node:test reporter output. The \b guard
// ensures only the standalone word `tests` matches, so lines ending in
// e.g. "subtests 3" cannot be mistaken for the summary line (the previous
// `[^\n]*tests` pattern had no such boundary).
function parseTestCaseCount(stdout) {
  const match = stdout.match(/^[^\n]*\btests\s+(\d+)\s*$/m);
  return match ? Number(match[1]) : null;
}

/**
 * Count test cases by running `node --test` on the plugin's test glob.
 * @param {string} pluginRoot - Directory containing the tests/ tree
 * @returns {Promise<number|null>} Case count, or null when it cannot be determined
 */
async function countTestCases(pluginRoot) {
  try {
    const { stdout } = await execFileAsync(
      process.execPath,
      ['--test', 'tests/**/*.test.mjs'],
      { cwd: pluginRoot, timeout: 60000, maxBuffer: 10 * 1024 * 1024 },
    );
    return parseTestCaseCount(stdout);
  } catch (err) {
    // node --test exits non-zero when tests fail; the summary line is still
    // present on the captured stdout, so re-parse it from the error object.
    return parseTestCaseCount(err?.stdout || '');
  }
}
|
||
|
||
// Count individual hook handlers declared in hooks/hooks.json. The expected
// shape is { hooks?: { <event>: [ { hooks: [...] }, ... ] } }; each entry in
// an inner `hooks` array counts as one handler. A missing or unparseable
// file counts as zero.
async function countHookEntries(hooksJsonPath) {
  let raw;
  try {
    raw = await readFile(hooksJsonPath, 'utf-8');
  } catch {
    return 0;
  }
  const parsed = parseJson(raw);
  // Accept either a top-level { hooks: {...} } wrapper or the bare map.
  const hookMap = parsed?.hooks || parsed;
  const isPlainObject = hookMap && typeof hookMap === 'object' && !Array.isArray(hookMap);
  if (!isPlainObject) return 0;
  let total = 0;
  for (const handlerGroups of Object.values(hookMap)) {
    if (!Array.isArray(handlerGroups)) continue;
    for (const group of handlerGroups) {
      if (Array.isArray(group?.hooks)) total += group.hooks.length;
    }
  }
  return total;
}
|
||
|
||
/**
 * Parse a numeric badge value from a README badge URL via line-anchored
 * substring detection. Returns null if no badge for `kind` is found.
 * Pattern: `badge/<kind>-<NUMBER>(+)?-<color>` — case-insensitive.
 *
 * @param {string} readme - Full README.md contents
 * @param {string} kind - Badge label (e.g. 'scanners', 'tests')
 * @returns {number|null}
 */
function parseBadgeNumber(readme, kind) {
  // Escape regex metacharacters in `kind` before interpolating it into the
  // pattern. Current callers pass plain words, but building a dynamic RegExp
  // from a raw string is fragile (mis-matches / pattern corruption) if a
  // kind ever contains characters like '+' or '.'.
  const safeKind = kind.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
  const rx = new RegExp(`badge\\/${safeKind}-([0-9]+)\\+?-`, 'i');
  for (const line of readme.split('\n')) {
    const m = line.match(rx);
    if (m) return Number(m[1]);
  }
  return null;
}
|
||
|
||
/**
 * Compare README badge counts against filesystem-measured counts (v5 F6).
 * Filesystem counts are the source of truth; a kind with no badge in the
 * README is silently skipped rather than reported as a mismatch.
 *
 * @param {string} pluginDir
 * @returns {Promise<{passed: boolean, mismatches: Array<{kind:string, expected:number, foundInReadme:number}>, counts: object, badges: object}>}
 */
export async function checkReadmeBadges(pluginDir) {
  // Measure reality first. Prefer the canonical node:test case count and
  // fall back to the test-file count when the subprocess run fails.
  const testCases = await countTestCases(pluginDir);
  const counts = {
    scanners: await countScannerShape(join(pluginDir, 'scanners')),
    commands: await countMdFiles(join(pluginDir, 'commands')),
    agents: await countMdFiles(join(pluginDir, 'agents')),
    hooks: await countHookEntries(join(pluginDir, 'hooks', 'hooks.json')),
    tests: testCases ?? await countTestFiles(join(pluginDir, 'tests')),
    knowledge: await countMdFiles(join(pluginDir, 'knowledge')),
  };

  let readme = '';
  try {
    readme = await readFile(join(pluginDir, 'README.md'), 'utf-8');
  } catch {
    // Missing README: every badge parses as null and is skipped below.
  }

  // Parse one badge per measured kind.
  const badges = {};
  for (const kind of Object.keys(counts)) {
    badges[kind] = parseBadgeNumber(readme, kind);
  }

  const mismatches = Object.keys(counts)
    .filter((kind) => badges[kind] !== null && counts[kind] !== badges[kind])
    .map((kind) => ({ kind, expected: counts[kind], foundInReadme: badges[kind] }));

  return { passed: mismatches.length === 0, mismatches, counts, badges };
}
|
||
|
||
/**
 * Run self-audit on this plugin.
 * @param {object} [opts]
 * @param {boolean} [opts.fix=false] - Run fix-engine on auto-fixable findings
 * @param {boolean} [opts.checkReadme=false] - Verify README badge counts (v5 F6)
 * @returns {Promise<object>} Combined result
 */
export async function runSelfAudit(opts = {}) {
  const pluginDir = PLUGIN_ROOT;

  // 1. Run all config scanners on the plugin root. Fixture filtering is
  //    handled automatically by runAllScanners (filterFixtures defaults to true).
  const configEnvelope = await runAllScanners(pluginDir);

  // 2. Run the standalone plugin-health scanner, then apply suppressions.
  const pluginHealthResult = await scanPluginHealth(pluginDir);
  const { suppressions } = await loadSuppressions(pluginDir);
  if (suppressions.length > 0) {
    const { active, suppressed } = applySuppressions(pluginHealthResult.findings, suppressions);
    pluginHealthResult.findings = active;
    pluginHealthResult.suppressedFindings = suppressed;
  }

  // 3. Score config quality: average the per-area scores, then grade.
  const areaScores = scoreByArea(configEnvelope.scanners);
  let avgScore = 0;
  if (areaScores.areas.length > 0) {
    const scoreSum = areaScores.areas.reduce((acc, area) => acc + area.score, 0);
    avgScore = Math.round(scoreSum / areaScores.areas.length);
  }
  const configGrade = gradeFromPassRate(avgScore);

  // 4. Score plugin health: 10 points off per finding, floored at 0.
  const pluginIssueCount = pluginHealthResult.findings.length;
  const pluginScore = Math.max(0, 100 - pluginIssueCount * 10);
  const pluginGrade = gradeFromPassRate(pluginScore);

  // 5. Determine the overall verdict from the combined finding set.
  const allFindings = [
    ...configEnvelope.scanners.flatMap((s) => s.findings),
    ...pluginHealthResult.findings,
  ];
  let exitCode = 0;
  let verdict = 'PASS';
  if (allFindings.some((f) => f.severity === 'critical')) {
    exitCode = 2;
    verdict = 'FAIL';
  } else if (allFindings.some((f) => f.severity === 'high')) {
    exitCode = 1;
    verdict = 'WARN';
  }

  // 6. Optionally run the fix engine on auto-fixable findings.
  let fixResult = null;
  if (opts.fix && allFindings.some((f) => f.autoFixable)) {
    try {
      const { planFixes, applyFixes } = await import('./fix-engine.mjs');
      const plan = planFixes(configEnvelope);
      if (plan.length > 0) {
        fixResult = await applyFixes(plan);
      }
    } catch {
      // Fix engine unavailable or failed — non-fatal by design.
    }
  }

  // 7. Optional README badge check (v5 F6).
  let readmeCheck;
  if (opts.checkReadme) {
    readmeCheck = await checkReadmeBadges(pluginDir);
  }

  const result = {
    pluginDir,
    configGrade,
    configScore: avgScore,
    pluginGrade,
    pluginScore,
    configEnvelope,
    pluginHealthResult,
    allFindings,
    exitCode,
    verdict,
    fixResult,
  };
  if (readmeCheck) result.readmeCheck = readmeCheck;
  return result;
}
|
||
|
||
/**
 * Format self-audit result for terminal display.
 * @param {object} result - From runSelfAudit()
 * @returns {string}
 */
export function formatSelfAudit(result) {
  const rule = '\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501';
  const out = [];

  // Header
  out.push(rule);
  out.push(' Config-Audit Self-Audit');
  out.push(rule);
  out.push('');
  out.push(` Plugin health: ${result.pluginGrade} (${result.pluginScore})`);
  out.push(` Config quality: ${result.configGrade} (${result.configScore})`);
  out.push('');

  // Issues summary: info-level findings are omitted; at most 10 are listed.
  const nonInfo = result.allFindings.filter((f) => f.severity !== 'info');
  if (nonInfo.length === 0) {
    out.push(' Issues (0)');
  } else {
    out.push(` Issues (${nonInfo.length}):`);
    for (const finding of nonInfo.slice(0, 10)) {
      out.push(` - [${finding.severity}] ${finding.title}`);
    }
    if (nonInfo.length > 10) {
      out.push(` ...and ${nonInfo.length - 10} more`);
    }
  }

  out.push('');

  // Fix results (only when the fix engine actually ran).
  if (result.fixResult) {
    const appliedCount = result.fixResult.filter((r) => r.status === 'applied').length;
    out.push(` Auto-fix: ${appliedCount} fix(es) applied`);
    out.push('');
  }

  // Verdict footer.
  switch (result.verdict) {
    case 'PASS':
      out.push(' Self-audit: PASS');
      out.push(' (No critical or high findings)');
      break;
    case 'WARN':
      out.push(' Self-audit: WARN');
      out.push(' (High-severity findings detected)');
      break;
    default:
      out.push(' Self-audit: FAIL');
      out.push(' (Critical findings detected)');
      break;
  }

  out.push('');
  out.push(rule);

  return out.join('\n');
}
|
||
|
||
// --- CLI entry point ---
// Parses flags, runs the audit, emits JSON on stdout or a formatted report
// on stderr, and propagates the audit's exit code to the process.
async function main() {
  const args = process.argv.slice(2);
  const jsonMode = args.includes('--json');

  const result = await runSelfAudit({
    fix: args.includes('--fix'),
    checkReadme: args.includes('--check-readme'),
  });

  if (jsonMode) {
    // Wait for the write callback so the process does not exit with
    // truncated JSON when stdout is a pipe.
    const payload = JSON.stringify(result, null, 2) + '\n';
    await new Promise((done) => process.stdout.write(payload, done));
  } else {
    process.stderr.write('\n' + formatSelfAudit(result) + '\n');
  }

  process.exitCode = result.exitCode;
}
|
||
|
||
// Execute main() only when this file is invoked directly (not imported).
const isDirectRun =
  Boolean(process.argv[1]) &&
  resolve(process.argv[1]) === resolve(fileURLToPath(import.meta.url));
if (isDirectRun) {
  main().catch((err) => {
    // Exit code 3 distinguishes crashes from audit FAIL (2) / WARN (1).
    process.stderr.write(`Fatal: ${err.message}\n`);
    process.exit(3);
  });
}
|