Step 8 of v5.1.0 humanizer Wave 4. Adds tests/lint-default-output.mjs
runner and tests/scanners/lint-default-output.test.mjs wrapper that
exercise SC-3 against the 6 prose CLIs (scan-orchestrator, posture,
token-hotspots-cli, plugin-health-scanner, drift-cli, fix-cli) running
in default (humanized) mode against tests/fixtures/marketplace-medium.
Lint scope is stderr only — JSON envelope keys ("scanner", "severity")
are structural, not prose. Humanized prose fields embedded inside JSON
are already covered by tests/lib/humanizer-data.test.mjs tier1/tier3
checks. Code references inside backticks pass the lint
(stripBacktickSpans) so technical identifiers can appear when wrapped.
Default-mode prose fixes to land lint at zero violations:
- scan-orchestrator: top banner switches to "Config-Audit v2.2.0" and
per-scanner progress wraps "[XXX] Label" in backticks. --raw and
--json paths preserve the v5.0.0 verbatim banner via new
opts.humanizedProgress flag on runAllScanners.
- plugin-health-scanner: top banner switches to "Plugin Health v2.1.0"
in default mode; --raw/--json keep "Plugin Health Scanner v2.1.0".
- scoring.mjs generateHealthScorecard humanized branch: area names
(CLAUDE.md, Hooks, MCP, Settings, Rules, Imports, Conflicts, Token
Efficiency, Plugin Hygiene) are wrapped in backticks; dot-padding
compensates so column alignment matches v5.0.0 layout.
- posture / drift-cli / fix-cli: thread humanizedProgress flag through
their runAllScanners calls so default mode emits humanized progress
and --raw/--json preserve the v5.0.0 stderr snapshot.
Test infrastructure only — user-facing docs land in Wave 5/6 once
commands and agents consume the humanized payload.
Tests: 735 to 736 (+1 SC-3 wrapper). Full suite passes.
Co-Authored-By: Claude Opus 4.7 <noreply@anthropic.com>
126 lines
3.7 KiB
JavaScript
#!/usr/bin/env node
/**
 * Config-Audit Posture Assessment CLI
 * Runs all scanners + scoring in a single Node.js process.
 * Usage: node posture.mjs <target-path> [--json] [--global] [--output-file path]
 * Zero external dependencies.
 */
|
import { writeFile } from 'node:fs/promises';
import { resolve } from 'node:path';
import { fileURLToPath } from 'node:url';
import { runAllScanners } from './scan-orchestrator.mjs';
import {
  calculateUtilization,
  determineMaturityLevel,
  determineSegment,
  scoreByArea,
  topActions,
  generateScorecard,
  generateHealthScorecard,
} from './lib/scoring.mjs';
|
/**
 * Run posture assessment and return structured result.
 *
 * Orchestrates a full scanner pass and then derives the posture metrics
 * (utilization, maturity, segment, per-area grades, top actions) from the
 * resulting envelope. Pure aggregation — all I/O happens inside the scanners.
 *
 * @param {string} targetPath
 * @param {object} [opts]
 * @param {boolean} [opts.includeGlobal=false]
 * @param {boolean} [opts.fullMachine=false] - Scan all known locations across the machine
 * @returns {Promise<object>}
 */
export async function runPosture(targetPath, opts = {}) {
  const envelope = await runAllScanners(targetPath, opts);

  // The GAP scanner's findings drive utilization, maturity, and the
  // ranked action list; a missing GAP entry degrades to "no findings".
  const gapEntry = envelope.scanners.find((entry) => entry.scanner === 'GAP');
  const gapFindings = gapEntry === undefined ? [] : gapEntry.findings;

  // Derive each posture metric from the scan output.
  const utilization = calculateUtilization(gapFindings);
  const maturity = determineMaturityLevel(gapFindings, { files: [] });
  const segment = determineSegment(utilization.score);
  const areaScores = scoreByArea(envelope.scanners);

  return {
    utilization,
    maturity,
    segment,
    areas: areaScores.areas,
    overallGrade: areaScores.overallGrade,
    topActions: topActions(gapFindings),
    opportunityCount: gapFindings.length,
    scannerEnvelope: envelope,
  };
}
// --- CLI entry point ---

/**
 * Parse CLI arguments, run the posture assessment, and emit results.
 *
 * Output contract:
 * - stdout: JSON result only under --json or --raw (byte-identical shape).
 * - stderr: scorecard unless --json; --raw renders the v5.0.0 layout,
 *   default renders the humanized layout.
 * - --output-file additionally persists the JSON result to disk.
 */
async function main() {
  const args = process.argv.slice(2);

  // Parsed CLI state; the last positional argument wins as the target path.
  const flags = { json: false, raw: false, global: false, fullMachine: false };
  let targetPath = '.';
  let outputFile = null;

  for (let i = 0; i < args.length; i++) {
    const arg = args[i];
    switch (arg) {
      case '--output-file':
        // Consume the following token as the path; ignore a trailing bare flag.
        if (args[i + 1]) outputFile = args[++i];
        break;
      case '--json':
        flags.json = true;
        break;
      case '--raw':
        flags.raw = true;
        break;
      case '--global':
        flags.global = true;
        break;
      case '--full-machine':
        flags.fullMachine = true;
        break;
      case '--include-fixtures':
        // Recognized flag; evaluated below via args.includes.
        break;
      default:
        if (!arg.startsWith('-')) targetPath = arg;
    }
  }

  const filterFixtures = !args.includes('--include-fixtures');
  // Humanized progress only in default (non-JSON, non-raw) mode.
  const result = await runPosture(targetPath, {
    includeGlobal: flags.global,
    fullMachine: flags.fullMachine,
    filterFixtures,
    humanizedProgress: !flags.json && !flags.raw,
  });

  // stdout JSON path: --json and --raw both write the v5.0.0-shape result
  // (byte-identical). Default mode writes nothing to stdout.
  if (flags.json || flags.raw) {
    process.stdout.write(`${JSON.stringify(result, null, 2)}\n`);
  }

  // stderr scorecard path: --json suppresses; --raw renders v5.0.0 verbatim
  // (humanized=false); default renders humanized scorecard.
  if (!flags.json) {
    const scorecard = generateHealthScorecard(
      { areas: result.areas, overallGrade: result.overallGrade },
      result.opportunityCount,
      { humanized: !flags.raw },
    );
    process.stderr.write(`\n${scorecard}\n`);
  }

  if (outputFile) {
    await writeFile(outputFile, JSON.stringify(result, null, 2), 'utf-8');
    process.stderr.write(`\nResults written to ${outputFile}\n`);
  }
}
// Only run CLI if invoked directly (not when imported as a module).
// fileURLToPath handles Windows drive letters and percent-encoded
// characters correctly; new URL(...).pathname leaves a leading slash
// on win32 (e.g. "/C:/...") and never decodes, breaking the comparison.
const isDirectRun =
  process.argv[1] && resolve(process.argv[1]) === fileURLToPath(import.meta.url);
if (isDirectRun) {
  main().catch((err) => {
    // Guard against non-Error throws so the fatal line never prints "undefined".
    process.stderr.write(`Fatal: ${err?.message ?? err}\n`);
    process.exit(1);
  });
}