- Plugin README: add "What's New in v5.1.0" section with humanizer overview, before/after example, plain-language vocabulary table, --raw flag docs. Bump version badge 5.0.0 → 5.1.0. Add Version History row. - Plugin CLAUDE.md: add humanizer.mjs + humanizer-data.mjs to Scanner Lib table. Add "Plain-Language Output (v5.1.0)" section documenting output modes, vocabularies, and Wave 5 lessons. Bump test count 635 → 792 across 52 test files. - Marketplace root README: bump config-audit entry 5.0.0 → 5.1.0, update one-line description to mention plain-language UX, add bullet for the v5.1.0 humanizer, bump test count 635+ → 792+. Test-normalizer hardening (consequence of growing CLAUDE.md): walkClaudeMdCascade walks upward from the marketplace-medium fixture into this plugin's own CLAUDE.md, so any docs edit ripples into `scanners[*].activeConfig.claudeMdEstimatedTokens`. The v5.0.0 byte-stability contract is about scanner internals being unchanged, not ancestor input content being frozen. Normalizers in json-backcompat, raw-backcompat, posture-humanizer, scan-orchestrator-humanizer, and snapshot-default-output now strip claudeMdEstimatedTokens to <ANCESTOR_DERIVED>. The default-output snapshot for scan-orchestrator was re-seeded via UPDATE_SNAPSHOT=1 (intent: Wave 6 docs additions; humanizer prose unchanged). Verify: - grep -E "5\.1\.0|v5\.1\.0" README.md CLAUDE.md ../../README.md | wc -l = 12 - node --test 'tests/**/*.test.mjs' = 792/792 pass - self-audit configGrade A (97), pluginGrade A (100), readmeCheck.passed true
136 lines
5.6 KiB
JavaScript
136 lines
5.6 KiB
JavaScript
import { describe, it } from 'node:test';
|
|
import assert from 'node:assert/strict';
|
|
import { resolve, dirname } from 'node:path';
|
|
import { fileURLToPath } from 'node:url';
|
|
import { execFile } from 'node:child_process';
|
|
import { promisify } from 'node:util';
|
|
import { readFile, unlink } from 'node:fs/promises';
|
|
|
|
// Promisified execFile: resolves with { stdout, stderr }; on non-zero exit it
// rejects with an Error that carries the same stdout/stderr fields.
const exec = promisify(execFile);
const __dirname = dirname(fileURLToPath(import.meta.url));
// Repository root — this test file lives two directories below it.
const REPO = resolve(__dirname, '../..');
// The CLI under test.
const CLI = resolve(REPO, 'scanners/posture.mjs');
// Fixture tree the posture scanner is pointed at for every run.
const FIXTURE = resolve(REPO, 'tests/fixtures/marketplace-medium');
// v5.0.0 golden snapshots: stdout JSON envelope and stderr scorecard text.
const POSTURE_JSON_SNAPSHOT = resolve(REPO, 'tests/snapshots/v5.0.0/posture.json');
const POSTURE_STDERR_SNAPSHOT = resolve(REPO, 'tests/snapshots/v5.0.0-stderr/posture.txt');
|
|
|
|
/**
 * Deep-copy a runPosture result and blank out every field that legitimately
 * varies between runs or machines, so snapshot comparison only sees scanner
 * behavior: meta.target (machine path), meta.timestamp, per-scanner
 * duration_ms, and activeConfig.claudeMdEstimatedTokens. The token count is
 * derived by walkClaudeMdCascade walking upward from the fixture, so edits to
 * this plugin's own CLAUDE.md ripple into it even when scanner internals are
 * unchanged — hence the `<ANCESTOR_DERIVED>` placeholder.
 * @param {object} p - Parsed posture output (actual run or snapshot).
 * @returns {object} A normalized deep copy; `p` itself is never mutated.
 */
function normalizePosture(p) {
  const clone = JSON.parse(JSON.stringify(p));
  const envelope = clone.scannerEnvelope;
  if (!envelope) return clone;

  const { meta, scanners } = envelope;
  if (meta) {
    meta.target = '<TARGET>';
    meta.timestamp = '<TIMESTAMP>';
  }
  if (Array.isArray(scanners)) {
    scanners.forEach((scanner) => {
      scanner.duration_ms = 0;
      const cfg = scanner.activeConfig;
      if (cfg && 'claudeMdEstimatedTokens' in cfg) {
        cfg.claudeMdEstimatedTokens = '<ANCESTOR_DERIVED>';
      }
    });
  }
  return clone;
}
|
|
|
|
/**
 * Zero out every "(Nms)" duration token in scanner progress output so stderr
 * from different runs compares verbatim.
 * @param {string} s - Raw stderr text.
 * @returns {string} Same text with each duration rewritten to "(0ms)".
 */
function normalizeStderr(s) {
  return s.replaceAll(/\(\d+ms\)/g, '(0ms)');
}
|
|
|
|
/**
 * Run the posture CLI as a child process against the fixture.
 * The CLI exits non-zero when it reports findings, which makes execFile
 * reject — but the rejection Error still carries stdout/stderr, so both
 * outcomes are folded into one { stdout, stderr } result.
 * @param {string[]} flags - CLI flags, e.g. ['--json'], ['--raw'], or [].
 * @returns {Promise<{stdout: string, stderr: string}>}
 */
async function runPosture(flags) {
  let result;
  try {
    result = await exec('node', [CLI, FIXTURE, ...flags], {
      timeout: 60000,
      cwd: REPO,
    });
  } catch (err) {
    result = err; // expected path: non-zero exit on findings
  }
  return {
    stdout: result.stdout || '',
    stderr: result.stderr || '',
  };
}
|
|
|
|
// End-to-end wiring tests for the posture CLI's output modes (Step 6).
// Each test spawns the real CLI against the medium fixture and checks the
// mode-specific contract: --json and --raw must stay on the v5.0.0 shape,
// while the default mode must produce the new humanized scorecard.
describe('posture humanizer wiring (Step 6)', () => {
  // SC-6: --json is the machine-readable mode — stdout must match the
  // v5.0.0 snapshot and no scorecard of any kind may appear on stderr.
  describe('--json mode (SC-6: byte-equal stdout)', () => {
    it('stdout JSON deepEquals v5.0.0 snapshot', async () => {
      const { stdout } = await runPosture(['--json']);
      const actual = JSON.parse(stdout);
      const expected = JSON.parse(await readFile(POSTURE_JSON_SNAPSHOT, 'utf-8'));
      // Both sides are normalized: timestamps, target path, durations, and
      // the ancestor-cascade token count vary across runs/machines/doc edits.
      assert.deepStrictEqual(normalizePosture(actual), normalizePosture(expected));
    });

    it('does NOT write a scorecard to stderr (suppressed)', async () => {
      const { stderr } = await runPosture(['--json']);
      // Neither the raw v5.0.0 scorecard header...
      assert.ok(!stderr.includes('Config-Audit Health Score'),
        'stderr must NOT contain scorecard in --json mode');
      // ...nor the new humanized header may leak into --json stderr.
      assert.ok(!stderr.includes('Configuration health'),
        'stderr must NOT contain humanized scorecard in --json mode');
    });

    it('preserves v5.0.0 finding shape (no humanizer fields in scannerEnvelope)', async () => {
      const { stdout } = await runPosture(['--json']);
      const actual = JSON.parse(stdout);
      // Humanizer enrichment must not reach the JSON envelope: every finding
      // in every scanner is checked for the absence of userImpactCategory.
      for (const s of actual.scannerEnvelope.scanners) {
        for (const f of s.findings) {
          assert.equal(f.userImpactCategory, undefined,
            `${f.id}: --json findings must not have userImpactCategory`);
        }
      }
    });
  });

  // SC-7: --raw opts back into the legacy v5.0.0 experience — snapshot
  // stdout AND the verbatim (pre-humanizer) stderr scorecard.
  describe('--raw mode (SC-7: byte-equal stdout + verbatim stderr)', () => {
    it('stdout JSON deepEquals v5.0.0 snapshot', async () => {
      const { stdout } = await runPosture(['--raw']);
      const actual = JSON.parse(stdout);
      const expected = JSON.parse(await readFile(POSTURE_JSON_SNAPSHOT, 'utf-8'));
      assert.deepStrictEqual(normalizePosture(actual), normalizePosture(expected));
    });

    it('stderr scorecard verbatim matches v5.0.0 stderr snapshot', async () => {
      const { stderr } = await runPosture(['--raw']);
      const expected = await readFile(POSTURE_STDERR_SNAPSHOT, 'utf-8');
      // Compare the scorecard portion verbatim (modulo timing in scanner progress lines)
      assert.equal(normalizeStderr(stderr).trim(), normalizeStderr(expected).trim());
    });

    it('preserves v5.0.0 finding shape in stdout', async () => {
      const { stdout } = await runPosture(['--raw']);
      const actual = JSON.parse(stdout);
      for (const s of actual.scannerEnvelope.scanners) {
        for (const f of s.findings) {
          assert.equal(f.userImpactCategory, undefined,
            `${f.id}: --raw findings must not have userImpactCategory`);
        }
      }
    });
  });

  // Default mode (no flags): the humanized scorecard goes to stderr and
  // stdout stays empty — JSON is opt-in via --json/--raw.
  describe('default mode (humanized scorecard)', () => {
    it('writes humanized scorecard to stderr', async () => {
      const { stderr } = await runPosture([]);
      // Humanized scorecard must contain at least one user-friendly cue not in raw v5.0.0
      const hasGradeContext = /healthy|good shape|attention|polish|setup/i.test(stderr);
      assert.ok(hasGradeContext,
        `humanized stderr scorecard must contain user-friendly phrasing, got:\n${stderr}`);
    });

    it('does NOT write JSON to stdout in default mode', async () => {
      const { stdout } = await runPosture([]);
      assert.equal(stdout.trim(), '', 'default mode must not write JSON to stdout');
    });

    it('humanized scorecard differs byte-wise from v5.0.0 stderr', async () => {
      const { stderr } = await runPosture([]);
      const expected = await readFile(POSTURE_STDERR_SNAPSHOT, 'utf-8');
      // Sanity check against a silent no-op: if the humanizer were not wired
      // in, default stderr would equal the v5.0.0 snapshot verbatim.
      assert.notEqual(normalizeStderr(stderr).trim(), normalizeStderr(expected).trim(),
        'humanized stderr must differ from v5.0.0 verbatim stderr');
    });
  });
});
|