import { describe, it } from 'node:test';
import assert from 'node:assert/strict';
import { resolve, dirname } from 'node:path';
import { fileURLToPath } from 'node:url';
import { execFile } from 'node:child_process';
import { promisify } from 'node:util';
// NOTE(review): `unlink` appears unused in this file — confirm before removing.
import { readFile, unlink } from 'node:fs/promises';

const exec = promisify(execFile);

const __dirname = dirname(fileURLToPath(import.meta.url));
const REPO = resolve(__dirname, '../..');
const CLI = resolve(REPO, 'scanners/posture.mjs');
const FIXTURE = resolve(REPO, 'tests/fixtures/marketplace-medium');
const POSTURE_JSON_SNAPSHOT = resolve(REPO, 'tests/snapshots/v5.0.0/posture.json');
const POSTURE_STDERR_SNAPSHOT = resolve(REPO, 'tests/snapshots/v5.0.0-stderr/posture.txt');

/**
 * Normalize a runPosture result for snapshot comparison by zeroing out
 * time-varying fields, machine-specific paths, and ancestor-cascade-derived
 * counts. `claudeMdEstimatedTokens` reflects walkClaudeMdCascade walking
 * upward from the fixture; any docs edit to this plugin's own CLAUDE.md
 * ripples into it even though scanner behavior is unchanged.
 *
 * @param {object} p - Parsed posture JSON (plain data, no functions/Dates).
 * @returns {object} A deep copy with volatile fields blanked.
 */
function normalizePosture(p) {
  // Input is always plain parsed JSON, so structuredClone is an exact deep copy.
  const out = structuredClone(p);
  const envelope = out.scannerEnvelope;
  if (envelope) {
    if (envelope.meta) {
      envelope.meta.target = ''; // machine-specific absolute path
      envelope.meta.timestamp = ''; // time-varying
    }
    if (Array.isArray(envelope.scanners)) {
      for (const scanner of envelope.scanners) {
        scanner.duration_ms = 0;
        const cfg = scanner.activeConfig;
        if (cfg && 'claudeMdEstimatedTokens' in cfg) {
          cfg.claudeMdEstimatedTokens = ''; // ancestor-cascade-derived; see above
        }
      }
    }
  }
  return out;
}

/** Strip time-varying durations (Xms) so progress lines compare verbatim across runs. */
function normalizeStderr(s) {
  return s.replace(/\(\d+ms\)/g, '(0ms)');
}

/** Memoized CLI runs, keyed by flag-set. */
const runCache = new Map();

/**
 * Run the posture CLI against the fixture with the given flags.
 *
 * The CLI's output for a fixed fixture is deterministic apart from timing,
 * which the normalizers above zero out — so each distinct flag-set is spawned
 * only once and the resulting promise is shared by every test that needs it.
 *
 * @param {string[]} flags - CLI flags (e.g. ['--json'], ['--raw'], []).
 * @returns {Promise<{stdout: string, stderr: string}>}
 */
function runPosture(flags) {
  const key = flags.join('\u0000');
  if (!runCache.has(key)) {
    const run = exec('node', [CLI, FIXTURE, ...flags], {
      timeout: 60000,
      cwd: REPO,
    })
      .catch((err) => err) // posture exits non-zero on findings — capture either way
      .then((proc) => ({
        stdout: proc.stdout || '',
        stderr: proc.stderr || '',
      }));
    runCache.set(key, run);
  }
  return runCache.get(key);
}

/** Read and parse the canonical v5.0.0 stdout JSON snapshot. */
async function readJsonSnapshot() {
  return JSON.parse(await readFile(POSTURE_JSON_SNAPSHOT, 'utf-8'));
}

/**
 * Assert that a CLI stdout payload, once normalized, deep-equals the
 * (equally normalized) v5.0.0 JSON snapshot.
 *
 * @param {string} stdout - Raw stdout from runPosture.
 */
async function assertMatchesJsonSnapshot(stdout) {
  const actual = JSON.parse(stdout);
  const expected = await readJsonSnapshot();
  assert.deepStrictEqual(normalizePosture(actual), normalizePosture(expected));
}

/**
 * Assert that no finding in any scanner carries the humanizer-only
 * `userImpactCategory` field, i.e. the v5.0.0 finding shape is preserved.
 *
 * @param {object} actual - Parsed posture JSON from stdout.
 * @param {string} mode - Flag label interpolated into the failure message.
 */
function assertNoHumanizerFields(actual, mode) {
  for (const scanner of actual.scannerEnvelope.scanners) {
    for (const finding of scanner.findings) {
      assert.equal(
        finding.userImpactCategory,
        undefined,
        `${finding.id}: ${mode} findings must not have userImpactCategory`,
      );
    }
  }
}

describe('posture humanizer wiring (Step 6)', () => {
  describe('--json mode (SC-6: byte-equal stdout)', () => {
    it('stdout JSON deepEquals v5.0.0 snapshot', async () => {
      const { stdout } = await runPosture(['--json']);
      await assertMatchesJsonSnapshot(stdout);
    });

    it('does NOT write a scorecard to stderr (suppressed)', async () => {
      const { stderr } = await runPosture(['--json']);
      assert.ok(
        !stderr.includes('Config-Audit Health Score'),
        'stderr must NOT contain scorecard in --json mode',
      );
      assert.ok(
        !stderr.includes('Configuration health'),
        'stderr must NOT contain humanized scorecard in --json mode',
      );
    });

    it('preserves v5.0.0 finding shape (no humanizer fields in scannerEnvelope)', async () => {
      const { stdout } = await runPosture(['--json']);
      assertNoHumanizerFields(JSON.parse(stdout), '--json');
    });
  });

  describe('--raw mode (SC-7: byte-equal stdout + verbatim stderr)', () => {
    it('stdout JSON deepEquals v5.0.0 snapshot', async () => {
      const { stdout } = await runPosture(['--raw']);
      await assertMatchesJsonSnapshot(stdout);
    });

    it('stderr scorecard verbatim matches v5.0.0 stderr snapshot', async () => {
      const { stderr } = await runPosture(['--raw']);
      const expected = await readFile(POSTURE_STDERR_SNAPSHOT, 'utf-8');
      // Compare the scorecard portion verbatim (modulo timing in scanner progress lines)
      assert.equal(normalizeStderr(stderr).trim(), normalizeStderr(expected).trim());
    });

    it('preserves v5.0.0 finding shape in stdout', async () => {
      const { stdout } = await runPosture(['--raw']);
      assertNoHumanizerFields(JSON.parse(stdout), '--raw');
    });
  });

  describe('default mode (humanized scorecard)', () => {
    it('writes humanized scorecard to stderr', async () => {
      const { stderr } = await runPosture([]);
      // Humanized scorecard must contain at least one user-friendly cue not in raw v5.0.0
      const hasGradeContext = /healthy|good shape|attention|polish|setup/i.test(stderr);
      assert.ok(
        hasGradeContext,
        `humanized stderr scorecard must contain user-friendly phrasing, got:\n${stderr}`,
      );
    });

    it('does NOT write JSON to stdout in default mode', async () => {
      const { stdout } = await runPosture([]);
      assert.equal(stdout.trim(), '', 'default mode must not write JSON to stdout');
    });

    it('humanized scorecard differs byte-wise from v5.0.0 stderr', async () => {
      const { stderr } = await runPosture([]);
      const expected = await readFile(POSTURE_STDERR_SNAPSHOT, 'utf-8');
      assert.notEqual(
        normalizeStderr(stderr).trim(),
        normalizeStderr(expected).trim(),
        'humanized stderr must differ from v5.0.0 verbatim stderr',
      );
    });
  });
});