feat(humanizer): wire humanizer into posture and scoring scorecard

generateHealthScorecard signature: 2-arg → 3-arg (areaScores, opportunityCount,
options = {}). options.humanized=true renders friendlier title, grade-context
line per overall grade, and rephrased opportunity line. options.humanized=false
(or 2-arg call) preserves v5.0.0 verbatim output for backwards-compat.

topActions also gets an optional options.humanized that swaps recommendations
through humanizeFinding lookup.

posture.mjs main():
  --json → write JSON to stdout, suppress stderr scorecard
  --raw  → write JSON to stdout (byte-identical to --json), write v5.0.0
           verbatim scorecard to stderr
  default → humanized scorecard to stderr, no stdout

posture.test.mjs scorecard-prose assertions re-anchored to --raw mode (the
explicit v5.0.0 path) — Wave 0 audit only covered finding-title strings;
scorecard prose surfaces here for the first time.

Wave 3 / Step 6 of v5.1.0 humanizer.

Co-Authored-By: Claude Opus 4.7 <noreply@anthropic.com>
This commit is contained in:
Kjell Tore Guttormsen 2026-05-01 17:38:03 +02:00
commit 70ff900578
5 changed files with 331 additions and 14 deletions

View file

@ -4,6 +4,19 @@
*/
import { gradeFromPassRate, WEIGHTS } from './severity.mjs';
import { humanizeFinding } from './humanizer.mjs';
/**
 * One-line plain-language context per overall grade. Used when a scorecard
 * is rendered with `options.humanized: true`.
 * Keys are overall letter grades; the caller falls back to '' when a grade
 * is missing from this map, so the headline degrades gracefully.
 */
const GRADE_CONTEXT = {
A: 'Healthy setup, only minor polish needed',
B: 'Good shape — a few items to address',
C: 'Some attention needed',
D: 'Several issues — prioritize the urgent ones',
// NOTE(review): F reads milder than D — confirm this copy is intentional.
F: 'Important issues need attention',
};
// --- Tier weights for utilization calculation ---
// t1 features count triple, t2 double, t3/t4 single toward utilization.
const TIER_WEIGHTS = { t1: 3, t2: 2, t3: 1, t4: 1 };
@ -235,14 +248,21 @@ export function scoreByArea(scannerResults) {
/**
 * Derive top 3 actions from GAP findings (T1 first, then T2).
 * @param {object[]} gapFindings - Findings with a `category` tier ('t1'…'t4')
 *   and a `recommendation` string.
 * @param {object} [options]
 * @param {boolean} [options.humanized=false] - When true, return humanized
 *   recommendations (looked up via humanizer translations).
 * @returns {string[]} Up to three recommendation strings, highest tier first.
 */
export function topActions(gapFindings, options = {}) {
  const tierOrder = ['t1', 't2', 't3', 't4'];
  // indexOf returns -1 for unknown/missing categories, which would sort them
  // FIRST (above t1). Rank unknown tiers last instead.
  const rank = (finding) => {
    const i = tierOrder.indexOf(finding.category);
    return i === -1 ? tierOrder.length : i;
  };
  const sorted = [...gapFindings].sort((a, b) => rank(a) - rank(b));
  const top3 = sorted.slice(0, 3);
  if (options.humanized) {
    return top3.map(f => humanizeFinding(f).recommendation);
  }
  return top3.map(f => f.recommendation);
}
/**
@ -307,22 +327,39 @@ export function generateScorecard(areaScores, utilization, maturity, segment, ac
* Shows only the quality areas (currently 8) no utilization, maturity, or segment.
* @param {{ areas: Array<{ name: string, grade: string, score: number }>, overallGrade: string }} areaScores
* @param {number} opportunityCount - Number of GAP findings (shown as opportunity count)
* @param {object} [options]
* @param {boolean} [options.humanized=false] - When true, render with plain-language
* grade context and friendlier opportunity phrasing. When false (default),
* render the v5.0.0 verbatim scorecard (backwards-compatible).
* @returns {string}
*/
export function generateHealthScorecard(areaScores, opportunityCount) {
export function generateHealthScorecard(areaScores, opportunityCount, options = {}) {
const qualityAreas = areaScores.areas.filter(a => a.name !== 'Feature Coverage');
const avgScore = qualityAreas.length > 0
? Math.round(qualityAreas.reduce((s, a) => s + a.score, 0) / qualityAreas.length)
: 0;
const humanized = options.humanized === true;
const lines = [];
lines.push('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
lines.push(' Config-Audit Health Score');
lines.push(humanized ? ' Configuration health' : ' Config-Audit Health Score');
lines.push('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
lines.push('');
lines.push(` Health: ${areaScores.overallGrade} (${avgScore}/100) ${qualityAreas.length} areas scanned`);
if (humanized) {
const context = GRADE_CONTEXT[areaScores.overallGrade] || '';
const headline = context
? ` Health: ${areaScores.overallGrade} (${avgScore}/100) — ${context}`
: ` Health: ${areaScores.overallGrade} (${avgScore}/100)`;
lines.push(headline);
lines.push(` ${qualityAreas.length} areas reviewed`);
} else {
lines.push(` Health: ${areaScores.overallGrade} (${avgScore}/100) ${qualityAreas.length} areas scanned`);
}
lines.push('');
lines.push(' Area Scores');
lines.push(humanized ? ' Area scores' : ' Area Scores');
lines.push(' ───────────');
// Format areas in 2-column layout (quality areas only)
@ -340,7 +377,12 @@ export function generateHealthScorecard(areaScores, opportunityCount) {
if (opportunityCount > 0) {
lines.push('');
lines.push(` ${opportunityCount} ${opportunityCount === 1 ? 'opportunity' : 'opportunities'} available — run /config-audit feature-gap for recommendations`);
if (humanized) {
const noun = opportunityCount === 1 ? 'way' : 'ways';
lines.push(` ${opportunityCount} ${noun} you could get more out of Claude Code — see /config-audit feature-gap`);
} else {
lines.push(` ${opportunityCount} ${opportunityCount === 1 ? 'opportunity' : 'opportunities'} available — run /config-audit feature-gap for recommendations`);
}
}
lines.push('');

View file

@ -60,6 +60,7 @@ async function main() {
let targetPath = '.';
let outputFile = null;
let jsonMode = false;
let rawMode = false;
let includeGlobal = false;
let fullMachine = false;
@ -68,6 +69,8 @@ async function main() {
outputFile = args[++i];
} else if (args[i] === '--json') {
jsonMode = true;
} else if (args[i] === '--raw') {
rawMode = true;
} else if (args[i] === '--global') {
includeGlobal = true;
} else if (args[i] === '--full-machine') {
@ -82,14 +85,20 @@ async function main() {
const filterFixtures = !args.includes('--include-fixtures');
const result = await runPosture(targetPath, { includeGlobal, fullMachine, filterFixtures });
if (jsonMode) {
// stdout JSON path: --json and --raw both write the v5.0.0-shape result
// (byte-identical). Default mode writes nothing to stdout.
if (jsonMode || rawMode) {
const json = JSON.stringify(result, null, 2);
process.stdout.write(json + '\n');
} else {
// Terminal scorecard (v3 health format)
}
// stderr scorecard path: --json suppresses; --raw renders v5.0.0 verbatim
// (humanized=false); default renders humanized scorecard.
if (!jsonMode) {
const scorecard = generateHealthScorecard(
{ areas: result.areas, overallGrade: result.overallGrade },
result.opportunityCount,
{ humanized: !rawMode },
);
process.stderr.write('\n' + scorecard + '\n');
}

View file

@ -0,0 +1,134 @@
import { describe, it } from 'node:test';
import assert from 'node:assert/strict';
import { generateHealthScorecard, topActions } from '../../scanners/lib/scoring.mjs';
// Fixture: three quality areas plus a Feature Coverage entry, which
// generateHealthScorecard filters out of the area display.
const SAMPLE_AREA_SCORES = {
areas: [
{ id: 'claude_md', name: 'CLAUDE.md', grade: 'A', score: 100, findingCount: 0 },
{ id: 'settings', name: 'Settings', grade: 'A', score: 90, findingCount: 1 },
{ id: 'hooks', name: 'Hooks', grade: 'A', score: 100, findingCount: 0 },
{ id: 'feature_coverage', name: 'Feature Coverage', grade: 'D', score: 30, findingCount: 17 },
],
overallGrade: 'A',
};
// Fixture: three GAP findings across tiers (two t1, one t3) — enough to
// exercise topActions tier ordering and the humanized-recommendation path.
const SAMPLE_GAP_FINDINGS = [
{
id: 'CA-GAP-001',
scanner: 'GAP',
severity: 'medium',
title: 'No CLAUDE.md file',
description: 'No project instructions file detected.',
recommendation: 'Create a CLAUDE.md file with project-specific guidance.',
category: 't1',
file: null,
},
{
id: 'CA-GAP-002',
scanner: 'GAP',
severity: 'medium',
title: 'No permissions configured',
description: 'No permissions block in settings.',
recommendation: 'Add a permissions block to settings.json.',
category: 't1',
file: null,
},
{
id: 'CA-GAP-003',
scanner: 'GAP',
severity: 'low',
title: 'No status line configured',
description: 'No status line.',
recommendation: 'Add a status line.',
category: 't3',
file: null,
},
];
// Signature-compatibility suite: 2-arg calls must stay byte-identical to
// v5.0.0 output; 3-arg {humanized: true} must visibly differ.
// NOTE(review): prose assertions rely on exact literals in scoring.mjs —
// keep strings in sync with that module.
describe('generateHealthScorecard signature change (3-param)', () => {
it('2-arg call: backwards-compatible (humanized defaults to false)', () => {
const out = generateHealthScorecard(SAMPLE_AREA_SCORES, 17);
assert.equal(typeof out, 'string');
assert.ok(out.length > 0);
assert.ok(out.includes('Config-Audit Health Score'),
'non-humanized scorecard should contain v5.0.0 title');
});
it('3-arg call with {humanized: false}: byte-equal to 2-arg call', () => {
const twoArg = generateHealthScorecard(SAMPLE_AREA_SCORES, 17);
const threeArgFalse = generateHealthScorecard(SAMPLE_AREA_SCORES, 17, { humanized: false });
assert.equal(threeArgFalse, twoArg, 'options.humanized=false must produce identical output to 2-arg call');
});
it('3-arg call with {humanized: true}: differs from non-humanized', () => {
const nonHumanized = generateHealthScorecard(SAMPLE_AREA_SCORES, 17, { humanized: false });
const humanized = generateHealthScorecard(SAMPLE_AREA_SCORES, 17, { humanized: true });
assert.notEqual(humanized, nonHumanized,
'humanized=true must produce different output from humanized=false');
});
it('3-arg call with {humanized: true}: contains user-friendly phrasing', () => {
const humanized = generateHealthScorecard(SAMPLE_AREA_SCORES, 17, { humanized: true });
// Must contain at least one humanized cue distinguishing it from v5.0.0 prose
const hasGradeContext = /healthy|good shape|attention|polish|setup/i.test(humanized);
assert.ok(hasGradeContext,
`humanized scorecard must include user-friendly grade context, got:\n${humanized}`);
});
it('preserves area names and scores in both modes', () => {
const nonHumanized = generateHealthScorecard(SAMPLE_AREA_SCORES, 17, { humanized: false });
const humanized = generateHealthScorecard(SAMPLE_AREA_SCORES, 17, { humanized: true });
// Feature Coverage is excluded from the area display, so exclude it here too.
for (const area of SAMPLE_AREA_SCORES.areas.filter(a => a.name !== 'Feature Coverage')) {
assert.ok(nonHumanized.includes(area.name),
`non-humanized scorecard must include area name "${area.name}"`);
assert.ok(humanized.includes(area.name),
`humanized scorecard must include area name "${area.name}"`);
// assumes the area layout renders scores as "(<score>)" — TODO confirm
// against the 2-column formatting in scoring.mjs
assert.ok(nonHumanized.includes(`(${area.score})`),
`non-humanized scorecard must include score (${area.score})`);
assert.ok(humanized.includes(`(${area.score})`),
`humanized scorecard must include score (${area.score})`);
}
});
it('opportunity count handling in humanized mode', () => {
const humanizedZero = generateHealthScorecard(SAMPLE_AREA_SCORES, 0, { humanized: true });
const humanizedMany = generateHealthScorecard(SAMPLE_AREA_SCORES, 17, { humanized: true });
assert.ok(humanizedMany.includes('17'), 'humanized scorecard must include opportunity count');
// Both paths must remain finite strings
assert.equal(typeof humanizedZero, 'string');
assert.equal(typeof humanizedMany, 'string');
});
});
// topActions options.humanized suite. Fix: the tier-ordering test previously
// asserted only array length while claiming (in a comment) to verify order —
// it now pins the exact tier-sorted sequence.
describe('topActions humanizer support', () => {
  it('1-arg call: returns raw recommendations (backwards-compatible)', () => {
    const actions = topActions(SAMPLE_GAP_FINDINGS);
    assert.equal(actions.length, 3);
    assert.equal(actions[0], 'Create a CLAUDE.md file with project-specific guidance.');
    assert.equal(actions[1], 'Add a permissions block to settings.json.');
    assert.equal(actions[2], 'Add a status line.');
  });
  it('2-arg call with {humanized: false}: identical to 1-arg call', () => {
    const oneArg = topActions(SAMPLE_GAP_FINDINGS);
    const twoArg = topActions(SAMPLE_GAP_FINDINGS, { humanized: false });
    assert.deepStrictEqual(twoArg, oneArg);
  });
  it('2-arg call with {humanized: true}: at least one recommendation differs', () => {
    const raw = topActions(SAMPLE_GAP_FINDINGS, { humanized: false });
    const humanized = topActions(SAMPLE_GAP_FINDINGS, { humanized: true });
    assert.equal(humanized.length, raw.length, 'array length preserved');
    // The humanizer's GAP TRANSLATIONS replace at least one recommendation (No CLAUDE.md → "Add the file…")
    const anyDiffer = humanized.some((r, i) => r !== raw[i]);
    assert.ok(anyDiffer,
      `humanized=true must change at least one recommendation. raw=${JSON.stringify(raw)} humanized=${JSON.stringify(humanized)}`);
  });
  it('preserves ordering by tier (t1 → t2 → t3)', () => {
    // Raw mode pins the exact order: the two t1 findings (stable, in input
    // order), then the t3 finding (t2 absent from the sample).
    const raw = topActions(SAMPLE_GAP_FINDINGS, { humanized: false });
    assert.deepStrictEqual(raw, [
      'Create a CLAUDE.md file with project-specific guidance.',
      'Add a permissions block to settings.json.',
      'Add a status line.',
    ]);
    // Humanized mode maps position i ↔ finding i, so length is preserved.
    const humanized = topActions(SAMPLE_GAP_FINDINGS, { humanized: true });
    assert.equal(humanized.length, 3);
  });
});

View file

@ -0,0 +1,130 @@
import { describe, it } from 'node:test';
import assert from 'node:assert/strict';
import { resolve, dirname } from 'node:path';
import { fileURLToPath } from 'node:url';
import { execFile } from 'node:child_process';
import { promisify } from 'node:util';
import { readFile, unlink } from 'node:fs/promises';
const exec = promisify(execFile);
// Resolve paths relative to this file; assumes it lives two directory levels
// below the repo root — TODO confirm if the file moves.
const __dirname = dirname(fileURLToPath(import.meta.url));
const REPO = resolve(__dirname, '../..');
// CLI under test and the fixture project it scans.
const CLI = resolve(REPO, 'scanners/posture.mjs');
const FIXTURE = resolve(REPO, 'tests/fixtures/marketplace-medium');
// v5.0.0 golden snapshots: stdout JSON and stderr scorecard text.
const POSTURE_JSON_SNAPSHOT = resolve(REPO, 'tests/snapshots/v5.0.0/posture.json');
const POSTURE_STDERR_SNAPSHOT = resolve(REPO, 'tests/snapshots/v5.0.0-stderr/posture.txt');
/**
 * Normalize a runPosture result for snapshot comparison by zeroing out
 * time-varying fields and machine-specific paths.
 * Works on a deep copy (JSON round-trip); the input is never mutated.
 * @param {object} p - Parsed posture result.
 * @returns {object} Copy with meta.target/meta.timestamp replaced by
 *   placeholders and every scanner duration_ms zeroed.
 */
function normalizePosture(p) {
  const copy = JSON.parse(JSON.stringify(p));
  const envelope = copy.scannerEnvelope;
  if (envelope) {
    const { meta, scanners } = envelope;
    if (meta) {
      meta.target = '<TARGET>';
      meta.timestamp = '<TIMESTAMP>';
    }
    if (Array.isArray(scanners)) {
      for (const scanner of scanners) scanner.duration_ms = 0;
    }
  }
  return copy;
}
/**
 * Strip time-varying durations so progress lines compare verbatim across runs.
 * @param {string} stderrText - Raw stderr capture.
 * @returns {string} Same text with every "(123ms)" collapsed to "(0ms)".
 */
function normalizeStderr(stderrText) {
  // Durations vary per run; collapse them all to a fixed placeholder.
  return stderrText.replace(/\(\d+ms\)/g, '(0ms)');
}
/**
 * Run the posture CLI against the fixture project with the given flags.
 * posture exits non-zero when it has findings, so a rejected exec is treated
 * the same as success: the thrown error object carries stdout/stderr too.
 * @param {string[]} flags - Extra CLI flags (e.g. ['--json']).
 * @returns {Promise<{stdout: string, stderr: string}>}
 */
async function runPosture(flags) {
  let result;
  try {
    result = await exec('node', [CLI, FIXTURE, ...flags], {
      timeout: 60000,
      cwd: REPO,
    });
  } catch (err) {
    result = err; // exec errors expose the same stdout/stderr fields
  }
  return {
    stdout: result.stdout || '',
    stderr: result.stderr || '',
  };
}
// Integration suite: spawns the posture CLI per mode and checks the
// stdout/stderr contract against v5.0.0 snapshots. Success criteria labels
// (SC-6/SC-7) presumably map to the v5.1.0 plan — verify against plan doc.
describe('posture humanizer wiring (Step 6)', () => {
describe('--json mode (SC-6: byte-equal stdout)', () => {
it('stdout JSON deepEquals v5.0.0 snapshot', async () => {
const { stdout } = await runPosture(['--json']);
const actual = JSON.parse(stdout);
const expected = JSON.parse(await readFile(POSTURE_JSON_SNAPSHOT, 'utf-8'));
// Both sides normalized: timestamps, target paths, durations zeroed.
assert.deepStrictEqual(normalizePosture(actual), normalizePosture(expected));
});
it('does NOT write a scorecard to stderr (suppressed)', async () => {
const { stderr } = await runPosture(['--json']);
// Checks both the v5.0.0 title and the humanized title.
assert.ok(!stderr.includes('Config-Audit Health Score'),
'stderr must NOT contain scorecard in --json mode');
assert.ok(!stderr.includes('Configuration health'),
'stderr must NOT contain humanized scorecard in --json mode');
});
it('preserves v5.0.0 finding shape (no humanizer fields in scannerEnvelope)', async () => {
const { stdout } = await runPosture(['--json']);
const actual = JSON.parse(stdout);
for (const s of actual.scannerEnvelope.scanners) {
for (const f of s.findings) {
assert.equal(f.userImpactCategory, undefined,
`${f.id}: --json findings must not have userImpactCategory`);
}
}
});
});
describe('--raw mode (SC-7: byte-equal stdout + verbatim stderr)', () => {
it('stdout JSON deepEquals v5.0.0 snapshot', async () => {
const { stdout } = await runPosture(['--raw']);
const actual = JSON.parse(stdout);
const expected = JSON.parse(await readFile(POSTURE_JSON_SNAPSHOT, 'utf-8'));
assert.deepStrictEqual(normalizePosture(actual), normalizePosture(expected));
});
it('stderr scorecard verbatim matches v5.0.0 stderr snapshot', async () => {
const { stderr } = await runPosture(['--raw']);
const expected = await readFile(POSTURE_STDERR_SNAPSHOT, 'utf-8');
// Compare the scorecard portion verbatim (modulo timing in scanner progress lines)
assert.equal(normalizeStderr(stderr).trim(), normalizeStderr(expected).trim());
});
it('preserves v5.0.0 finding shape in stdout', async () => {
const { stdout } = await runPosture(['--raw']);
const actual = JSON.parse(stdout);
for (const s of actual.scannerEnvelope.scanners) {
for (const f of s.findings) {
assert.equal(f.userImpactCategory, undefined,
`${f.id}: --raw findings must not have userImpactCategory`);
}
}
});
});
describe('default mode (humanized scorecard)', () => {
it('writes humanized scorecard to stderr', async () => {
const { stderr } = await runPosture([]);
// Humanized scorecard must contain at least one user-friendly cue not in raw v5.0.0
const hasGradeContext = /healthy|good shape|attention|polish|setup/i.test(stderr);
assert.ok(hasGradeContext,
`humanized stderr scorecard must contain user-friendly phrasing, got:\n${stderr}`);
});
it('does NOT write JSON to stdout in default mode', async () => {
const { stdout } = await runPosture([]);
assert.equal(stdout.trim(), '', 'default mode must not write JSON to stdout');
});
it('humanized scorecard differs byte-wise from v5.0.0 stderr', async () => {
const { stderr } = await runPosture([]);
const expected = await readFile(POSTURE_STDERR_SNAPSHOT, 'utf-8');
assert.notEqual(normalizeStderr(stderr).trim(), normalizeStderr(expected).trim(),
'humanized stderr must differ from v5.0.0 verbatim stderr');
});
});
});

View file

@ -92,8 +92,10 @@ describe('posture.mjs CLI — minimal project', () => {
});
describe('posture.mjs CLI — terminal output (v3 health format)', () => {
it('scorecard contains health sections', async () => {
const { stderr } = await runPosture([resolve(FIXTURES, 'healthy-project')]);
// These assertions verify the v5.0.0 verbatim scorecard prose. Default mode
// is humanized as of v5.1.0 (Wave 3); --raw is the explicit v5.0.0 path.
it('scorecard contains health sections (v5.0.0 verbatim via --raw)', async () => {
const { stderr } = await runPosture([resolve(FIXTURES, 'healthy-project'), '--raw']);
assert.ok(stderr.includes('Config-Audit Health Score'));
assert.ok(stderr.includes('Health:'));
assert.ok(stderr.includes('Area Scores'));
@ -101,14 +103,14 @@ describe('posture.mjs CLI — terminal output (v3 health format)', () => {
});
// Fix: the rendered diff retained both the removed and the added
// `const { stderr }` lines in each test, which is a duplicate-const
// SyntaxError as literal source. Resolved to the post-change (--raw) lines.
it('scorecard does NOT contain legacy metrics', async () => {
  // --raw pins the v5.0.0 verbatim scorecard (default mode is humanized
  // as of v5.1.0 Wave 3).
  const { stderr } = await runPosture([resolve(FIXTURES, 'healthy-project'), '--raw']);
  assert.ok(!stderr.includes('Maturity:'));
  assert.ok(!stderr.includes('Utilization:'));
  assert.ok(!stderr.includes('Segment:'));
});
it('scorecard excludes Feature Coverage from area display', async () => {
  const { stderr } = await runPosture([resolve(FIXTURES, 'healthy-project'), '--raw']);
  assert.ok(!stderr.includes('Feature Coverage'));
});
});