Wave 1 / Step 2 of v5.1.0 plain-language UX humanizer.
scanners/lib/humanizer-data.mjs exports TRANSLATIONS keyed by
scanner prefix (CML, SET, HKV, RUL, MCP, IMP, CNF, GAP, TOK, CPS,
DIS, COL, PLH). Each scanner has:
- static: exact-title -> {title, description, recommendation}
- patterns: array of {regex, translation} for template-literal titles
- _default: graceful fallback for unknown findings
Architectural change vs. plan: keys translations by exact scanner
title (not finding ID). Reason: finding IDs are sequence-based
(global counter in lib/output.mjs:34), not stable per finding-type
— two runs can produce different IDs for the same logical issue.
Title strings ARE stable (defined as string literals or template
patterns in the scanner source).
Translations follow research/03 SR-1..SR-17:
- active voice, second person, present tense
- sentences <= 25 words
- tier1 absolute prohibitions and tier3 domain jargon are kept out
of prose
- tier1/tier3 terms are permitted inside `backtick spans` (code
references like filenames and field names) — established
technical-doc convention
Test (12 cases): all 13 scanners covered; every static and pattern
entry has the 3 required fields; tier1 and tier3 forbidden-word
checks pass (with backtick-span exclusion); reference-stable
imports. All pass.
Regression: 657/657 tests (645 + 12 new).
Project: .claude/projects/2026-05-01-config-audit-ux-redesign/
177 lines
7.1 KiB
JavaScript
import { test } from 'node:test';
|
|
import assert from 'node:assert/strict';
|
|
import { readFile } from 'node:fs/promises';
|
|
import { fileURLToPath } from 'node:url';
|
|
import { dirname, resolve } from 'node:path';
|
|
import { TRANSLATIONS } from '../../scanners/lib/humanizer-data.mjs';
|
|
|
|
const __dirname = dirname(fileURLToPath(import.meta.url));
|
|
const FORBIDDEN_PATH = resolve(__dirname, '..', 'lint-forbidden-words.json');
|
|
|
|
const EXPECTED_SCANNERS = ['CML', 'SET', 'HKV', 'RUL', 'MCP', 'IMP', 'CNF', 'GAP', 'TOK', 'CPS', 'DIS', 'COL', 'PLH'];
|
|
|
|
function stripBacktickSpans(s) {
|
|
return s.replace(/`[^`]*`/g, '');
|
|
}
|
|
|
|
async function loadForbidden() {
|
|
const raw = await readFile(FORBIDDEN_PATH, 'utf8');
|
|
return JSON.parse(raw);
|
|
}
|
|
|
|
test('TRANSLATIONS exports an object', () => {
|
|
assert.equal(typeof TRANSLATIONS, 'object');
|
|
assert.ok(TRANSLATIONS !== null);
|
|
});
|
|
|
|
test('TRANSLATIONS covers all 13 expected scanner prefixes', () => {
|
|
for (const prefix of EXPECTED_SCANNERS) {
|
|
assert.ok(TRANSLATIONS[prefix], `missing scanner prefix: ${prefix}`);
|
|
}
|
|
});
|
|
|
|
test('every scanner has a _default fallback with all 3 fields', () => {
|
|
for (const prefix of EXPECTED_SCANNERS) {
|
|
const scanner = TRANSLATIONS[prefix];
|
|
assert.ok(scanner._default, `${prefix} missing _default`);
|
|
assert.ok(typeof scanner._default.title === 'string' && scanner._default.title.length > 0,
|
|
`${prefix} _default missing title`);
|
|
assert.ok(typeof scanner._default.description === 'string' && scanner._default.description.length > 0,
|
|
`${prefix} _default missing description`);
|
|
assert.ok(typeof scanner._default.recommendation === 'string' && scanner._default.recommendation.length > 0,
|
|
`${prefix} _default missing recommendation`);
|
|
}
|
|
});
|
|
|
|
test('every scanner has a static map (may be empty)', () => {
|
|
for (const prefix of EXPECTED_SCANNERS) {
|
|
assert.equal(typeof TRANSLATIONS[prefix].static, 'object',
|
|
`${prefix} missing static map`);
|
|
assert.ok(TRANSLATIONS[prefix].static !== null);
|
|
}
|
|
});
|
|
|
|
test('every scanner has a patterns array (may be empty)', () => {
|
|
for (const prefix of EXPECTED_SCANNERS) {
|
|
assert.ok(Array.isArray(TRANSLATIONS[prefix].patterns),
|
|
`${prefix} patterns must be an array`);
|
|
}
|
|
});
|
|
|
|
test('every static-title entry has all 3 fields', () => {
|
|
for (const prefix of EXPECTED_SCANNERS) {
|
|
const staticMap = TRANSLATIONS[prefix].static;
|
|
for (const [title, t] of Object.entries(staticMap)) {
|
|
assert.ok(typeof t.title === 'string' && t.title.length > 0,
|
|
`${prefix} static["${title}"] missing title`);
|
|
assert.ok(typeof t.description === 'string' && t.description.length > 0,
|
|
`${prefix} static["${title}"] missing description`);
|
|
assert.ok(typeof t.recommendation === 'string' && t.recommendation.length > 0,
|
|
`${prefix} static["${title}"] missing recommendation`);
|
|
}
|
|
}
|
|
});
|
|
|
|
test('every pattern entry has regex + translation with all 3 fields', () => {
|
|
for (const prefix of EXPECTED_SCANNERS) {
|
|
for (const p of TRANSLATIONS[prefix].patterns) {
|
|
assert.ok(p.regex instanceof RegExp,
|
|
`${prefix} pattern missing regex`);
|
|
assert.ok(typeof p.translation.title === 'string' && p.translation.title.length > 0,
|
|
`${prefix} pattern translation missing title`);
|
|
assert.ok(typeof p.translation.description === 'string' && p.translation.description.length > 0,
|
|
`${prefix} pattern translation missing description`);
|
|
assert.ok(typeof p.translation.recommendation === 'string' && p.translation.recommendation.length > 0,
|
|
`${prefix} pattern translation missing recommendation`);
|
|
}
|
|
}
|
|
});
|
|
|
|
test('no translated string contains tier1 forbidden words (outside backtick spans)', async () => {
|
|
const data = await loadForbidden();
|
|
const tier1Words = data.tier1.map((e) => e.word);
|
|
const violations = [];
|
|
|
|
for (const prefix of EXPECTED_SCANNERS) {
|
|
const scanner = TRANSLATIONS[prefix];
|
|
const allTranslations = [
|
|
scanner._default,
|
|
...Object.values(scanner.static),
|
|
...scanner.patterns.map((p) => p.translation),
|
|
];
|
|
|
|
for (const t of allTranslations) {
|
|
for (const field of ['title', 'description', 'recommendation']) {
|
|
const text = stripBacktickSpans(t[field]).toLowerCase();
|
|
for (const word of tier1Words) {
|
|
const lower = word.toLowerCase();
|
|
// word-boundary match for single words, plain substring for multi-word phrases
|
|
const re = lower.includes(' ')
|
|
? new RegExp(lower.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'))
|
|
: new RegExp(`\\b${lower.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')}\\b`);
|
|
if (re.test(text)) {
|
|
violations.push(`${prefix} ${field}: "${word}" in "${t[field]}"`);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
assert.equal(violations.length, 0,
|
|
`tier1 violations:\n ${violations.slice(0, 20).join('\n ')}`);
|
|
});
|
|
|
|
test('no translated string contains tier3 jargon (outside backtick spans)', async () => {
|
|
const data = await loadForbidden();
|
|
const tier3Words = data.tier3.map((e) => e.word);
|
|
const violations = [];
|
|
|
|
for (const prefix of EXPECTED_SCANNERS) {
|
|
const scanner = TRANSLATIONS[prefix];
|
|
const allTranslations = [
|
|
scanner._default,
|
|
...Object.values(scanner.static),
|
|
...scanner.patterns.map((p) => p.translation),
|
|
];
|
|
|
|
for (const t of allTranslations) {
|
|
for (const field of ['title', 'description', 'recommendation']) {
|
|
const text = stripBacktickSpans(t[field]);
|
|
for (const word of tier3Words) {
|
|
const lower = word.toLowerCase();
|
|
const re = lower.includes(' ') || lower.includes('/') || lower.includes('-') || lower.includes('.')
|
|
? new RegExp(lower.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'), 'i')
|
|
: new RegExp(`\\b${lower.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')}\\b`, 'i');
|
|
if (re.test(text)) {
|
|
violations.push(`${prefix} ${field}: "${word}" in "${t[field]}"`);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
assert.equal(violations.length, 0,
|
|
`tier3 violations (jargon outside backticks):\n ${violations.slice(0, 20).join('\n ')}`);
|
|
});
|
|
|
|
test('CML, SET, HKV, RUL, MCP, IMP, GAP, TOK, PLH have non-empty static maps', () => {
|
|
// These scanners produce findings with titles we documented. Empty static map suggests missed coverage.
|
|
for (const prefix of ['CML', 'SET', 'HKV', 'RUL', 'MCP', 'IMP', 'GAP', 'TOK', 'PLH']) {
|
|
const count = Object.keys(TRANSLATIONS[prefix].static).length;
|
|
assert.ok(count > 0, `${prefix}.static is empty — expected at least 1 translated title`);
|
|
}
|
|
});
|
|
|
|
test('CNF, COL, PLH have at least one pattern entry (template-literal titles)', () => {
|
|
// These scanners use template-literal titles for some findings.
|
|
for (const prefix of ['CNF', 'COL', 'PLH']) {
|
|
assert.ok(TRANSLATIONS[prefix].patterns.length > 0,
|
|
`${prefix} expected ≥1 pattern entry for template-literal titles`);
|
|
}
|
|
});
|
|
|
|
test('TRANSLATIONS does not mutate when re-imported (deep-frozen-ish)', async () => {
|
|
// Quick sanity — translate object reference equality between imports
|
|
const { TRANSLATIONS: t2 } = await import('../../scanners/lib/humanizer-data.mjs');
|
|
assert.equal(t2, TRANSLATIONS, 'TRANSLATIONS reference should be stable across imports');
|
|
});
|