ktg-plugin-marketplace/plugins/config-audit/tests/scenario-read-test.test.mjs
Kjell Tore Guttormsen 8b146bf489 feat(humanizer): scenario read-test corpus + runner (SC-4) [skip-docs]
Step 9 of v5.1.0 humanizer Wave 4. Adds tests/scenario-read-test.mjs
runner, tests/scenario-read-test.test.mjs wrapper, and 5 scenario
fixtures in tests/scenarios/ that feed deterministic raw findings
through humanizeFinding and assert the humanized
title/description/recommendation match brief-owner-approved regex
patterns encoding the ground-truth what/why/whatNext answers.

Corpus selection (per brief criteria):

- 01-tok-cascade.json - TOK/CPS category (token efficiency)
- 02-cps-volatile.json - TOK/CPS category (cache prefix stability)
- 03-cnf-conflict.json - CNF category (conflicts)
- 04-gap-no-claude-md.json - GAP category (feature gap)
- 05-set-invalid-json.json - SET category, AND its v5.0.0 title +
  description carry tier1 'invalid' (the brief criterion 'one finding
  whose v5.0.0 description uses a forbidden word').

Runner mechanics:

- Loads scenarios matching ^\\d{2}-[a-z0-9-]+\\.json$ in sorted order.
- Calls humanizeFinding(scannerInput) and matches each humanized field
  against its declared pattern (case-insensitive regex).
- Verifies humanizer-added structural fields (userImpactCategory,
  userActionLanguage, relevanceContext) are non-empty strings.
- Per session decision (1a), acceptance is deterministic regex matching
  without a runtime human approval gate.

Wrapper adds 3 tests: scenario-match (binds runner to node --test),
category-coverage (TOK/CPS, CNF, GAP, SET all present), and
tier1-presence (at least one v5.0.0 title or description contains a
tier1 forbidden word).

Tests: 736 to 739 (+3 SC-4 tests). Full suite passes.

Co-Authored-By: Claude Opus 4.7 <noreply@anthropic.com>
2026-05-01 18:16:23 +02:00

87 lines
3.5 KiB
JavaScript

import { readFile } from 'node:fs/promises';
import { dirname, resolve } from 'node:path';
import { describe, it } from 'node:test';
import { fileURLToPath } from 'node:url';
import assert from 'node:assert/strict';

import { runAll } from './scenario-read-test.mjs';
/**
 * SC-4 wrapper: binds the scenario read-test runner to `node --test`.
 *
 * Three checks:
 *  1. every scenario fixture's humanized output matches its declared patterns,
 *  2. the corpus covers the required scanner categories,
 *  3. at least one fixture's v5.0.0 title/description carries a tier1
 *     forbidden word (kept in sync with lint-forbidden-words.json at runtime).
 */
describe('SC-4 scenario read-test (humanizer corpus)', () => {
  it('matches all scenarios in tests/scenarios/', async () => {
    // `scenarios` is not needed here — only the aggregate counters.
    const { failed, passed, total } = await runAll();
    assert.ok(total >= 5, `expected at least 5 scenarios in corpus, got ${total}`);
    if (failed.length === 0) {
      assert.equal(passed, total);
      return;
    }
    // Aggregate every mismatch into one report so a single run surfaces
    // all failing scenarios instead of just the first.
    const summary = failed
      .map((r) => {
        const reasons = r.failures
          .map((f) => ` [${f.field}] ${f.reason}`)
          .join('\n');
        return ` ${r.file} (${r.findingId})\n${reasons}`;
      })
      .join('\n\n');
    assert.fail(
      `SC-4: ${failed.length}/${total} scenarios did not match humanizer output\n\n${summary}`,
    );
  });

  it('covers required scanner categories (TOK/CPS, CNF, GAP, SET)', async () => {
    const { scenarios } = await runAll();
    const seen = new Set();
    for (const r of scenarios) {
      // NOTE(review): assumes findingId is shaped "<prefix>-<CAT>-…" so the
      // category sits in segment [1] — confirm against the fixture ids.
      seen.add(r.findingId.split('-')[1]);
    }
    // TOK and CPS together cover the "wasted tokens" category — at least one must appear.
    const hasTokenCategory = seen.has('TOK') || seen.has('CPS');
    assert.ok(hasTokenCategory, `corpus must include at least one TOK or CPS finding; saw ${[...seen].join(', ')}`);
    assert.ok(seen.has('CNF'), `corpus must include at least one CNF finding; saw ${[...seen].join(', ')}`);
    assert.ok(seen.has('GAP'), `corpus must include at least one GAP finding; saw ${[...seen].join(', ')}`);
    assert.ok(seen.has('SET'), `corpus must include at least one SET finding; saw ${[...seen].join(', ')}`);
  });

  it('includes at least one scenario whose v5.0.0 description carries a tier1 forbidden word', async () => {
    const { scenarios } = await runAll();
    // Read the forbidden-words file at runtime so this assertion stays in sync
    // with the source of truth (Wave 1 Step 1 artifact).
    const __dirname = dirname(fileURLToPath(import.meta.url));
    const forbiddenPath = resolve(__dirname, 'lint-forbidden-words.json');
    const forbidden = JSON.parse(await readFile(forbiddenPath, 'utf8'));
    // Precompile one case-insensitive regex per tier1 word instead of
    // rebuilding them for every text we scan. Phrases (entries containing a
    // space, hyphen, dot, or slash) match as plain substrings; single words
    // are anchored with word boundaries to avoid partial-word hits.
    const tier1Patterns = forbidden.tier1.map((e) => {
      const lower = e.word.toLowerCase();
      const escaped = lower.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
      return /[ \-./]/.test(lower)
        ? new RegExp(escaped, 'i')
        : new RegExp(`\\b${escaped}\\b`, 'i');
    });
    const matchesTier1 = (text) =>
      typeof text === 'string' && tier1Patterns.some((re) => re.test(text));
    let found = false;
    for (const scenario of scenarios) {
      const path = resolve(__dirname, 'scenarios', scenario.file);
      const body = JSON.parse(await readFile(path, 'utf8'));
      const desc = body?.scannerInput?.description ?? '';
      const title = body?.scannerInput?.title ?? '';
      if (matchesTier1(desc) || matchesTier1(title)) {
        found = true;
        break; // one qualifying fixture satisfies the brief criterion
      }
    }
    assert.ok(
      found,
      'corpus must include at least one scenario whose v5.0.0 title or description contains a tier1 forbidden word',
    );
  });
});