import { describe, it } from 'node:test';
import assert from 'node:assert/strict';
import { runAll } from './scenario-read-test.mjs';

// SC-4 read-test over the humanizer scenario corpus in tests/scenarios/:
// (1) every scenario must match humanizer output, (2) the corpus must span the
// required scanner categories, (3) at least one scenario must exercise a tier1
// forbidden word from the lint list. Runs under `node --test`.
describe('SC-4 scenario read-test (humanizer corpus)', () => {
  it('matches all scenarios in tests/scenarios/', async () => {
    const { failed, passed, total } = await runAll();
    assert.ok(total >= 5, `expected at least 5 scenarios in corpus, got ${total}`);
    if (failed.length === 0) {
      assert.equal(passed, total);
      return;
    }
    // At least one scenario mismatched: build a per-scenario report so the
    // failure output pinpoints which file/finding and which fields diverged.
    const summary = failed
      .map((r) => {
        const reasons = r.failures
          .map((f) => ` [${f.field}] ${f.reason}`)
          .join('\n');
        return ` ${r.file} (${r.findingId})\n${reasons}`;
      })
      .join('\n\n');
    assert.fail(
      `SC-4: ${failed.length}/${total} scenarios did not match humanizer output\n\n${summary}`,
    );
  });

  it('covers required scanner categories (TOK/CPS, CNF, GAP, SET)', async () => {
    const { scenarios } = await runAll();
    // Category prefix is the second dash-separated token of the finding id
    // (e.g. "X-TOK-001" -> "TOK"); collect every prefix seen in the corpus.
    const seen = new Set();
    for (const r of scenarios) {
      seen.add(r.findingId.split('-')[1]);
    }
    // TOK and CPS together cover the "wasted tokens" category — at least one must appear.
    const hasTokenCategory = seen.has('TOK') || seen.has('CPS');
    assert.ok(hasTokenCategory, `corpus must include at least one TOK or CPS finding; saw ${[...seen].join(', ')}`);
    for (const category of ['CNF', 'GAP', 'SET']) {
      assert.ok(seen.has(category), `corpus must include at least one ${category} finding; saw ${[...seen].join(', ')}`);
    }
  });

  it('includes at least one scenario whose v5.0.0 description carries a tier1 forbidden word', async () => {
    const { scenarios } = await runAll();
    // Read the forbidden-words file at runtime so this assertion stays in sync
    // with the source of truth (Wave 1 Step 1 artifact).
    const { readFile } = await import('node:fs/promises');
    const { resolve, dirname } = await import('node:path');
    const { fileURLToPath } = await import('node:url');
    const __dirname = dirname(fileURLToPath(import.meta.url));
    const forbiddenPath = resolve(__dirname, 'lint-forbidden-words.json');
    const forbidden = JSON.parse(await readFile(forbiddenPath, 'utf8'));

    // Compile one case-insensitive pattern per tier1 word up front instead of
    // rebuilding every regex for every text checked. Words containing spaces,
    // hyphens, dots, or slashes cannot rely on \b word boundaries, so those
    // are matched as plain substrings.
    const tier1Patterns = forbidden.tier1.map((e) => {
      const lower = e.word.toLowerCase();
      const escaped = lower.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
      return /[ \-./]/.test(lower)
        ? new RegExp(escaped, 'i')
        : new RegExp(`\\b${escaped}\\b`, 'i');
    });
    const matchesTier1 = (text) =>
      typeof text === 'string' && tier1Patterns.some((re) => re.test(text));

    // Scan scenario files sequentially and stop at the first hit — once one
    // qualifying scenario is found there is no need to read the rest.
    let found = false;
    for (const scenario of scenarios) {
      const path = resolve(__dirname, 'scenarios', scenario.file);
      const body = JSON.parse(await readFile(path, 'utf8'));
      const desc = body?.scannerInput?.description ?? '';
      const title = body?.scannerInput?.title ?? '';
      if (matchesTier1(desc) || matchesTier1(title)) {
        found = true;
        break;
      }
    }
    assert.ok(
      found,
      'corpus must include at least one scenario whose v5.0.0 title or description contains a tier1 forbidden word',
    );
  });
});