Wave 5 Step 13. Threads the humanizer vocabulary through five audit/
analysis command templates and adds a shape test that locks the
structure in place.
- commands/posture.md, tokens.md, feature-gap.md (findings-renderers):
reference userImpactCategory/userActionLanguage/relevanceContext;
remove hardcoded A/B/C/D/F-to-prose tables (humanizer owns the
grade-context vocabulary now via the stderr scorecard headline).
- commands/manifest.md, whats-active.md (inventory CLIs): add --raw
pass-through for CLI-surface consistency. --raw is a no-op in these
CLIs, but the flag is threaded through so users get uniform behaviour.
- All five files: --raw flag parsed from $ARGUMENTS and passed verbatim
to the underlying scanner CLI when present.
tests/commands/group-a-shape.test.mjs (new, +5 tests, 767 → 772):
- structural: every file has a bash invocation block, Read tool
reference, and --raw/$ARGUMENTS plumbing
- findings-renderers only: at least one humanized field referenced;
no hardcoded "[grade] grade is..." prose tables
97 lines
3.4 KiB
JavaScript
/**
 * Wave 5 Step 13 — Group A command-template shape tests.
 *
 * Verifies that the 5 audit/analysis command templates have the correct
 * structural shape after the humanizer integration:
 *
 * - All 5 files: contain a Bash invocation block, reference the Read tool,
 *   and contain the `--raw` flag (or the literal `"$ARGUMENTS"` string).
 *
 * - Findings-rendering files (posture.md, tokens.md, feature-gap.md):
 *   reference at least one of `userImpactCategory|userActionLanguage|
 *   relevanceContext`, and do NOT contain hardcoded grade-prose tables
 *   of the form `[ABCDF]\s+grade\s+is...`.
 *
 * - Inventory/data-only files (manifest.md, whats-active.md): structural
 *   checks only (Bash + Read + --raw pass-through). No humanized-field
 *   reference required because these CLIs emit data tables, not findings.
 */

import { test } from 'node:test';
|
|
import { strict as assert } from 'node:assert';
|
|
import { readFile } from 'node:fs/promises';
|
|
import { resolve, dirname } from 'node:path';
|
|
import { fileURLToPath } from 'node:url';
|
|
|
|
const __dirname = dirname(fileURLToPath(import.meta.url));
|
|
const COMMANDS_DIR = resolve(__dirname, '..', '..', 'commands');
|
|
|
|
const GROUP_A_FILES = [
|
|
'posture.md',
|
|
'tokens.md',
|
|
'manifest.md',
|
|
'whats-active.md',
|
|
'feature-gap.md',
|
|
];
|
|
|
|
const FINDINGS_RENDERING_FILES = [
|
|
'posture.md',
|
|
'tokens.md',
|
|
'feature-gap.md',
|
|
];
|
|
|
|
const HUMANIZED_FIELD_REGEX = /userImpactCategory|userActionLanguage|relevanceContext/;
|
|
const RAW_OR_ARGUMENTS_REGEX = /--raw|"\$ARGUMENTS"/;
|
|
const HARDCODED_GRADE_PROSE_REGEX = /[ABCDF]\s+grade\s+is/;
|
|
// A Bash invocation block in markdown is a fenced ``` block tagged with bash.
|
|
const BASH_BLOCK_REGEX = /```bash\b/;
|
|
// Read tool reference: either explicit "Read tool" prose or the frontmatter
|
|
// "allowed-tools" list mentioning Read.
|
|
const READ_TOOL_REGEX = /\bRead\s+tool\b|allowed-tools:.*\bRead\b/;
|
|
|
|
async function readCommand(name) {
|
|
return await readFile(resolve(COMMANDS_DIR, name), 'utf-8');
|
|
}
|
|
|
|
test('Group A: every file contains a Bash invocation block', async () => {
|
|
for (const name of GROUP_A_FILES) {
|
|
const content = await readCommand(name);
|
|
assert.match(content, BASH_BLOCK_REGEX, `${name} missing bash block`);
|
|
}
|
|
});
|
|
|
|
test('Group A: every file references the Read tool', async () => {
|
|
for (const name of GROUP_A_FILES) {
|
|
const content = await readCommand(name);
|
|
assert.match(content, READ_TOOL_REGEX, `${name} missing Read tool reference`);
|
|
}
|
|
});
|
|
|
|
test('Group A: every file contains --raw or "$ARGUMENTS" (pass-through plumbing)', async () => {
|
|
for (const name of GROUP_A_FILES) {
|
|
const content = await readCommand(name);
|
|
assert.match(content, RAW_OR_ARGUMENTS_REGEX, `${name} missing --raw / $ARGUMENTS plumbing`);
|
|
}
|
|
});
|
|
|
|
test('Group A findings-renderers: reference at least one humanized field', async () => {
|
|
for (const name of FINDINGS_RENDERING_FILES) {
|
|
const content = await readCommand(name);
|
|
assert.match(
|
|
content,
|
|
HUMANIZED_FIELD_REGEX,
|
|
`${name} must reference userImpactCategory, userActionLanguage, or relevanceContext`,
|
|
);
|
|
}
|
|
});
|
|
|
|
test('Group A findings-renderers: no hardcoded grade-prose tables', async () => {
|
|
for (const name of FINDINGS_RENDERING_FILES) {
|
|
const content = await readCommand(name);
|
|
assert.doesNotMatch(
|
|
content,
|
|
HARDCODED_GRADE_PROSE_REGEX,
|
|
`${name} contains a hardcoded "[grade] grade is..." prose table — humanizer owns grade vocabulary now`,
|
|
);
|
|
}
|
|
});
|