diff --git a/CLAUDE.md b/CLAUDE.md index 7deb084..00f201d 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -7,7 +7,7 @@ Open-source Claude Code plugin marketplace. Solo project by Kjell Tore Guttormse ``` plugins/ ai-psychosis/ v1.0.0 — Interaction awareness (sycophancy, reinforcement loops) - config-audit/ v3.0.1 — Configuration intelligence (health, opportunities, auto-fix) + config-audit/ v3.1.0 — Configuration intelligence (health, opportunities, auto-fix, whats-active) linkedin-thought-leadership/ v1.2.0 — LinkedIn content pipeline + analytics llm-security/ v6.0.0 — Security scanning, auditing, threat modeling ms-ai-architect/ v1.8.0 — Microsoft AI architecture (Cosmo Skyberg persona) diff --git a/README.md b/README.md index df008a0..eb7e0e5 100644 --- a/README.md +++ b/README.md @@ -41,19 +41,20 @@ Key commands: `/security posture`, `/security audit`, `/security scan`, `/securi --- -### [Config-Audit](plugins/config-audit/) `v3.0.1` +### [Config-Audit](plugins/config-audit/) `v3.1.0` -Configuration intelligence for Claude Code — health checks, feature discovery, and auto-fix. +Configuration intelligence for Claude Code — health checks, feature discovery, auto-fix, and active-config inventory. -Claude Code reads instructions from 7+ file types across multiple scopes. This plugin tells you what's wrong, what's missing, and what's silently conflicting: +Claude Code reads instructions from 7+ file types across multiple scopes. 
This plugin tells you what's wrong, what's missing, what's silently conflicting, and now — what's actually loaded: - **Health** — 7 deterministic scanners verify correctness across every configuration file (broken imports, deprecated settings, conflicting rules, permission contradictions) - **Opportunities** — context-aware recommendations for Claude Code features you're not using - **Action** — auto-fix with mandatory backups, syntax validation, rollback support, and human-in-the-loop workflow +- **What's active** — read-only inventory of plugins, skills, MCP servers, hooks, and CLAUDE.md cascade for a repo, with token estimates -Key commands: `/config-audit posture`, `/config-audit discover`, `/config-audit feature-gap`, `/config-audit fix` +Key commands: `/config-audit posture`, `/config-audit feature-gap`, `/config-audit fix`, `/config-audit whats-active` -6 agents · 8 scanners · 15 commands · 482+ tests +6 agents · 8 scanners · 16 commands · 522+ tests → [Full documentation](plugins/config-audit/README.md) diff --git a/plugins/config-audit/.claude-plugin/plugin.json b/plugins/config-audit/.claude-plugin/plugin.json index b12bd80..f34cd46 100644 --- a/plugins/config-audit/.claude-plugin/plugin.json +++ b/plugins/config-audit/.claude-plugin/plugin.json @@ -1,7 +1,7 @@ { "name": "config-audit", "description": "Multi-agent workflow for analyzing, reporting, and optimizing Claude Code configuration across your entire machine", - "version": "3.0.1", + "version": "3.1.0", "author": { "name": "Kjell Tore Guttormsen" }, diff --git a/plugins/config-audit/CHANGELOG.md b/plugins/config-audit/CHANGELOG.md index a1c6f44..467d48c 100644 --- a/plugins/config-audit/CHANGELOG.md +++ b/plugins/config-audit/CHANGELOG.md @@ -5,6 +5,22 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+## [3.1.0] - 2026-04-14 + +### Summary +New read-only command `/config-audit whats-active` — shows exactly what Claude Code loads for a given repo, with token estimates. + +### Added +- **`/config-audit whats-active [path]`** — inventory of active plugins, skills, MCP servers, hooks, and CLAUDE.md cascade for a repo, with source attribution (user/project/plugin) and rough token estimates. Read-only, <2s. +- `scanners/lib/active-config-reader.mjs` — pure async helper: `readActiveConfig()`, `detectGitRoot()`, `walkClaudeMdCascade()`, `readClaudeJsonProjectSlice()` (longest-prefix matching), `enumeratePlugins()`, `enumerateSkills()`, `readActiveHooks()`, `readActiveMcpServers()`, `estimateTokens()`. +- `scanners/whats-active.mjs` — thin CLI shim supporting `--json`, `--output-file`, `--verbose`, `--suggest-disables`. +- Optional `--suggest-disables` flag surfaces deterministic disable candidates (disabled MCP servers, zero-item plugins, unreferenced plugins, orphan skills) and invites an LLM judgment pass in the command. +- 36 new tests in `tests/lib/active-config-reader.test.mjs`, plus a `rich-repo` tmpdir fixture helper. + +### Changed +- Version bump: `3.0.1` → `3.1.0` (minor, additive feature, no breaking changes). +- Command count: 15 → 16. 
+ ## [3.0.1] - 2026-04-04 ### Summary diff --git a/plugins/config-audit/CLAUDE.md b/plugins/config-audit/CLAUDE.md index f9c2be6..6ff8eaf 100644 --- a/plugins/config-audit/CLAUDE.md +++ b/plugins/config-audit/CLAUDE.md @@ -30,6 +30,7 @@ Analyzes and optimizes Claude Code configuration across three pillars: |---------|-------------| | `/config-audit drift` | Compare current config against saved baseline | | `/config-audit plugin-health` | Audit plugin structure, frontmatter, cross-plugin coherence | +| `/config-audit whats-active` | Read-only inventory of plugins, skills, MCP, hooks, CLAUDE.md active for a repo (with token estimates) | | `/config-audit discover` | Run discovery phase only | | `/config-audit analyze` | Run analysis phase only | | `/config-audit interview` | Gather user preferences (opt-in) | @@ -79,6 +80,7 @@ Scanner CLI: `node scanners/scan-orchestrator.mjs [--global] [--full-mach | `baseline.mjs` | Baseline save/load/list/delete for drift detection | | `report-generator.mjs` | Unified markdown reports: posture, drift, plugin health | | `suppression.mjs` | .config-audit-ignore parsing, finding suppression, audit trail | +| `active-config-reader.mjs` | Read-only inventory: readActiveConfig(), detectGitRoot(), walkClaudeMdCascade(), readClaudeJsonProjectSlice() (longest-prefix match), enumeratePlugins(), enumerateSkills(), readActiveHooks(), readActiveMcpServers(), estimateTokens() | ### Action Engines (`scanners/`) @@ -88,6 +90,7 @@ Scanner CLI: `node scanners/scan-orchestrator.mjs [--global] [--full-mach | `rollback-engine.mjs` | listBackups(), restoreBackup(), deleteBackup() | | `fix-cli.mjs` | CLI: `node fix-cli.mjs [--apply] [--json] [--global]` | | `drift-cli.mjs` | CLI: `node drift-cli.mjs [--save] [--baseline name] [--json]` | +| `whats-active.mjs` | CLI: `node whats-active.mjs [--json] [--verbose] [--suggest-disables]` — read-only active-config inventory | ### Standalone Scanner diff --git a/plugins/config-audit/README.md 
b/plugins/config-audit/README.md index 0a67ae4..3f7dad8 100644 --- a/plugins/config-audit/README.md +++ b/plugins/config-audit/README.md @@ -4,13 +4,13 @@ *Built for my own Claude Code workflow and shared openly for anyone who finds it useful. This is a solo project — bug reports and feature requests are welcome, but pull requests are not accepted.* -![Version](https://img.shields.io/badge/version-3.0.1-blue) +![Version](https://img.shields.io/badge/version-3.1.0-blue) ![Platform](https://img.shields.io/badge/platform-Claude_Code_Plugin-purple) ![Scanners](https://img.shields.io/badge/scanners-8-cyan) -![Commands](https://img.shields.io/badge/commands-15-green) +![Commands](https://img.shields.io/badge/commands-16-green) ![Agents](https://img.shields.io/badge/agents-6-orange) ![Hooks](https://img.shields.io/badge/hooks-4-red) -![Tests](https://img.shields.io/badge/tests-482+-brightgreen) +![Tests](https://img.shields.io/badge/tests-522+-brightgreen) ![License](https://img.shields.io/badge/license-MIT-lightgrey) A Claude Code plugin that checks configuration health, suggests context-aware improvements, and auto-fixes issues — `CLAUDE.md`, `settings.json`, hooks, rules, MCP servers, `@imports`, and plugins. 7 quality scanners for correctness, context-aware feature recommendations, auto-fix with backup/rollback. Zero external dependencies. @@ -525,6 +525,7 @@ This plugin is cautious by design — configuration files are important, and a b | Version | Date | Highlights | |---------|------|-----------| +| **3.1.0** | 2026-04-14 | New `/config-audit whats-active` — read-only inventory of active plugins, skills, MCP, hooks, CLAUDE.md for a repo, with token estimates. 522 tests | | **3.0.1** | 2026-04-04 | Cross-platform fix: Windows path separators. 486 tests | | **3.0.0** | 2026-04-04 | Health redesign: quality-only grades, context-aware opportunities (replaces utilization/maturity/segment), Anthropic guidance. 
482 tests | | **2.2.0** | 2026-04-04 | Fixture filtering (test findings excluded from grades), session path fix, UX polish. 461 tests | diff --git a/plugins/config-audit/commands/config-audit.md b/plugins/config-audit/commands/config-audit.md index 80b6999..7a46f5a 100644 --- a/plugins/config-audit/commands/config-audit.md +++ b/plugins/config-audit/commands/config-audit.md @@ -1,7 +1,7 @@ --- name: config-audit description: Claude Code Configuration Intelligence - audit, analyze, and optimize your configuration -argument-hint: "[posture|feature-gap|fix|rollback|plan|implement|help|discover|analyze|interview|drift|plugin-health|status|cleanup]" +argument-hint: "[posture|feature-gap|fix|rollback|plan|implement|help|discover|analyze|interview|drift|plugin-health|whats-active|status|cleanup]" allowed-tools: Read, Write, Glob, Grep, Bash, Agent, AskUserQuestion model: opus --- @@ -25,6 +25,7 @@ If a subcommand is provided, route to it: - `interview` → `/config-audit:interview` - `drift` → `/config-audit:drift` - `plugin-health` → `/config-audit:plugin-health` +- `whats-active` → `/config-audit:whats-active` - `status` → `/config-audit:status` - `cleanup` → `/config-audit:cleanup` diff --git a/plugins/config-audit/commands/help.md b/plugins/config-audit/commands/help.md index a12d9c2..1da8e11 100644 --- a/plugins/config-audit/commands/help.md +++ b/plugins/config-audit/commands/help.md @@ -37,6 +37,7 @@ Just run `/config-audit` — it auto-detects your project scope and runs a full |---------|-------------| | `/config-audit drift` | Compare current config against a saved baseline | | `/config-audit plugin-health` | Audit plugin structure and frontmatter quality | +| `/config-audit whats-active` | Show active plugins/skills/MCP/hooks/CLAUDE.md with token estimates | ### Utility diff --git a/plugins/config-audit/commands/whats-active.md b/plugins/config-audit/commands/whats-active.md new file mode 100644 index 0000000..fefcb19 --- /dev/null +++ 
b/plugins/config-audit/commands/whats-active.md @@ -0,0 +1,175 @@ +--- +name: config-audit:whats-active +description: Show which plugins, skills, MCP servers, hooks, and CLAUDE.md files are active for a repo — with token estimates +argument-hint: "[path] [--json] [--verbose] [--suggest-disables]" +allowed-tools: Read, Glob, Bash +model: sonnet +--- + +# Config-Audit: What's Active + +Show a complete, read-only inventory of everything Claude Code loads for a given repo — plugins, skills, MCP servers, hooks, CLAUDE.md cascade — with source attribution and rough token estimates. Helps identify candidates for disabling without guessing. + +## UX Rules (MANDATORY — from `.claude/rules/ux-rules.md`) + +1. **Never show raw JSON or stderr output.** Always use `--output-file` + `2>/dev/null`. +2. **Narrate before acting.** Tell the user what you're about to do. +3. **Read, don't dump.** Read the JSON file and render formatted tables. +4. **End with context-sensitive next steps.** + +## Implementation + +### Step 1: Parse `$ARGUMENTS` + +Split `$ARGUMENTS` into a path and flags. Path is the first non-flag argument. Default to `.` (current working directory). Recognized flags: + +- `--json` — emit raw JSON instead of rendered tables (power-user mode) +- `--verbose` — include per-file byte/line detail +- `--suggest-disables` — append deterministic disable-candidates + LLM-judgment pass + +### Step 2: Run the CLI silently + +Tell the user: **"Reading active configuration for ``..."** + +```bash +TMPFILE="/tmp/ca-whats-active-$$.json" +node ${CLAUDE_PLUGIN_ROOT}/scanners/whats-active.mjs --output-file "$TMPFILE" [--verbose] [--suggest-disables] 2>/dev/null; echo $? +``` + +**Exit code handling:** +- `0` → continue +- `3` → tell user: "Couldn't read configuration. Check that the path exists and is a directory." Stop. + +### Step 3: If `--json` was requested, cat the file and stop + +```bash +cat "$TMPFILE" +``` + +Do NOT render tables in JSON mode. 
+ +### Step 4: Read JSON and render + +Use the Read tool on `$TMPFILE`. Extract: + +- `meta.repoPath`, `meta.durationMs`, `meta.gitRoot`, `meta.projectKey` +- `totals.estimatedTokens.grandTotal` (and subtotals) +- `claudeMd.files[]` — render cascade table +- `plugins[]` — render plugin table +- `skills[]` — render skills table +- `mcpServers[]` — render MCP table (disabled shown italic) +- `hooks[]` — render hooks table + +Render as markdown: + +```markdown +**Active configuration for ``** — ~{grandTotal} tokens loaded at startup + +{if gitRoot != repoPath: "Git root: ``"} +{if projectKey: "`.claude.json` project slice: ``"} + +### CLAUDE.md cascade ({claudeMd.files.length} files, ~{claudeMd.estimatedTokens} tokens) + +| Scope | Path | Bytes | Lines | +|-------|------|-------|-------| +| {scope} | `` | {bytes} | {lines} | +| ... | ... | ... | ... | + +### Plugins ({plugins.length}, ~{plugins subtotal} tokens) + +| Plugin | Version | Commands | Agents | Skills | Hooks | Rules | Tokens | +|--------|---------|----------|--------|--------|-------|-------|--------| +| {name} | {version} | {commands} | {agents} | {skills} | {hooks} | {rules} | ~{estimatedTokens} | + +### Skills ({skills.length}, ~{skills subtotal} tokens) + +| Skill | Source | Tokens | +|-------|--------|--------| +| {name} | {source}{if pluginName: ` (${pluginName})`} | ~{estimatedTokens} | + +### MCP Servers ({mcpServers.length}, ~{mcpServers subtotal} tokens) + +| Server | Source | Status | Command | +|--------|--------|--------|---------| +| {name} | {source} | {enabled ? 
"enabled" : "*disabled*"} | `{command}` | + +### Hooks ({hooks.length}, ~{hooks subtotal} tokens) + +| Event | Matcher | Source | +|-------|---------|--------| +| {event} | {matcher or "-"} | {source} | + +### Settings cascade + +| Scope | Path | Keys | +|-------|------|------| +| user | `` | {keyCount} | +| project | `` | {keyCount} | +| local | `` | {keyCount or "(missing)"} | + +### Totals + +| Category | Items | Estimated tokens | +|----------|-------|------------------| +| CLAUDE.md | {claudeMdFiles} | ~{claudeMd} | +| Plugins | {plugins} | ~{plugins} | +| Skills | {skills} | ~{skills} | +| MCP servers | {mcpServers} | ~{mcpServers} | +| Hooks | {hooks} | ~{hooks} | +| **Grand total** | — | **~{grandTotal}** | + +_Estimates assume ~4 chars/token (Claude ballpark). Real token count varies ±15%._ +``` + +### Step 5: If `--verbose`, add per-file detail + +For each CLAUDE.md file, skill, and plugin, include a nested "Details" list with bytes, lines, and full path. + +### Step 6: If `--suggest-disables`, show candidates + +First show deterministic signals from `suggestDisables.candidates[]`: + +```markdown +### Disable candidates (deterministic) + +| Kind | Name | Reason | Confidence | +|------|------|--------|------------| +| {kind} | {name} | {reason} | {confidence} | +``` + +Then run LLM judgment — check `git log --oneline -20` and project manifests (package.json/Cargo.toml/etc.) to propose up to **3** additional candidates. For each candidate, you MUST: +1. Name the specific redundancy +2. Name the signal the user should check to confirm + +Do NOT suggest items you can't name concrete redundancy for. If you can't find 3 strong candidates, return fewer or zero. 
+ +### Step 7: Cleanup and next steps + +```bash +rm -f "$TMPFILE" +``` + +```markdown +### What's next + +- **`/config-audit posture`** — check configuration health (A-F grades per area) +- **`/config-audit feature-gap`** — context-aware recommendations for features you aren't using +- **Disable a plugin:** edit `~/.claude/settings.json` → `enabledPlugins` (remove the entry) +- **Disable an MCP server:** edit `~/.claude.json` → `projects..disabledMcpjsonServers` +- **Re-run with flags:** `/config-audit whats-active --verbose` (details) or `--suggest-disables` (pruning help) +``` + +## Scope and limits + +- **Read-only.** This command never writes to configuration files — no mkdir, no edits, no deletes. +- **Single repo.** Scans one repo path per invocation. Cross-repo rollups are out of scope. +- **Ballpark token counts.** Estimates are deterministic but not calibrated against Claude's tokenizer. Use them to compare categories, not to predict exact billing. +- **No runtime queries.** We inspect config files only — we do not connect to MCP servers or invoke hooks. + +## Error handling + +| Condition | Action | +|-----------|--------| +| Exit code 3 | Tell user path is invalid, suggest checking path exists | +| JSON parse fails (shouldn't happen — CLI writes valid JSON) | Tell user to re-run, mention this as a bug to report | +| No plugins, no CLAUDE.md, no hooks found | Still render with zeroes; suggest `/config-audit feature-gap` for setup help | diff --git a/plugins/config-audit/scanners/lib/active-config-reader.mjs b/plugins/config-audit/scanners/lib/active-config-reader.mjs new file mode 100644 index 0000000..d76f6af --- /dev/null +++ b/plugins/config-audit/scanners/lib/active-config-reader.mjs @@ -0,0 +1,827 @@ +/** + * Active Config Reader — enumerates everything Claude Code actually loads for a repo. + * Read-only helper used by `scanners/whats-active.mjs` and the `whats-active` command. + * + * All functions are async and side-effect-free (no writes). 
+ * Zero external dependencies. + */ + +import { readFile, readdir, stat, realpath } from 'node:fs/promises'; +import { join, resolve, dirname, basename, isAbsolute, sep } from 'node:path'; +import { parseFrontmatter, parseJson, findImports } from './yaml-parser.mjs'; +import { lineCount, normalizePath } from './string-utils.mjs'; +import { discoverPlugins } from '../plugin-health-scanner.mjs'; + +const SCHEMA_VERSION = '1.0.0'; + +// ───────────────────────────────────────────────────────────────────────── +// Token estimation +// ───────────────────────────────────────────────────────────────────────── + +/** + * Estimate tokens for a given byte count and content kind. + * Deterministic heuristic — see feature plan §4 for rationale. + * + * @param {number} bytes - Byte count (or item count for kind='item') + * @param {'markdown'|'frontmatter'|'json'|'item'} kind + * @returns {number} Integer token count (rounded up) + */ +export function estimateTokens(bytes, kind = 'markdown') { + if (kind === 'item') return 15; + if (typeof bytes !== 'number' || bytes < 0 || !Number.isFinite(bytes)) return 0; + if (kind === 'frontmatter') { + const capped = Math.min(bytes, 600); + return Math.ceil(capped / 4); + } + if (kind === 'json') return Math.ceil(bytes / 3.5); + // default: markdown + return Math.ceil(bytes / 4); +} + +// ───────────────────────────────────────────────────────────────────────── +// Git root detection +// ───────────────────────────────────────────────────────────────────────── + +/** + * Walk up from startPath looking for a .git directory (or .git file for worktrees). 
/**
 * Walk up from startPath looking for a .git directory (or .git file for worktrees).
 *
 * @param {string} startPath - Directory to start from (need not exist on disk).
 * @returns {Promise<string|null>} absolute path to git root, or null if none
 */
export async function detectGitRoot(startPath) {
  let current = resolve(startPath);
  for (;;) {
    try {
      // stat succeeds for both a .git directory and a .git worktree file.
      await stat(join(current, '.git'));
      return current;
    } catch {
      /* no .git at this level — keep walking up */
    }
    const parent = dirname(current);
    // dirname is a fixed point at the filesystem root; the original loop
    // (`while (current !== resolve('/'))`) exited before ever checking the
    // root itself and mis-handled non-primary drive roots on Windows.
    if (parent === current) return null;
    current = parent;
  }
}

// ─────────────────────────────────────────────────────────────────────────
// CLAUDE.md cascade
// ─────────────────────────────────────────────────────────────────────────
/**
 * Enumerate all CLAUDE.md files that load for a given repo path, in load order:
 * managed → user (~/.claude/CLAUDE.md) → ancestor CLAUDE.md (walking up to $HOME) →
 * repo CLAUDE.md → @imports (recursive, deduped).
 *
 * Each file in the result includes absolute path, scope, bytes, lines, and parent.
 * Imports are marked with scope='import' and `parent` is the absolute path of the
 * file that imported them.
 *
 * @param {string} repoPath
 * @returns {Promise<{ files: Array<{path:string, scope:string, bytes:number, lines:number, parent:string|null}>, totalBytes:number, totalLines:number, estimatedTokens:number }>}
 */
export async function walkClaudeMdCascade(repoPath) {
  const home = process.env.HOME || process.env.USERPROFILE || '';
  const absRepoPath = resolve(repoPath);
  const files = [];
  const seen = new Set();

  // Managed locations (platform-dependent, best effort)
  const managedCandidates = [
    '/Library/Application Support/ClaudeCode/CLAUDE.md',
    '/etc/claude-code/CLAUDE.md',
  ];
  for (const p of managedCandidates) {
    await tryAddClaudeMd(p, 'managed', null, files, seen);
  }

  // User: ~/.claude/CLAUDE.md
  if (home) {
    await tryAddClaudeMd(join(home, '.claude', 'CLAUDE.md'), 'user', null, files, seen);
  }

  // Ancestors between $HOME and repoPath (exclusive of $HOME, inclusive of repoPath)
  const ancestorChain = buildAncestorChain(absRepoPath, home);
  for (const ancestor of ancestorChain) {
    // FIX: was `ancestor === absRepoPath ? 'project' : 'project'` — both
    // branches were identical (dead conditional). Collapsed to the constant.
    // If a distinct scope for non-repo ancestors was intended (e.g.
    // 'ancestor'), that is a deliberate behavior change to make separately.
    const scope = 'project';
    await tryAddClaudeMd(join(ancestor, 'CLAUDE.md'), scope, null, files, seen);
    // Project-local variant applies only at the repo itself.
    if (ancestor === absRepoPath) {
      await tryAddClaudeMd(join(ancestor, 'CLAUDE.local.md'), 'local', null, files, seen);
    }
  }

  // Recursively resolve @imports from all files found so far (BFS; `seen` dedupes,
  // which also breaks import cycles).
  const queue = files.slice();
  while (queue.length > 0) {
    const parent = queue.shift();
    let content;
    try {
      content = await readFile(parent.path, 'utf-8');
    } catch { continue; }
    for (const imp of findImports(content)) {
      const resolved = resolveImportPath(imp.path, parent.path, home);
      if (!resolved || seen.has(resolved)) continue;
      const added = await tryAddClaudeMd(resolved, 'import', parent.path, files, seen);
      if (added) queue.push(added);
    }
  }

  const totalBytes = files.reduce((sum, f) => sum + f.bytes, 0);
  const totalLines = files.reduce((sum, f) => sum + f.lines, 0);
  const estimatedTokens = estimateTokens(totalBytes, 'markdown');

  return { files, totalBytes, totalLines, estimatedTokens };
}

/**
 * Stat + read a CLAUDE.md candidate and append it to `files` if it is a regular
 * file not already seen. Returns the entry, or null if skipped/missing.
 * Note: paths that fail to stat are NOT added to `seen`, so they may be probed
 * again via a later @import — harmless, just a repeated stat.
 */
async function tryAddClaudeMd(absPath, scope, parent, files, seen) {
  if (seen.has(absPath)) return null;
  try {
    const s = await stat(absPath);
    if (!s.isFile()) return null;
    const content = await readFile(absPath, 'utf-8');
    const entry = { path: absPath, scope, bytes: s.size, lines: lineCount(content), parent };
    files.push(entry);
    seen.add(absPath);
    return entry;
  } catch {
    return null;
  }
}

/**
 * Directories from just below $HOME down to absRepoPath, in load order
 * (outermost first). If $HOME is empty or not an ancestor, walks up to the
 * filesystem root instead.
 */
function buildAncestorChain(absRepoPath, home) {
  const chain = [];
  let current = absRepoPath;
  const normalizedHome = home ? resolve(home) : null;
  const fsRoot = resolve('/');
  while (current !== fsRoot) {
    if (normalizedHome && current === normalizedHome) break;
    chain.push(current);
    const parent = dirname(current);
    if (parent === current) break;
    current = parent;
  }
  // Load order: outer → inner (so we reverse the walked-up chain)
  return chain.reverse();
}
/**
 * Resolve an @import target to an absolute path.
 *
 * `~/` (and a bare leading `~`) expand to the user's home directory; relative
 * paths resolve against the directory of the importing file. Returns null for
 * blank input.
 *
 * NOTE(review): a leading `~` without `/` (e.g. `~alice/x.md`) is treated as
 * home-relative (`$HOME/alice/x.md`), not as another user's home directory —
 * confirm this matches Claude Code's import semantics.
 */
function resolveImportPath(importPath, fromFile, home) {
  const trimmed = importPath.trim();
  if (!trimmed) return null;
  let candidate = trimmed;
  if (candidate.startsWith('~/')) {
    candidate = join(home, candidate.slice(2));
  } else if (candidate.startsWith('~')) {
    candidate = join(home, candidate.slice(1));
  }
  return isAbsolute(candidate) ? candidate : resolve(dirname(fromFile), candidate);
}

// ─────────────────────────────────────────────────────────────────────────
// .claude.json project slice
// ─────────────────────────────────────────────────────────────────────────
/**
 * Read ~/.claude.json and return the best-matching projects slice for repoPath.
 * Uses longest-prefix matching — if two keys match, the deeper one wins.
 * Paths are normalized (trailing slashes stripped) before comparison.
 *
 * @param {string} repoPath
 * @returns {Promise<{ projectKey: string|null, mcpServers: object, enabledMcpjsonServers: string[], disabledMcpjsonServers: string[], enabledPlugins: object, raw: object|null }>}
 */
export async function readClaudeJsonProjectSlice(repoPath) {
  const home = process.env.HOME || process.env.USERPROFILE || '';
  const claudeJsonPath = join(home, '.claude.json');
  const empty = {
    projectKey: null,
    mcpServers: {},
    enabledMcpjsonServers: [],
    disabledMcpjsonServers: [],
    enabledPlugins: {},
    raw: null,
  };

  let content;
  try {
    const s = await stat(claudeJsonPath);
    // Safety: skip pathologically large files (>10MB)
    if (s.size > 10 * 1024 * 1024) return empty;
    content = await readFile(claudeJsonPath, 'utf-8');
  } catch {
    return empty;
  }

  const parsed = parseJson(content);
  if (!parsed) return empty;

  const target = normalizePath(resolve(repoPath));
  const projects = parsed.projects || {};

  // Exact match wins immediately; otherwise take the longest ancestor-prefix
  // key, requiring a path-separator boundary so /foo does not match /foobar.
  let best = null;
  let bestLen = -1;
  for (const key of Object.keys(projects)) {
    const normKey = normalizePath(key);
    if (normKey === target) {
      best = key;
      break;
    }
    if (target.startsWith(normKey + sep) && normKey.length > bestLen) {
      best = key;
      bestLen = normKey.length;
    }
  }

  if (!best) return { ...empty, raw: parsed };

  const slice = projects[best] || {};
  return {
    projectKey: best,
    mcpServers: slice.mcpServers || {},
    enabledMcpjsonServers: Array.isArray(slice.enabledMcpjsonServers) ? slice.enabledMcpjsonServers : [],
    disabledMcpjsonServers: Array.isArray(slice.disabledMcpjsonServers) ? slice.disabledMcpjsonServers : [],
    enabledPlugins: slice.enabledPlugins || {},
    raw: parsed,
  };
}
// ─────────────────────────────────────────────────────────────────────────
// Plugin enumeration
// ─────────────────────────────────────────────────────────────────────────

/**
 * Enumerate all plugins installed under ~/.claude/plugins/marketplaces.
 * For each plugin: counts commands, agents, skills, hooks, rules; reads version
 * from plugin.json. Symlinked duplicates are collapsed via realpath.
 *
 * @returns {Promise<Array<object>>} one record per unique plugin root
 */
export async function enumeratePlugins() {
  const home = process.env.HOME || process.env.USERPROFILE || '';
  if (!home) return [];

  const marketplacesRoot = join(home, '.claude', 'plugins', 'marketplaces');
  const pluginRoots = await discoverAllPluginsUnder(marketplacesRoot);

  const results = [];
  const seen = new Set(); // canonical (realpath) roots — symlinks are common
  for (const root of pluginRoots) {
    let canonical = root;
    try {
      canonical = await realpath(root);
    } catch { /* keep the raw path if realpath fails */ }
    if (seen.has(canonical)) continue;
    seen.add(canonical);

    const info = await countPluginItems(root);

    // Best effort: name/version from .claude-plugin/plugin.json, else dir name.
    let name = basename(root);
    let version = null;
    try {
      const manifest = parseJson(await readFile(join(root, '.claude-plugin', 'plugin.json'), 'utf-8'));
      if (manifest) {
        version = manifest.version || null;
        if (manifest.name) name = manifest.name;
      }
    } catch { /* no plugin.json */ }

    results.push({
      name,
      path: root,
      version,
      commands: info.commands,
      agents: info.agents,
      skills: info.skills,
      hooks: info.hooks,
      rules: info.rules,
      totalBytes: info.totalBytes,
      estimatedTokens: info.estimatedTokens,
    });
  }

  return results;
}
const mpDir = join(marketplacesRoot, m.name); + // A marketplace has either a `plugins/` dir or plugins directly + const pluginsDir = join(mpDir, 'plugins'); + const found = await discoverPlugins(pluginsDir).catch(() => []); + if (found.length > 0) { + results.push(...found); + } else { + // Fallback: treat marketplace itself as plugin root to scan + const alt = await discoverPlugins(mpDir).catch(() => []); + results.push(...alt); + } + } + return results; +} + +async function countPluginItems(pluginRoot) { + const counts = { commands: 0, agents: 0, skills: 0, hooks: 0, rules: 0, totalBytes: 0, estimatedTokens: 0 }; + + // Commands (frontmatter — only small portion loaded at startup) + const commandsDir = join(pluginRoot, 'commands'); + const commandFiles = await listMarkdownFiles(commandsDir); + counts.commands = commandFiles.length; + for (const f of commandFiles) { + counts.totalBytes += f.size; + counts.estimatedTokens += estimateTokens(f.size, 'frontmatter'); + } + + // Agents (frontmatter similarly) + const agentsDir = join(pluginRoot, 'agents'); + const agentFiles = await listMarkdownFiles(agentsDir); + counts.agents = agentFiles.length; + for (const f of agentFiles) { + counts.totalBytes += f.size; + counts.estimatedTokens += estimateTokens(f.size, 'frontmatter'); + } + + // Skills (SKILL.md bodies) + const skillsDir = join(pluginRoot, 'skills'); + const skillFiles = await findSkillMdFiles(skillsDir); + counts.skills = skillFiles.length; + for (const f of skillFiles) { + counts.totalBytes += f.size; + counts.estimatedTokens += estimateTokens(f.size, 'markdown'); + } + + // Hooks (hooks.json — count entries) + const hooksJsonPath = join(pluginRoot, 'hooks', 'hooks.json'); + try { + const s = await stat(hooksJsonPath); + const content = await readFile(hooksJsonPath, 'utf-8'); + const parsed = parseJson(content); + if (parsed && parsed.hooks && typeof parsed.hooks === 'object') { + for (const event of Object.keys(parsed.hooks)) { + const arr = 
parsed.hooks[event]; + if (Array.isArray(arr)) { + for (const entry of arr) { + if (entry && Array.isArray(entry.hooks)) { + counts.hooks += entry.hooks.length; + } else { + counts.hooks += 1; + } + } + } + } + } + counts.totalBytes += s.size; + counts.estimatedTokens += estimateTokens(s.size, 'json'); + } catch { /* no hooks */ } + + // Rules + const rulesDir = join(pluginRoot, 'rules'); + const altRulesDir = join(pluginRoot, '.claude', 'rules'); + for (const d of [rulesDir, altRulesDir]) { + const rules = await listMarkdownFiles(d); + counts.rules += rules.length; + for (const f of rules) { + counts.totalBytes += f.size; + counts.estimatedTokens += estimateTokens(f.size, 'markdown'); + } + } + + return counts; +} + +async function listMarkdownFiles(dir) { + const out = []; + let entries; + try { entries = await readdir(dir, { withFileTypes: true }); } catch { return out; } + for (const e of entries) { + if (!e.isFile()) continue; + if (!e.name.endsWith('.md')) continue; + const full = join(dir, e.name); + try { + const s = await stat(full); + out.push({ path: full, size: s.size }); + } catch { /* skip */ } + } + return out; +} + +async function findSkillMdFiles(dir) { + const out = []; + async function walk(d, depth) { + if (depth > 3) return; + let entries; + try { entries = await readdir(d, { withFileTypes: true }); } catch { return; } + for (const e of entries) { + const full = join(d, e.name); + if (e.isDirectory()) { + await walk(full, depth + 1); + } else if (e.isFile() && /^SKILL\.md$/i.test(e.name)) { + try { + const s = await stat(full); + out.push({ path: full, size: s.size }); + } catch { /* skip */ } + } + } + } + await walk(dir, 0); + return out; +} + +// ───────────────────────────────────────────────────────────────────────── +// Skills (user + plugin) +// ───────────────────────────────────────────────────────────────────────── + +/** + * Enumerate SKILL.md files available to Claude Code: user skills under ~/.claude/skills + * plus all skills 
/**
 * Enumerate SKILL.md files available to Claude Code: user skills under
 * ~/.claude/skills plus all skills discovered via enumeratePlugins results.
 *
 * @param {Array<{name:string, path:string}>} pluginList
 * @returns {Promise<Array<object>>} skill records with source attribution
 */
export async function enumerateSkills(pluginList = []) {
  const homeDir = process.env.HOME || process.env.USERPROFILE || '';
  const skills = [];

  // Shared record builder: the skill's name is its containing directory.
  const pushSkill = (file, source, pluginName) => {
    skills.push({
      name: basename(dirname(file.path)),
      source,
      pluginName,
      path: file.path,
      bytes: file.size,
      estimatedTokens: estimateTokens(file.size, 'markdown'),
    });
  };

  if (homeDir) {
    const userSkillFiles = await findSkillMdFiles(join(homeDir, '.claude', 'skills'));
    for (const file of userSkillFiles) pushSkill(file, 'user', null);
  }

  for (const plugin of pluginList) {
    const pluginSkillFiles = await findSkillMdFiles(join(plugin.path, 'skills'));
    for (const file of pluginSkillFiles) pushSkill(file, 'plugin', plugin.name);
  }

  return skills;
}

// ─────────────────────────────────────────────────────────────────────────
// Hooks (user + project + plugin)
// ─────────────────────────────────────────────────────────────────────────

/**
 * Read active hooks from user settings, project settings, and plugin
 * hooks.json files. Does NOT dedupe — a hook loaded from two scopes is
 * reported once per scope (the sources differ).
 *
 * @param {string} repoPath
 * @param {Array<{name:string, path:string}>} [pluginList]
 * @returns {Promise<Array<object>>} flattened hook records
 */
export async function readActiveHooks(repoPath, pluginList = []) {
  const homeDir = process.env.HOME || process.env.USERPROFILE || '';
  const hooks = [];

  // User scope
  if (homeDir) {
    await collectHooksFromSettings(join(homeDir, '.claude', 'settings.json'), 'user', hooks);
  }

  // Project + local scopes
  await collectHooksFromSettings(join(repoPath, '.claude', 'settings.json'), 'project', hooks);
  await collectHooksFromSettings(join(repoPath, '.claude', 'settings.local.json'), 'local', hooks);

  // Plugin scope
  for (const plugin of pluginList) {
    await collectHooksFromHooksJson(
      join(plugin.path, 'hooks', 'hooks.json'),
      `plugin:${plugin.name}`,
      hooks,
    );
  }

  return hooks;
}

/** Collect hooks declared in a settings.json-style file into `out`. */
async function collectHooksFromSettings(settingsPath, source, out) {
  let text;
  try {
    text = await readFile(settingsPath, 'utf-8');
  } catch {
    return; // file absent — nothing to collect
  }
  const parsed = parseJson(text);
  if (parsed && parsed.hooks && typeof parsed.hooks === 'object') {
    collectHookEntries(parsed.hooks, source, settingsPath, out);
  }
}

/** Collect hooks declared in a plugin hooks.json file into `out`. */
async function collectHooksFromHooksJson(hooksPath, source, out) {
  let text;
  try {
    text = await readFile(hooksPath, 'utf-8');
  } catch {
    return; // file absent — nothing to collect
  }
  const parsed = parseJson(text);
  if (parsed && parsed.hooks && typeof parsed.hooks === 'object') {
    collectHookEntries(parsed.hooks, source, hooksPath, out);
  }
}

/**
 * Flatten a `{ Event: [ { matcher?, hooks?: [...] } ] }` structure into
 * per-leaf records. Entries without a nested `hooks` array are treated as
 * a single leaf themselves.
 */
function collectHookEntries(hooksObj, source, sourcePath, out) {
  for (const [event, eventEntries] of Object.entries(hooksObj)) {
    if (!Array.isArray(eventEntries)) continue;
    for (const entry of eventEntries) {
      if (!entry) continue;
      const matcher = entry.matcher || null;
      const leafHooks = Array.isArray(entry.hooks) ? entry.hooks : [entry];
      for (const leaf of leafHooks) {
        if (!leaf) continue;
        out.push({
          event,
          matcher,
          command: leaf.command || leaf.script || '',
          source,
          sourcePath,
          estimatedTokens: estimateTokens(0, 'item'),
        });
      }
    }
  }
}

// ─────────────────────────────────────────────────────────────────────────
// MCP servers (project .mcp.json + ~/.claude.json + plugin)
// ─────────────────────────────────────────────────────────────────────────

/**
 * Enumerate active MCP servers from project .mcp.json, the ~/.claude.json
 * project slice, and plugin .mcp.json files. Honors the slice's
 * disabledMcpjsonServers list when marking servers enabled/disabled.
 *
 * @param {string} repoPath
 * @param {object} [claudeJsonSlice] - result of readClaudeJsonProjectSlice
 * @param {Array<{name:string, path:string}>} [pluginList]
 * @returns {Promise<Array<object>>} server records
 */
export async function readActiveMcpServers(repoPath, claudeJsonSlice = null, pluginList = []) {
  const servers = [];
  const slice = claudeJsonSlice || await readClaudeJsonProjectSlice(repoPath);
  const disabled = new Set(slice.disabledMcpjsonServers || []);

  // Project .mcp.json
  await collectMcpFromFile(join(repoPath, '.mcp.json'), '.mcp.json', disabled, servers);

  // ~/.claude.json project slice
  for (const [name, def] of Object.entries(slice.mcpServers || {})) {
    const isDisabled = disabled.has(name);
    servers.push({
      name,
      source: '~/.claude.json:projects',
      command: describeMcpCommand(def),
      enabled: !isDisabled,
      disabledBy: isDisabled ? 'disabledMcpjsonServers' : null,
      estimatedTokens: estimateTokens(0, 'item'),
    });
  }

  // Plugin .mcp.json files
  for (const plugin of pluginList) {
    await collectMcpFromFile(join(plugin.path, '.mcp.json'), `plugin:${plugin.name}`, disabled, servers);
  }

  return servers;
}

/** Read an .mcp.json file and append one record per declared server. */
async function collectMcpFromFile(path, source, disabled, out) {
  let text;
  try {
    text = await readFile(path, 'utf-8');
  } catch {
    return; // file absent
  }
  const parsed = parseJson(text);
  if (!parsed || !parsed.mcpServers || typeof parsed.mcpServers !== 'object') return;
  for (const [name, def] of Object.entries(parsed.mcpServers)) {
    const isDisabled = disabled.has(name);
    out.push({
      name,
      source,
      command: describeMcpCommand(def),
      enabled: !isDisabled,
      disabledBy: isDisabled ? 'disabledMcpjsonServers' : null,
      estimatedTokens: estimateTokens(0, 'item'),
    });
  }
}

/**
 * Render a one-line human-readable description of an MCP server definition:
 * the URL for http/sse servers, otherwise "command arg1 arg2 ...".
 */
function describeMcpCommand(def) {
  if (!def || typeof def !== 'object') return '';
  if (def.type === 'http' || def.type === 'sse') return def.url || '';
  if (!def.command) return '';
  const argText = Array.isArray(def.args) ? def.args.join(' ') : '';
  return argText ? `${def.command} ${argText}` : def.command;
}

// ─────────────────────────────────────────────────────────────────────────
// Settings cascade
// ─────────────────────────────────────────────────────────────────────────

/**
 * Describe the user → project → local settings.json cascade: for each layer,
 * whether the file exists and how many top-level keys it declares.
 */
async function readSettingsCascade(repoPath) {
  const homeDir = process.env.HOME || process.env.USERPROFILE || '';
  const layers = [
    { scope: 'user', path: homeDir ? join(homeDir, '.claude', 'settings.json') : null },
    { scope: 'project', path: join(repoPath, '.claude', 'settings.json') },
    { scope: 'local', path: join(repoPath, '.claude', 'settings.local.json') },
  ];
  const cascade = [];
  for (const layer of layers) {
    if (!layer.path) continue; // no HOME — user layer unknowable
    let exists = false;
    let keyCount = 0;
    try {
      const text = await readFile(layer.path, 'utf-8');
      exists = true;
      const parsed = parseJson(text);
      if (parsed && typeof parsed === 'object') {
        keyCount = Object.keys(parsed).length;
      }
    } catch { /* layer missing */ }
    cascade.push({ scope: layer.scope, path: layer.path, exists, keyCount });
  }
  return cascade;
}
// ─────────────────────────────────────────────────────────────────────────
// Suggest disables (deterministic signals)
// ─────────────────────────────────────────────────────────────────────────

/**
 * Build deterministic "you could disable this" candidates from already
 * collected inventory data. Pure function — no I/O.
 *
 * Signals:
 *   1. MCP server already disabled via config (high confidence).
 *   2. Plugin with zero commands/agents/skills/hooks (high).
 *   3. Non-empty plugin never mentioned in the CLAUDE.md cascade (medium).
 *   4. Skill whose owning plugin is not installed (high).
 *
 * @param {{plugins: Array, skills: Array, mcpServers: Array, claudeMdBodies: string[]}} data
 * @returns {{candidates: Array<{kind:string, name:string, reason:string, confidence:string}>}}
 */
function buildSuggestDisables({ plugins, skills, mcpServers, claudeMdBodies }) {
  const candidates = [];

  // 1. Already disabled MCP servers
  for (const m of mcpServers) {
    if (!m.enabled) {
      candidates.push({
        kind: 'mcp',
        name: m.name,
        reason: `already disabled via ${m.disabledBy || 'config'}`,
        confidence: 'high',
      });
    }
  }

  // 2. Plugin with zero items
  for (const p of plugins) {
    if (p.commands + p.agents + p.skills + p.hooks === 0) {
      candidates.push({
        kind: 'plugin',
        name: p.name,
        reason: 'plugin contains no commands, agents, skills, or hooks',
        confidence: 'high',
      });
    }
  }

  // 3. Plugin unreferenced in CLAUDE.md cascade (case-insensitive substring)
  const corpus = claudeMdBodies.join('\n').toLowerCase();
  for (const p of plugins) {
    // Empty plugins are already covered by signal 2 — skip them here.
    if (p.commands + p.agents + p.skills + p.hooks === 0) continue;
    if (!corpus.includes(p.name.toLowerCase())) {
      candidates.push({
        kind: 'plugin',
        name: p.name,
        reason: 'plugin name not mentioned in any CLAUDE.md in the cascade',
        confidence: 'medium',
      });
    }
  }

  // 4. Skill from plugin whose plugin is missing
  const pluginNames = new Set(plugins.map(p => p.name));
  for (const s of skills) {
    if (s.source === 'plugin' && s.pluginName && !pluginNames.has(s.pluginName)) {
      candidates.push({
        kind: 'skill',
        name: s.name,
        reason: `skill references plugin "${s.pluginName}" which is not installed`,
        confidence: 'high',
      });
    }
  }

  return { candidates };
}

// ─────────────────────────────────────────────────────────────────────────
// One-shot readActiveConfig
// ─────────────────────────────────────────────────────────────────────────

/**
 * Produce a full ActiveConfig snapshot for repoPath.
 * Runs component enumerators in parallel where possible (skills/hooks/MCP
 * need the plugin list, so they run in a second wave). Targets <2s wall-clock.
 *
 * FIX: removed the dead, empty `if (!opts.verbose)` block whose comment
 * contradicted itself — it performed no work and misleadingly suggested
 * per-file detail was being stripped. `verbose` is accepted but currently
 * does not change the payload.
 *
 * @param {string} repoPath
 * @param {object} [opts]
 * @param {boolean} [opts.verbose=false] - reserved; currently inert
 * @param {boolean} [opts.suggestDisables=false] - include disable candidates
 * @returns {Promise<object>} see feature plan §3 for shape
 */
export async function readActiveConfig(repoPath, opts = {}) {
  const start = Date.now();
  const absRepoPath = resolve(repoPath);

  // Wave 1: everything that does not depend on the plugin list.
  const [
    gitRoot,
    claudeMd,
    claudeJsonSlice,
    plugins,
    settingsCascade,
  ] = await Promise.all([
    detectGitRoot(absRepoPath),
    walkClaudeMdCascade(absRepoPath),
    readClaudeJsonProjectSlice(absRepoPath),
    enumeratePlugins(),
    readSettingsCascade(absRepoPath),
  ]);

  // Wave 2: enumerators that consume the plugin list.
  const [skills, hooks, mcpServers] = await Promise.all([
    enumerateSkills(plugins),
    readActiveHooks(absRepoPath, plugins),
    readActiveMcpServers(absRepoPath, claudeJsonSlice, plugins),
  ]);

  const totals = {
    plugins: plugins.length,
    skills: skills.length,
    mcpServers: mcpServers.length,
    hooks: hooks.length,
    claudeMdFiles: claudeMd.files.length,
    estimatedTokens: {
      claudeMd: claudeMd.estimatedTokens,
      plugins: plugins.reduce((sum, p) => sum + p.estimatedTokens, 0),
      skills: skills.reduce((sum, k) => sum + k.estimatedTokens, 0),
      mcpServers: mcpServers.reduce((sum, m) => sum + m.estimatedTokens, 0),
      hooks: hooks.reduce((sum, h) => sum + h.estimatedTokens, 0),
      grandTotal: 0,
    },
  };
  const tok = totals.estimatedTokens;
  tok.grandTotal = tok.claudeMd + tok.plugins + tok.skills + tok.mcpServers + tok.hooks;

  const warnings = [];

  let suggestDisables = null;
  if (opts.suggestDisables) {
    // Read CLAUDE.md bodies only when needed; unreadable files contribute ''.
    const claudeMdBodies = await Promise.all(
      claudeMd.files.map(async f => {
        try { return await readFile(f.path, 'utf-8'); } catch { return ''; }
      }),
    );
    suggestDisables = buildSuggestDisables({ plugins, skills, mcpServers, claudeMdBodies });
  }

  return {
    meta: {
      tool: 'config-audit:whats-active',
      version: SCHEMA_VERSION,
      generatedAt: new Date().toISOString(),
      repoPath: absRepoPath,
      gitRoot,
      projectKey: claudeJsonSlice.projectKey,
      durationMs: Date.now() - start,
    },
    claudeMd,
    plugins,
    skills,
    mcpServers,
    hooks,
    settings: { cascade: settingsCascade },
    totals,
    suggestDisables,
    warnings,
  };
}
+ m.estimatedTokens, 0), + hooks: hooks.reduce((s, h) => s + h.estimatedTokens, 0), + grandTotal: 0, + }, + }; + totals.estimatedTokens.grandTotal = + totals.estimatedTokens.claudeMd + + totals.estimatedTokens.plugins + + totals.estimatedTokens.skills + + totals.estimatedTokens.mcpServers + + totals.estimatedTokens.hooks; + + const warnings = []; + + let suggestDisables = null; + if (opts.suggestDisables) { + const claudeMdBodies = await Promise.all( + claudeMd.files.map(async f => { + try { return await readFile(f.path, 'utf-8'); } catch { return ''; } + }), + ); + suggestDisables = buildSuggestDisables({ plugins, skills, mcpServers, claudeMdBodies }); + } + + const result = { + meta: { + tool: 'config-audit:whats-active', + version: SCHEMA_VERSION, + generatedAt: new Date().toISOString(), + repoPath: absRepoPath, + gitRoot, + projectKey: claudeJsonSlice.projectKey, + durationMs: Date.now() - start, + }, + claudeMd, + plugins, + skills, + mcpServers, + hooks, + settings: { cascade: settingsCascade }, + totals, + suggestDisables, + warnings, + }; + + // In non-verbose mode, drop per-file detail nobody asked for + if (!opts.verbose) { + // Keep claudeMd.files entries but strip `lines` to reduce noise. Actually + // plan says verbose adds per-file bytes/lines — so non-verbose still shows + // them in tables; we keep as-is. This block intentionally left empty. + } + + return result; +} diff --git a/plugins/config-audit/scanners/whats-active.mjs b/plugins/config-audit/scanners/whats-active.mjs new file mode 100644 index 0000000..f705015 --- /dev/null +++ b/plugins/config-audit/scanners/whats-active.mjs @@ -0,0 +1,65 @@ +#!/usr/bin/env node + +/** + * whats-active CLI — produce a read-only inventory of everything Claude Code + * loads for a given repo path. Thin shim over scanners/lib/active-config-reader.mjs. + * + * Usage: + * node whats-active.mjs [path] [--json] [--output-file ] + * [--verbose] [--suggest-disables] + * + * Exit codes: 0=ok, 3=unrecoverable error. 
+ * Zero external dependencies. + */ + +import { resolve } from 'node:path'; +import { writeFile, stat } from 'node:fs/promises'; +import { readActiveConfig } from './lib/active-config-reader.mjs'; + +async function main() { + const args = process.argv.slice(2); + let targetPath = '.'; + let outputFile = null; + let jsonMode = false; + let verbose = false; + let suggestDisables = false; + + for (let i = 0; i < args.length; i++) { + if (args[i] === '--json') jsonMode = true; + else if (args[i] === '--verbose') verbose = true; + else if (args[i] === '--suggest-disables') suggestDisables = true; + else if (args[i] === '--output-file' && args[i + 1]) outputFile = args[++i]; + else if (!args[i].startsWith('-')) targetPath = args[i]; + } + + const absPath = resolve(targetPath); + try { + const s = await stat(absPath); + if (!s.isDirectory()) { + process.stderr.write(`Error: ${absPath} is not a directory\n`); + process.exit(3); + } + } catch { + process.stderr.write(`Error: path does not exist: ${absPath}\n`); + process.exit(3); + } + + const result = await readActiveConfig(absPath, { verbose, suggestDisables }); + const json = JSON.stringify(result, null, 2); + + if (outputFile) { + await writeFile(outputFile, json, 'utf-8'); + } + + if (jsonMode || !outputFile) { + process.stdout.write(json + '\n'); + } +} + +const isDirectRun = process.argv[1] && resolve(process.argv[1]) === resolve(new URL(import.meta.url).pathname); +if (isDirectRun) { + main().catch(err => { + process.stderr.write(`Fatal: ${err.message}\n`); + process.exit(3); + }); +} diff --git a/plugins/config-audit/tests/lib/active-config-reader.test.mjs b/plugins/config-audit/tests/lib/active-config-reader.test.mjs new file mode 100644 index 0000000..615ff25 --- /dev/null +++ b/plugins/config-audit/tests/lib/active-config-reader.test.mjs @@ -0,0 +1,596 @@ +import { describe, it, before, after, beforeEach, afterEach } from 'node:test'; +import assert from 'node:assert/strict'; +import { join, dirname, resolve } 
from 'node:path'; +import { mkdir, writeFile, rm, readFile } from 'node:fs/promises'; +import { tmpdir } from 'node:os'; +import { + estimateTokens, + detectGitRoot, + walkClaudeMdCascade, + readClaudeJsonProjectSlice, + enumeratePlugins, + enumerateSkills, + readActiveHooks, + readActiveMcpServers, + readActiveConfig, +} from '../../scanners/lib/active-config-reader.mjs'; + +function uniqueDir(suffix) { + return join(tmpdir(), `config-audit-acr-${suffix}-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`); +} + +/** + * Build a rich-repo fixture under `root`. + * Layout mirrors feature plan §8 — git-repo, CLAUDE.md cascade, settings layers, + * .mcp.json, fake-home with plugins + .claude.json. + */ +async function buildRichRepo(root) { + const fakeHome = join(root, 'fake-home'); + // Repo marker + await mkdir(join(root, '.git'), { recursive: true }); + await writeFile(join(root, '.git', 'HEAD'), 'ref: refs/heads/main\n'); + + // Project CLAUDE.md with @import + await mkdir(join(root, 'docs'), { recursive: true }); + await writeFile( + join(root, 'CLAUDE.md'), + '# Project Instructions\n\n@docs/conv.md\n\nBuild with care.\n', + ); + await writeFile(join(root, 'docs', 'conv.md'), '# Conventions\n\nUse conventional commits.\n'); + + // Settings cascade + await mkdir(join(root, '.claude', 'rules'), { recursive: true }); + await writeFile( + join(root, '.claude', 'settings.json'), + JSON.stringify({ + permissions: { allow: ['Read', 'Write'] }, + hooks: { + PreToolUse: [ + { matcher: 'Bash', hooks: [{ type: 'command', command: 'check.sh' }] }, + ], + }, + }, null, 2), + ); + await writeFile( + join(root, '.claude', 'settings.local.json'), + JSON.stringify({ env: { DEBUG: 'true' } }, null, 2), + ); + await writeFile(join(root, '.claude', 'rules', 'team.md'), '# Team Rule\n'); + + // Project .mcp.json + await writeFile( + join(root, '.mcp.json'), + JSON.stringify({ + mcpServers: { + alpha: { command: 'npx', args: ['alpha-server'] }, + beta: { command: 'npx', args: 
['beta-server'] }, + }, + }, null, 2), + ); + + // Fake HOME — user CLAUDE.md, settings, plugins, .claude.json + await mkdir(join(fakeHome, '.claude'), { recursive: true }); + await writeFile( + join(fakeHome, '.claude', 'CLAUDE.md'), + '# User Instructions\n\nBe terse.\n', + ); + await writeFile( + join(fakeHome, '.claude', 'settings.json'), + JSON.stringify({ + hooks: { + Stop: [{ hooks: [{ type: 'command', command: 'reminder.sh' }] }], + }, + }, null, 2), + ); + + // Plugin: demo plugin with 1 command, 1 skill, 1 hook + const pluginRoot = join( + fakeHome, '.claude', 'plugins', 'marketplaces', 'mp', 'plugins', 'demo', + ); + await mkdir(join(pluginRoot, '.claude-plugin'), { recursive: true }); + await writeFile( + join(pluginRoot, '.claude-plugin', 'plugin.json'), + JSON.stringify({ name: 'demo', description: 'test plugin', version: '0.1.0' }, null, 2), + ); + await mkdir(join(pluginRoot, 'commands'), { recursive: true }); + await writeFile( + join(pluginRoot, 'commands', 'foo.md'), + '---\nname: demo:foo\ndescription: foo\nmodel: sonnet\n---\n\nFoo command.\n', + ); + await mkdir(join(pluginRoot, 'skills', 'bar'), { recursive: true }); + await writeFile( + join(pluginRoot, 'skills', 'bar', 'SKILL.md'), + '---\nname: bar\ndescription: bar skill\n---\n\nBar skill body.\n', + ); + await mkdir(join(pluginRoot, 'hooks'), { recursive: true }); + await writeFile( + join(pluginRoot, 'hooks', 'hooks.json'), + JSON.stringify({ + hooks: { + PostToolUse: [{ hooks: [{ type: 'command', command: 'demo-hook.sh' }] }], + }, + }, null, 2), + ); + + // ~/.claude.json with projects slice matching the repo root + await writeFile( + join(fakeHome, '.claude.json'), + JSON.stringify({ + projects: { + [root]: { + mcpServers: { + gamma: { command: 'gamma-server' }, + }, + disabledMcpjsonServers: ['beta'], + }, + }, + }, null, 2), + ); + + return { root, fakeHome, pluginRoot }; +} + +// ───────────────────────────────────────────────────────────────────────── +// estimateTokens +// 
───────────────────────────────────────────────────────────────────────── + +describe('estimateTokens', () => { + it('markdown: 4 chars per token, rounded up', () => { + assert.equal(estimateTokens(400, 'markdown'), 100); + assert.equal(estimateTokens(401, 'markdown'), 101); + assert.equal(estimateTokens(0, 'markdown'), 0); + }); + + it('json: 3.5 chars per token, rounded up', () => { + assert.equal(estimateTokens(350, 'json'), 100); + assert.equal(estimateTokens(100, 'json'), 29); + }); + + it('frontmatter: caps at 600 bytes / 150 tokens', () => { + assert.equal(estimateTokens(100, 'frontmatter'), 25); + assert.equal(estimateTokens(600, 'frontmatter'), 150); + assert.equal(estimateTokens(10_000, 'frontmatter'), 150); + }); + + it('item: flat 15 regardless of bytes', () => { + assert.equal(estimateTokens(0, 'item'), 15); + assert.equal(estimateTokens(9999, 'item'), 15); + }); + + it('defaults to markdown when kind omitted', () => { + assert.equal(estimateTokens(400), 100); + }); + + it('handles invalid bytes gracefully', () => { + assert.equal(estimateTokens(-1, 'markdown'), 0); + assert.equal(estimateTokens(NaN, 'markdown'), 0); + }); +}); + +// ───────────────────────────────────────────────────────────────────────── +// detectGitRoot +// ───────────────────────────────────────────────────────────────────────── + +describe('detectGitRoot', () => { + let root; + before(async () => { + root = uniqueDir('git'); + await mkdir(join(root, '.git'), { recursive: true }); + await mkdir(join(root, 'src', 'deep'), { recursive: true }); + await writeFile(join(root, '.git', 'HEAD'), '\n'); + }); + after(async () => { await rm(root, { recursive: true, force: true }); }); + + it('finds .git in start dir', async () => { + const result = await detectGitRoot(root); + assert.equal(result, resolve(root)); + }); + + it('walks up to find .git', async () => { + const result = await detectGitRoot(join(root, 'src', 'deep')); + assert.equal(result, resolve(root)); + }); + + it('returns 
null when no .git in chain', async () => { + const noGit = uniqueDir('nogit'); + await mkdir(noGit, { recursive: true }); + try { + const result = await detectGitRoot(noGit); + // Could resolve to outer repo (the plugin repo) if tmpdir happens to be nested. + // Accept null OR a path that is NOT noGit itself. + if (result !== null) { + assert.notEqual(result, resolve(noGit)); + } + } finally { + await rm(noGit, { recursive: true, force: true }); + } + }); +}); + +// ───────────────────────────────────────────────────────────────────────── +// walkClaudeMdCascade +// ───────────────────────────────────────────────────────────────────────── + +describe('walkClaudeMdCascade', () => { + let fixture; + let originalHome; + + beforeEach(async () => { + fixture = await buildRichRepo(uniqueDir('cascade')); + originalHome = process.env.HOME; + process.env.HOME = fixture.fakeHome; + }); + + afterEach(async () => { + process.env.HOME = originalHome; + await rm(fixture.root, { recursive: true, force: true }); + }); + + it('returns files in load order (user first, then project, then imports)', async () => { + const result = await walkClaudeMdCascade(fixture.root); + const scopes = result.files.map(f => f.scope); + assert.ok(scopes.includes('user'), 'expected user scope'); + assert.ok(scopes.includes('project'), 'expected project scope'); + assert.ok(scopes.includes('import'), 'expected import scope'); + + // user CLAUDE.md should come before project CLAUDE.md + const userIdx = result.files.findIndex(f => f.scope === 'user'); + const projIdx = result.files.findIndex(f => f.scope === 'project'); + assert.ok(userIdx < projIdx, 'user scope must come before project'); + }); + + it('resolves @imports and marks them with parent', async () => { + const result = await walkClaudeMdCascade(fixture.root); + const imp = result.files.find(f => f.path.endsWith('docs/conv.md')); + assert.ok(imp, 'import should be discovered'); + assert.equal(imp.scope, 'import'); + assert.ok(imp.parent && 
imp.parent.endsWith('CLAUDE.md')); + }); + + it('counts bytes and lines', async () => { + const result = await walkClaudeMdCascade(fixture.root); + assert.ok(result.totalBytes > 0); + assert.ok(result.totalLines > 0); + for (const f of result.files) { + assert.ok(f.bytes > 0); + assert.ok(f.lines > 0); + } + }); + + it('computes estimatedTokens via markdown heuristic', async () => { + const result = await walkClaudeMdCascade(fixture.root); + assert.equal(result.estimatedTokens, Math.ceil(result.totalBytes / 4)); + }); + + it('handles missing user CLAUDE.md gracefully', async () => { + // Remove user CLAUDE.md + await rm(join(fixture.fakeHome, '.claude', 'CLAUDE.md')); + const result = await walkClaudeMdCascade(fixture.root); + const userFiles = result.files.filter(f => f.scope === 'user'); + assert.equal(userFiles.length, 0); + }); +}); + +// ───────────────────────────────────────────────────────────────────────── +// readClaudeJsonProjectSlice +// ───────────────────────────────────────────────────────────────────────── + +describe('readClaudeJsonProjectSlice', () => { + let fixture; + let originalHome; + + beforeEach(async () => { + fixture = await buildRichRepo(uniqueDir('slice')); + originalHome = process.env.HOME; + process.env.HOME = fixture.fakeHome; + }); + afterEach(async () => { + process.env.HOME = originalHome; + await rm(fixture.root, { recursive: true, force: true }); + }); + + it('finds exact-match project key', async () => { + const slice = await readClaudeJsonProjectSlice(fixture.root); + assert.equal(slice.projectKey, fixture.root); + assert.deepEqual(slice.disabledMcpjsonServers, ['beta']); + assert.ok('gamma' in slice.mcpServers); + }); + + it('returns empty slice when no .claude.json exists', async () => { + await rm(join(fixture.fakeHome, '.claude.json')); + const slice = await readClaudeJsonProjectSlice(fixture.root); + assert.equal(slice.projectKey, null); + assert.deepEqual(slice.mcpServers, {}); + }); + + it('longest-prefix match: deeper 
key wins over shallower', async () => { + // Rewrite .claude.json with two keys — ancestor and the repo + const parent = dirname(fixture.root); + const content = JSON.stringify({ + projects: { + [parent]: { mcpServers: { shallow: { command: 'shallow' } } }, + [fixture.root]: { mcpServers: { deep: { command: 'deep' } } }, + }, + }, null, 2); + await writeFile(join(fixture.fakeHome, '.claude.json'), content); + + const slice = await readClaudeJsonProjectSlice(fixture.root); + assert.equal(slice.projectKey, fixture.root); + assert.ok('deep' in slice.mcpServers); + assert.ok(!('shallow' in slice.mcpServers)); + }); + + it('ancestor prefix matches when target is a subdir of a key', async () => { + const parent = dirname(fixture.root); + await writeFile( + join(fixture.fakeHome, '.claude.json'), + JSON.stringify({ projects: { [parent]: { mcpServers: { anc: {} } } } }, null, 2), + ); + const slice = await readClaudeJsonProjectSlice(fixture.root); + assert.equal(slice.projectKey, parent); + }); + + it('returns null projectKey when no key matches', async () => { + await writeFile( + join(fixture.fakeHome, '.claude.json'), + JSON.stringify({ projects: { '/some/other/path': {} } }, null, 2), + ); + const slice = await readClaudeJsonProjectSlice(fixture.root); + assert.equal(slice.projectKey, null); + }); +}); + +// ───────────────────────────────────────────────────────────────────────── +// enumeratePlugins +// ───────────────────────────────────────────────────────────────────────── + +describe('enumeratePlugins', () => { + let fixture; + let originalHome; + + beforeEach(async () => { + fixture = await buildRichRepo(uniqueDir('plugins')); + originalHome = process.env.HOME; + process.env.HOME = fixture.fakeHome; + }); + afterEach(async () => { + process.env.HOME = originalHome; + await rm(fixture.root, { recursive: true, force: true }); + }); + + it('discovers plugin and reads plugin.json version', async () => { + const plugins = await enumeratePlugins(); + 
assert.ok(plugins.length >= 1); + const demo = plugins.find(p => p.name === 'demo'); + assert.ok(demo, 'demo plugin should be discovered'); + assert.equal(demo.version, '0.1.0'); + }); + + it('counts commands, skills, hooks', async () => { + const plugins = await enumeratePlugins(); + const demo = plugins.find(p => p.name === 'demo'); + assert.equal(demo.commands, 1); + assert.equal(demo.skills, 1); + assert.equal(demo.hooks, 1); + }); + + it('returns empty array when HOME has no plugins', async () => { + process.env.HOME = uniqueDir('empty'); + await mkdir(process.env.HOME, { recursive: true }); + try { + const plugins = await enumeratePlugins(); + assert.deepEqual(plugins, []); + } finally { + await rm(process.env.HOME, { recursive: true, force: true }); + } + }); +}); + +// ───────────────────────────────────────────────────────────────────────── +// enumerateSkills +// ───────────────────────────────────────────────────────────────────────── + +describe('enumerateSkills', () => { + let fixture; + let originalHome; + + beforeEach(async () => { + fixture = await buildRichRepo(uniqueDir('skills')); + originalHome = process.env.HOME; + process.env.HOME = fixture.fakeHome; + }); + afterEach(async () => { + process.env.HOME = originalHome; + await rm(fixture.root, { recursive: true, force: true }); + }); + + it('finds plugin skills', async () => { + const plugins = await enumeratePlugins(); + const skills = await enumerateSkills(plugins); + const bar = skills.find(s => s.name === 'bar'); + assert.ok(bar, 'plugin skill should be discovered'); + assert.equal(bar.source, 'plugin'); + assert.equal(bar.pluginName, 'demo'); + }); + + it('finds user skills', async () => { + // Add a user skill + await mkdir(join(fixture.fakeHome, '.claude', 'skills', 'userskill'), { recursive: true }); + await writeFile( + join(fixture.fakeHome, '.claude', 'skills', 'userskill', 'SKILL.md'), + '# user skill\n', + ); + const skills = await enumerateSkills([]); + const userSkill = 
skills.find(s => s.name === 'userskill'); + assert.ok(userSkill, 'user skill should be discovered'); + assert.equal(userSkill.source, 'user'); + }); +}); + +// ───────────────────────────────────────────────────────────────────────── +// readActiveHooks +// ───────────────────────────────────────────────────────────────────────── + +describe('readActiveHooks', () => { + let fixture; + let originalHome; + + beforeEach(async () => { + fixture = await buildRichRepo(uniqueDir('hooks')); + originalHome = process.env.HOME; + process.env.HOME = fixture.fakeHome; + }); + afterEach(async () => { + process.env.HOME = originalHome; + await rm(fixture.root, { recursive: true, force: true }); + }); + + it('merges hooks from user + project + plugin', async () => { + const plugins = await enumeratePlugins(); + const hooks = await readActiveHooks(fixture.root, plugins); + const sources = new Set(hooks.map(h => h.source)); + assert.ok(sources.has('user'), 'user hook present'); + assert.ok(sources.has('project'), 'project hook present'); + assert.ok([...sources].some(s => s.startsWith('plugin:')), 'plugin hook present'); + }); + + it('does not dedupe across scopes', async () => { + // Add duplicate hook in user and project settings + const dupeHook = { + hooks: { PreToolUse: [{ matcher: 'Bash', hooks: [{ type: 'command', command: 'same.sh' }] }] }, + }; + await writeFile(join(fixture.fakeHome, '.claude', 'settings.json'), JSON.stringify(dupeHook)); + await writeFile(join(fixture.root, '.claude', 'settings.json'), JSON.stringify(dupeHook)); + const hooks = await readActiveHooks(fixture.root, []); + const sameCmd = hooks.filter(h => h.command === 'same.sh'); + assert.equal(sameCmd.length, 2, 'should report both occurrences'); + }); +}); + +// ───────────────────────────────────────────────────────────────────────── +// readActiveMcpServers +// ───────────────────────────────────────────────────────────────────────── + +describe('readActiveMcpServers', () => { + let fixture; + let 
originalHome; + + beforeEach(async () => { + fixture = await buildRichRepo(uniqueDir('mcp')); + originalHome = process.env.HOME; + process.env.HOME = fixture.fakeHome; + }); + afterEach(async () => { + process.env.HOME = originalHome; + await rm(fixture.root, { recursive: true, force: true }); + }); + + it('merges project .mcp.json + .claude.json slice', async () => { + const servers = await readActiveMcpServers(fixture.root); + const names = servers.map(s => s.name); + assert.ok(names.includes('alpha'), 'alpha from project'); + assert.ok(names.includes('beta'), 'beta from project'); + assert.ok(names.includes('gamma'), 'gamma from .claude.json'); + }); + + it('honors disabledMcpjsonServers', async () => { + const servers = await readActiveMcpServers(fixture.root); + const beta = servers.find(s => s.name === 'beta'); + assert.equal(beta.enabled, false); + assert.equal(beta.disabledBy, 'disabledMcpjsonServers'); + + const alpha = servers.find(s => s.name === 'alpha'); + assert.equal(alpha.enabled, true); + assert.equal(alpha.disabledBy, null); + }); +}); + +// ───────────────────────────────────────────────────────────────────────── +// readActiveConfig (integration) +// ───────────────────────────────────────────────────────────────────────── + +describe('readActiveConfig (integration)', () => { + let fixture; + let originalHome; + + beforeEach(async () => { + fixture = await buildRichRepo(uniqueDir('full')); + originalHome = process.env.HOME; + process.env.HOME = fixture.fakeHome; + }); + afterEach(async () => { + process.env.HOME = originalHome; + await rm(fixture.root, { recursive: true, force: true }); + }); + + it('produces expected top-level shape', async () => { + const result = await readActiveConfig(fixture.root); + const keys = Object.keys(result).sort(); + assert.deepEqual(keys, [ + 'claudeMd', 'hooks', 'mcpServers', 'meta', 'plugins', + 'settings', 'skills', 'suggestDisables', 'totals', 'warnings', + ]); + }); + + it('meta contains required fields', 
async () => { + const result = await readActiveConfig(fixture.root); + assert.equal(result.meta.tool, 'config-audit:whats-active'); + assert.equal(result.meta.version, '1.0.0'); + assert.ok(typeof result.meta.generatedAt === 'string'); + assert.equal(result.meta.repoPath, resolve(fixture.root)); + assert.equal(result.meta.gitRoot, resolve(fixture.root)); + assert.equal(result.meta.projectKey, fixture.root); + assert.ok(typeof result.meta.durationMs === 'number'); + }); + + it('settings cascade reflects all three layers', async () => { + const result = await readActiveConfig(fixture.root); + const scopes = result.settings.cascade.map(c => c.scope); + assert.deepEqual(scopes, ['user', 'project', 'local']); + const user = result.settings.cascade.find(c => c.scope === 'user'); + const project = result.settings.cascade.find(c => c.scope === 'project'); + assert.equal(user.exists, true); + assert.equal(project.exists, true); + }); + + it('totals.grandTotal equals sum of category subtotals', async () => { + const result = await readActiveConfig(fixture.root); + const t = result.totals.estimatedTokens; + assert.equal(t.grandTotal, t.claudeMd + t.plugins + t.skills + t.mcpServers + t.hooks); + }); + + it('performance budget: durationMs < 2000', async () => { + const result = await readActiveConfig(fixture.root); + assert.ok(result.meta.durationMs < 2000, + `expected < 2000ms, got ${result.meta.durationMs}ms`); + }); + + it('token estimate within ±20% of hand-computed value', async () => { + const result = await readActiveConfig(fixture.root); + const expectedClaudeMd = Math.ceil(result.claudeMd.totalBytes / 4); + const low = Math.floor(expectedClaudeMd * 0.8); + const high = Math.ceil(expectedClaudeMd * 1.2); + assert.ok( + result.totals.estimatedTokens.claudeMd >= low && + result.totals.estimatedTokens.claudeMd <= high, + `claudeMd tokens ${result.totals.estimatedTokens.claudeMd} outside [${low}, ${high}]`, + ); + }); + + it('suggestDisables is null by default, object when 
flag set', async () => { + const noFlag = await readActiveConfig(fixture.root); + assert.equal(noFlag.suggestDisables, null); + + const withFlag = await readActiveConfig(fixture.root, { suggestDisables: true }); + assert.ok(withFlag.suggestDisables && Array.isArray(withFlag.suggestDisables.candidates)); + }); + + it('suggestDisables flags disabled MCP servers', async () => { + const result = await readActiveConfig(fixture.root, { suggestDisables: true }); + const betaCandidate = result.suggestDisables.candidates.find( + c => c.kind === 'mcp' && c.name === 'beta', + ); + assert.ok(betaCandidate, 'beta should be flagged as already disabled'); + assert.equal(betaCandidate.confidence, 'high'); + }); +});