feat(config-audit): --accurate-tokens API calibration (v5 N5) [skip-docs]

This commit is contained in:
Kjell Tore Guttormsen 2026-05-01 09:15:02 +02:00
commit b7414303de
3 changed files with 386 additions and 3 deletions

View file

@ -0,0 +1,126 @@
/**
* tokenizer-api.mjs — wrapper around Anthropic's count_tokens API for
* --accurate-tokens calibration.
*
* Surface:
* callCountTokensApi(text, apiKey, options)
* Promise<{ input_tokens: number }>
*
* Security:
* - API key is masked to first 8 chars + "..." in ALL error messages and
* ALL thrown errors.
* - Response body is NEVER included in thrown errors (may echo the key).
* - Logs go to stderr only on caller request — this module throws, doesn't log.
*
* Reliability:
* - 5-second AbortController timeout per request.
* - Exponential backoff on HTTP 429 (max 3 retries: 1s, 2s, 4s by default).
* - Non-429 HTTP errors throw immediately with status code only.
*
* Zero external dependencies. Requires globalThis.fetch (Node 18+).
*/
// Anthropic count_tokens endpoint and the API version header it requires.
const ENDPOINT = 'https://api.anthropic.com/v1/messages/count_tokens';
const ANTHROPIC_VERSION = '2023-06-01';
// Per-request abort deadline (the "5-second AbortController timeout" from the module header).
const TIMEOUT_MS = 5000;
// Retry policy for HTTP 429: up to 3 retries with exponential backoff (1s, 2s, 4s).
const DEFAULT_MAX_RETRIES = 3;
const DEFAULT_BACKOFF_BASE_MS = 1000;
/**
 * Reduce an API key to a safe, loggable form: its first 8 characters
 * followed by "...". Non-string or empty inputs yield "<missing>".
 * Route every user-supplied secret through this before it can appear in
 * an error message.
 */
export function maskKey(apiKey) {
  const usable = typeof apiKey === 'string' && apiKey.length > 0;
  return usable ? `${apiKey.slice(0, 8)}...` : '<missing>';
}
/** Resolve after `ms` milliseconds; used between 429 retry attempts. */
function sleep(ms) {
  return new Promise((resolve) => {
    setTimeout(resolve, ms);
  });
}
/**
 * Count the tokens in a single text payload via Anthropic's count_tokens API.
 * The endpoint requires a model parameter (default: claude-haiku-4-5), but
 * input token counts are tokenizer-driven rather than model-driven.
 *
 * Every thrown error carries only the masked key and, for HTTP failures, the
 * status code — never the raw key or the response body.
 *
 * @param {string} text - the content to count
 * @param {string} apiKey - Anthropic API key
 * @param {object} [options]
 * @param {number} [options.maxRetries=3] - extra attempts allowed on HTTP 429
 * @param {number} [options.backoffBaseMs=1000] - base for exponential backoff
 * @param {string} [options.model='claude-haiku-4-5']
 * @returns {Promise<{input_tokens: number}>}
 * @throws {Error} on timeout, network failure, malformed or incomplete
 *   responses, or a non-retryable HTTP status.
 */
export async function callCountTokensApi(text, apiKey, options = {}) {
  const retriesAllowed = options.maxRetries ?? DEFAULT_MAX_RETRIES;
  const baseDelayMs = options.backoffBaseMs ?? DEFAULT_BACKOFF_BASE_MS;
  const model = options.model ?? 'claude-haiku-4-5';
  if (typeof globalThis.fetch !== 'function') {
    throw new Error('fetch is not available — Node.js >= 18 required for --accurate-tokens');
  }
  const maskedKey = maskKey(apiKey);
  const requestBody = JSON.stringify({
    model,
    messages: [{ role: 'user', content: text }],
  });
  for (let attempt = 0; ; ) {
    const aborter = new AbortController();
    const deadline = setTimeout(() => aborter.abort(), TIMEOUT_MS);
    let response;
    try {
      response = await globalThis.fetch(ENDPOINT, {
        method: 'POST',
        headers: {
          'x-api-key': apiKey,
          'anthropic-version': ANTHROPIC_VERSION,
          'content-type': 'application/json',
        },
        body: requestBody,
        signal: aborter.signal,
      });
    } catch (err) {
      // Network or abort failure. Re-throw a fresh, key-masked Error rather
      // than propagating the original — its `cause`/properties may carry the
      // request init (and therefore the raw key).
      let why = 'network error';
      if (err?.name === 'AbortError') {
        why = 'request aborted (timeout 5s)';
      } else if (err?.message) {
        why = `network error: ${err.message}`;
      }
      throw new Error(`count_tokens API failed (key ${maskedKey}): ${why}`);
    } finally {
      clearTimeout(deadline);
    }
    if (response.ok) {
      let parsed;
      try {
        parsed = await response.json();
      } catch {
        throw new Error(`count_tokens API failed (key ${maskedKey}): malformed JSON response`);
      }
      if (typeof parsed?.input_tokens !== 'number') {
        throw new Error(`count_tokens API failed (key ${maskedKey}): missing input_tokens in response`);
      }
      return { input_tokens: parsed.input_tokens };
    }
    if (response.status !== 429 || attempt >= retriesAllowed) {
      // Non-retryable HTTP error. Body deliberately NOT included — it may
      // echo the API key on auth failures.
      throw new Error(`count_tokens API failed (key ${maskedKey}): HTTP ${response.status}`);
    }
    // 429: back off exponentially (baseDelayMs * 2^attempt), then retry.
    await sleep(baseDelayMs * 2 ** attempt);
    attempt++;
  }
}

View file

@ -6,22 +6,46 @@
*
* Usage:
* node token-hotspots-cli.mjs [path] [--json] [--output-file <path>] [--global]
* [--with-telemetry-recipe]
* [--with-telemetry-recipe] [--accurate-tokens]
*
* Exit codes: 0=ok, 3=unrecoverable error.
* Zero external dependencies.
*/
import { resolve, dirname, join } from 'node:path';
import { resolve, dirname } from 'node:path';
import { fileURLToPath } from 'node:url';
import { writeFile, stat } from 'node:fs/promises';
import { writeFile, readFile, stat } from 'node:fs/promises';
import { discoverConfigFiles } from './lib/file-discovery.mjs';
import { resetCounter } from './lib/output.mjs';
import { scan } from './token-hotspots.mjs';
import * as tokenizerApi from './lib/tokenizer-api.mjs';
const __dirname = dirname(fileURLToPath(import.meta.url));
// Bundled telemetry recipe, resolved relative to this script's directory.
const TELEMETRY_RECIPE_PATH = resolve(__dirname, '..', 'knowledge', 'cache-telemetry-recipe.md');
// How many top hotspots get their file contents sent to the count_tokens API.
const ACCURATE_TOKENS_SAMPLE_SIZE = 3;
/**
 * Calibrate token estimates against the live count_tokens API.
 * Reads the contents of up to ACCURATE_TOKENS_SAMPLE_SIZE hotspots and sums
 * the API-reported token counts. Calls run sequentially (one file at a time);
 * unreadable or path-less hotspots are skipped rather than failing the run.
 * API errors from callCountTokensApi propagate to the caller (already
 * key-masked by tokenizer-api.mjs).
 *
 * @param {Array<{path?: string}>} hotspots - scan results, most expensive first
 * @param {string} apiKey - Anthropic API key
 * @returns {Promise<{actual_tokens: number, source: string, sampled_hotspots: number}>}
 */
async function calibrateAgainstApi(hotspots, apiKey) {
  const sample = hotspots.slice(0, ACCURATE_TOKENS_SAMPLE_SIZE);
  let total = 0;
  for (const entry of sample) {
    if (!entry?.path) continue;
    let contents;
    try {
      contents = await readFile(entry.path, 'utf-8');
    } catch {
      // Best-effort: a hotspot file that vanished or is unreadable is skipped.
      continue;
    }
    const counted = await tokenizerApi.callCountTokensApi(contents, apiKey);
    total += counted.input_tokens;
  }
  return {
    actual_tokens: total,
    source: 'count_tokens_api',
    sampled_hotspots: sample.length,
  };
}
async function main() {
const args = process.argv.slice(2);
let targetPath = '.';
@ -29,11 +53,13 @@ async function main() {
let jsonMode = false;
let includeGlobal = false;
let withTelemetryRecipe = false;
let accurateTokens = false;
for (let i = 0; i < args.length; i++) {
if (args[i] === '--json') jsonMode = true;
else if (args[i] === '--global') includeGlobal = true;
else if (args[i] === '--with-telemetry-recipe') withTelemetryRecipe = true;
else if (args[i] === '--accurate-tokens') accurateTokens = true;
else if (args[i] === '--output-file' && args[i + 1]) outputFile = args[++i];
else if (!args[i].startsWith('-')) targetPath = args[i];
}
@ -69,6 +95,22 @@ async function main() {
payload.telemetry_recipe_path = TELEMETRY_RECIPE_PATH;
}
if (accurateTokens) {
const apiKey = process.env.ANTHROPIC_API_KEY;
if (!apiKey || apiKey.length === 0) {
process.stderr.write('ANTHROPIC_API_KEY not set — skipping API calibration\n');
payload.calibration = { skipped: 'no-api-key' };
} else {
try {
payload.calibration = await calibrateAgainstApi(result.hotspots || [], apiKey);
} catch (err) {
// Error message is already key-masked by tokenizer-api.mjs.
process.stderr.write(`Calibration error: ${err.message}\n`);
payload.calibration = { skipped: 'api-error', error: err.message };
}
}
}
const json = JSON.stringify(payload, null, 2);
if (outputFile) {

View file

@ -0,0 +1,215 @@
import { describe, it } from 'node:test';
import assert from 'node:assert/strict';
import { resolve } from 'node:path';
import { fileURLToPath } from 'node:url';
import { execFile } from 'node:child_process';
import { promisify } from 'node:util';
// Promisified execFile so tests can `await` CLI subprocess runs.
const exec = promisify(execFile);
const __dirname = fileURLToPath(new URL('.', import.meta.url));
// Repo root — assumes this test file sits two directories below it (TODO confirm).
const REPO = resolve(__dirname, '../..');
const CLI = resolve(REPO, 'scanners/token-hotspots-cli.mjs');
const TOKENIZER_MODULE = resolve(REPO, 'scanners/lib/tokenizer-api.mjs');
const FIXTURE = resolve(REPO, 'tests/fixtures/marketplace-large');
describe('--accurate-tokens (no API key)', () => {
  it('skips API calibration and reports calibration.skipped === "no-api-key"', async () => {
    // Strip the key from the inherited environment so the CLI takes the
    // skip path instead of calling the live API.
    const childEnv = { ...process.env };
    delete childEnv.ANTHROPIC_API_KEY;
    const { stdout, stderr } = await exec(
      'node',
      [CLI, FIXTURE, '--json', '--accurate-tokens'],
      { timeout: 30000, cwd: REPO, env: childEnv },
    );
    const payload = JSON.parse(stdout);
    assert.equal(payload.calibration?.skipped, 'no-api-key');
    // The skip reason must also be surfaced on stderr.
    assert.match(stderr, /ANTHROPIC_API_KEY not set/i);
  });
  it('does not include calibration field when --accurate-tokens absent', async () => {
    const { stdout } = await exec('node', [CLI, FIXTURE, '--json'], {
      timeout: 30000,
      cwd: REPO,
    });
    const payload = JSON.parse(stdout);
    assert.equal(payload.calibration, undefined);
  });
});
describe('tokenizer-api.mjs — key masking', () => {
  // Security contract under test: errors thrown by callCountTokensApi may
  // only ever contain the masked key (first 8 chars + "...") and an HTTP
  // status — never the full key and never the response body.
  it('masks API key in error messages to first 8 chars + "..."', async () => {
    const tokenizerApi = await import(TOKENIZER_MODULE);
    const fakeKey = 'sk-ant-FAKEKEY-1234567890';
    const originalFetch = globalThis.fetch;
    // Force the module's network-failure path.
    globalThis.fetch = async () => {
      const err = new Error('network failure');
      throw err;
    };
    let threw = null;
    try {
      await tokenizerApi.callCountTokensApi('hello', fakeKey, { maxRetries: 0 });
    } catch (e) {
      threw = e;
    } finally {
      // Restore the real fetch even if an assertion below fails.
      globalThis.fetch = originalFetch;
    }
    assert.ok(threw, 'expected an error to be thrown');
    assert.ok(
      !threw.message.includes('FAKEKEY-1234567890'),
      `key must NOT appear unmasked in error message; got: ${threw.message}`,
    );
    assert.ok(
      threw.message.includes('sk-ant-F'),
      `error must mention masked key prefix sk-ant-F...; got: ${threw.message}`,
    );
  });
  it('does NOT include response body in thrown errors on non-429 HTTP failure', async () => {
    const tokenizerApi = await import(TOKENIZER_MODULE);
    const fakeKey = 'sk-ant-LEAKYBODY-9999';
    // Auth failures may echo the key back in the body; the module must not
    // copy any part of it into the thrown error.
    const echoBody = `{"error": "invalid api key sk-ant-LEAKYBODY-9999"}`;
    const originalFetch = globalThis.fetch;
    globalThis.fetch = async () => ({
      ok: false,
      status: 401,
      statusText: 'Unauthorized',
      text: async () => echoBody,
      json: async () => JSON.parse(echoBody),
    });
    let threw = null;
    try {
      await tokenizerApi.callCountTokensApi('hi', fakeKey, { maxRetries: 0 });
    } catch (e) {
      threw = e;
    } finally {
      globalThis.fetch = originalFetch;
    }
    assert.ok(threw);
    assert.ok(
      !threw.message.includes('LEAKYBODY-9999'),
      `body must NOT echo back into thrown message; got: ${threw.message}`,
    );
    // The bare status code is the only HTTP detail allowed to surface.
    assert.match(threw.message, /401/);
  });
  it('uses AbortController with a 5-second timeout', async () => {
    const tokenizerApi = await import(TOKENIZER_MODULE);
    const fakeKey = 'sk-ant-TIMEOUTKEY-0000';
    let capturedSignal = null;
    const originalFetch = globalThis.fetch;
    // NOTE(review): this only verifies that *a* signal is wired into fetch —
    // the 5-second value itself lives in the module and is not asserted here.
    globalThis.fetch = async (_url, init) => {
      capturedSignal = init?.signal;
      return {
        ok: true,
        status: 200,
        statusText: 'OK',
        json: async () => ({ input_tokens: 42 }),
      };
    };
    try {
      const result = await tokenizerApi.callCountTokensApi('hi', fakeKey, { maxRetries: 0 });
      assert.equal(result.input_tokens, 42);
      assert.ok(capturedSignal, 'fetch must be called with an AbortController signal');
      assert.ok(typeof capturedSignal.aborted === 'boolean');
    } finally {
      globalThis.fetch = originalFetch;
    }
  });
  it('retries on 429 with exponential backoff (max 3 retries)', async () => {
    const tokenizerApi = await import(TOKENIZER_MODULE);
    const fakeKey = 'sk-ant-RETRYKEY-0000';
    let calls = 0;
    const originalFetch = globalThis.fetch;
    // First two calls are rate-limited; the third succeeds.
    globalThis.fetch = async () => {
      calls++;
      if (calls <= 2) {
        return {
          ok: false,
          status: 429,
          statusText: 'Too Many Requests',
          text: async () => '',
          json: async () => ({}),
        };
      }
      return {
        ok: true,
        status: 200,
        statusText: 'OK',
        json: async () => ({ input_tokens: 100 }),
      };
    };
    try {
      // backoffBaseMs: 1 keeps the exponential waits at a few ms in tests.
      const result = await tokenizerApi.callCountTokensApi('hello', fakeKey, {
        maxRetries: 3,
        backoffBaseMs: 1,
      });
      assert.equal(result.input_tokens, 100);
      assert.equal(calls, 3, 'expected 2 retries before success on third call');
    } finally {
      globalThis.fetch = originalFetch;
    }
  });
  it('sends required headers: x-api-key, anthropic-version, content-type', async () => {
    const tokenizerApi = await import(TOKENIZER_MODULE);
    const fakeKey = 'sk-ant-HEADERTEST-0000';
    let capturedInit = null;
    const originalFetch = globalThis.fetch;
    globalThis.fetch = async (_url, init) => {
      capturedInit = init;
      return {
        ok: true,
        status: 200,
        statusText: 'OK',
        json: async () => ({ input_tokens: 10 }),
      };
    };
    try {
      await tokenizerApi.callCountTokensApi('hi', fakeKey, { maxRetries: 0 });
      const headers = capturedInit?.headers || {};
      assert.equal(headers['x-api-key'], fakeKey);
      assert.equal(headers['anthropic-version'], '2023-06-01');
      assert.equal(headers['content-type'], 'application/json');
    } finally {
      globalThis.fetch = originalFetch;
    }
  });
});
describe('--accurate-tokens (mocked fetch — happy path)', () => {
  it('returns input_tokens from mocked fetch response', async () => {
    // Note: the v5 plan called for `mock.method(tokenizerApi, ...)`, but ESM
    // exports are read-only bindings, so that pattern can't work. Mocking at
    // the globalThis.fetch boundary — the real external dependency — gives
    // equivalent coverage. A subprocess CLI run can't carry a mock across
    // process boundaries, so this unit-level fetch mock plus the no-key
    // subprocess test are the two coverage points.
    const tokenizerApi = await import(TOKENIZER_MODULE);
    const fakeKey = 'sk-ant-MOCKED-0000';
    const realFetch = globalThis.fetch;
    globalThis.fetch = async () => ({
      ok: true,
      status: 200,
      statusText: 'OK',
      json: async () => ({ input_tokens: 4200 }),
    });
    try {
      const { input_tokens } = await tokenizerApi.callCountTokensApi('hello world', fakeKey, {
        maxRetries: 0,
      });
      assert.equal(input_tokens, 4200);
    } finally {
      globalThis.fetch = realFetch;
    }
  });
});