Created
March 20, 2026 20:52
-
-
Save Munksgaard/ec4f52483465ef2f58557c459defa379 to your computer and use it in GitHub Desktop.
Put it in ~/.pi/agent/extensions/hashline/
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| /** | |
| * Hashline Extension — Replaces the built-in `edit` tool with a hashline-based | |
| * edit tool, and wraps `read` to tag every line with `LINE#ID` content hashes. | |
| * | |
| * Inspired by https://blog.can.ac/2026/02/12/the-harness-problem/ | |
| * and the oh-my-pi implementation by Can Bölük. | |
| * | |
| * The model references lines by their `LINE#ID` tag instead of reproducing | |
| * exact old text. If the file changes, hash mismatches are caught before | |
| * any mutation occurs. | |
| */ | |
| import type { ExtensionAPI } from "@mariozechner/pi-coding-agent"; | |
| import { | |
| truncateHead, | |
| DEFAULT_MAX_BYTES, | |
| DEFAULT_MAX_LINES, | |
| formatSize, | |
| } from "@mariozechner/pi-coding-agent"; | |
| import { Type } from "@sinclair/typebox"; | |
| import { StringEnum } from "@mariozechner/pi-ai"; | |
| import { readFile, writeFile, access } from "fs/promises"; | |
| import { constants } from "fs"; | |
| import { resolve, extname } from "path"; | |
| // ═══════════════════════════════════════════════════════════════════════════ | |
| // xxHash32 — Pure JavaScript implementation | |
| // ═══════════════════════════════════════════════════════════════════════════ | |
| const PRIME32_1 = 0x9e3779b1; | |
| const PRIME32_2 = 0x85ebca77; | |
| const PRIME32_3 = 0xc2b2ae3d; | |
| const PRIME32_4 = 0x27d4eb2f; | |
| const PRIME32_5 = 0x165667b1; | |
| function rotl32(x: number, r: number): number { | |
| return ((x << r) | (x >>> (32 - r))) >>> 0; | |
| } | |
/**
 * xxHash32 of a UTF-8 string, following the reference XXH32 algorithm:
 * 16-byte stripes through four parallel accumulators, then a 4-byte tail,
 * then a 1-byte tail, then the final avalanche mix. All arithmetic is kept
 * in unsigned 32-bit range via `>>> 0` and `Math.imul`.
 *
 * @param input - Text to hash (encoded as UTF-8 before hashing).
 * @param seed - Optional seed; different seeds yield unrelated hashes.
 * @returns Unsigned 32-bit hash value.
 */
function xxHash32(input: string, seed: number = 0): number {
  const buf = Buffer.from(input, "utf-8");
  const len = buf.length;
  let h32: number;
  let i = 0;
  if (len >= 16) {
    // Four accumulators seeded per the spec; each consumes one 32-bit lane
    // of every 16-byte stripe.
    let v1 = (seed + PRIME32_1 + PRIME32_2) >>> 0;
    let v2 = (seed + PRIME32_2) >>> 0;
    let v3 = (seed + 0) >>> 0;
    let v4 = (seed - PRIME32_1) >>> 0;
    while (i <= len - 16) {
      v1 = Math.imul(rotl32((v1 + Math.imul(buf.readUInt32LE(i), PRIME32_2)) >>> 0, 13), PRIME32_1) >>> 0;
      i += 4;
      v2 = Math.imul(rotl32((v2 + Math.imul(buf.readUInt32LE(i), PRIME32_2)) >>> 0, 13), PRIME32_1) >>> 0;
      i += 4;
      v3 = Math.imul(rotl32((v3 + Math.imul(buf.readUInt32LE(i), PRIME32_2)) >>> 0, 13), PRIME32_1) >>> 0;
      i += 4;
      v4 = Math.imul(rotl32((v4 + Math.imul(buf.readUInt32LE(i), PRIME32_2)) >>> 0, 13), PRIME32_1) >>> 0;
      i += 4;
    }
    // Converge the four accumulators into one 32-bit state.
    h32 = (rotl32(v1, 1) + rotl32(v2, 7) + rotl32(v3, 12) + rotl32(v4, 18)) >>> 0;
  } else {
    // Short input: skip the stripe phase entirely.
    h32 = (seed + PRIME32_5) >>> 0;
  }
  h32 = (h32 + len) >>> 0;
  // Remaining whole 4-byte words.
  while (i <= len - 4) {
    h32 = Math.imul(rotl32((h32 + Math.imul(buf.readUInt32LE(i), PRIME32_3)) >>> 0, 17), PRIME32_4) >>> 0;
    i += 4;
  }
  // Remaining single bytes.
  while (i < len) {
    h32 = Math.imul(rotl32((h32 + Math.imul(buf[i], PRIME32_5)) >>> 0, 11), PRIME32_1) >>> 0;
    i += 1;
  }
  // Final avalanche: spread entropy across all output bits.
  h32 = Math.imul(h32 ^ (h32 >>> 15), PRIME32_2) >>> 0;
  h32 = Math.imul(h32 ^ (h32 >>> 13), PRIME32_3) >>> 0;
  h32 = (h32 ^ (h32 >>> 16)) >>> 0;
  return h32;
}
| // ═══════════════════════════════════════════════════════════════════════════ | |
| // Hashline Core | |
| // ═══════════════════════════════════════════════════════════════════════════ | |
| const NIBBLE_STR = "ZPMQVRWSNKTXJBYH"; | |
| const DICT: string[] = Array.from({ length: 256 }, (_, i) => { | |
| const h = i >>> 4; | |
| const l = i & 0x0f; | |
| return `${NIBBLE_STR[h]}${NIBBLE_STR[l]}`; | |
| }); | |
| const RE_SIGNIFICANT = /[\p{L}\p{N}]/u; | |
| function computeLineHash(idx: number, line: string): string { | |
| line = line.replace(/\r/g, "").trimEnd(); | |
| let seed = 0; | |
| if (!RE_SIGNIFICANT.test(line)) { | |
| seed = idx; | |
| } | |
| return DICT[xxHash32(line, seed) & 0xff]; | |
| } | |
| function formatLineTag(line: number, text: string): string { | |
| return `${line}#${computeLineHash(line, text)}`; | |
| } | |
| function formatHashLines(text: string, startLine = 1): string { | |
| const lines = text.split("\n"); | |
| return lines | |
| .map((line, i) => { | |
| const num = startLine + i; | |
| return `${formatLineTag(num, line)}:${line}`; | |
| }) | |
| .join("\n"); | |
| } | |
| // ═══════════════════════════════════════════════════════════════════════════ | |
| // Anchor / Tag parsing | |
| // ═══════════════════════════════════════════════════════════════════════════ | |
| interface Anchor { | |
| line: number; | |
| hash: string; | |
| } | |
| function parseTag(ref: string): Anchor { | |
| const match = ref.match(/^\s*[>+-]*\s*(\d+)\s*#\s*([ZPMQVRWSNKTXJBYH]{2})/); | |
| if (!match) { | |
| throw new Error(`Invalid line reference "${ref}". Expected format "LINE#ID" (e.g. "5#VR").`); | |
| } | |
| const line = parseInt(match[1], 10); | |
| if (line < 1) { | |
| throw new Error(`Line number must be >= 1, got ${line} in "${ref}".`); | |
| } | |
| return { line, hash: match[2] }; | |
| } | |
| // ═══════════════════════════════════════════════════════════════════════════ | |
| // Hash Mismatch Error | |
| // ═══════════════════════════════════════════════════════════════════════════ | |
| interface HashMismatch { | |
| line: number; | |
| expected: string; | |
| actual: string; | |
| } | |
| const MISMATCH_CONTEXT = 2; | |
| function formatMismatchMessage(mismatches: HashMismatch[], fileLines: string[]): string { | |
| const mismatchSet = new Map<number, HashMismatch>(); | |
| for (const m of mismatches) { | |
| mismatchSet.set(m.line, m); | |
| } | |
| const displayLines = new Set<number>(); | |
| for (const m of mismatches) { | |
| const lo = Math.max(1, m.line - MISMATCH_CONTEXT); | |
| const hi = Math.min(fileLines.length, m.line + MISMATCH_CONTEXT); | |
| for (let i = lo; i <= hi; i++) { | |
| displayLines.add(i); | |
| } | |
| } | |
| const sorted = [...displayLines].sort((a, b) => a - b); | |
| const lines: string[] = []; | |
| lines.push( | |
| `${mismatches.length} line${mismatches.length > 1 ? "s have" : " has"} changed since last read. Use the updated LINE#ID references shown below (>>> marks changed lines).`, | |
| ); | |
| lines.push(""); | |
| let prevLine = -1; | |
| for (const lineNum of sorted) { | |
| if (prevLine !== -1 && lineNum > prevLine + 1) { | |
| lines.push(" ..."); | |
| } | |
| prevLine = lineNum; | |
| const text = fileLines[lineNum - 1]; | |
| const hash = computeLineHash(lineNum, text); | |
| const prefix = `${lineNum}#${hash}`; | |
| if (mismatchSet.has(lineNum)) { | |
| lines.push(`>>> ${prefix}:${text}`); | |
| } else { | |
| lines.push(` ${prefix}:${text}`); | |
| } | |
| } | |
| return lines.join("\n"); | |
| } | |
| // ═══════════════════════════════════════════════════════════════════════════ | |
| // Edit types and application | |
| // ═══════════════════════════════════════════════════════════════════════════ | |
/**
 * One edit operation, parsed from the tool call.
 * - replace: rewrite lines pos..end (or just pos) with `lines`; empty deletes.
 * - append: insert `lines` after anchor pos (or at end-of-file when pos is absent).
 * - prepend: insert `lines` before anchor pos (or at start-of-file when pos is absent).
 */
interface HashlineEdit {
  op: "replace" | "append" | "prepend";
  pos?: Anchor; // primary anchor; optional for append/prepend
  end?: Anchor; // inclusive range end, replace only
  lines: string[]; // literal content to insert, one entry per line
}
| function stripNewLinePrefixes(lines: string[]): string[] { | |
| if (lines.length === 0) return lines; | |
| // Check for hashline prefixes (N#XX:content) | |
| const HASHLINE_PREFIX_RE = /^(\d+)#([ZPMQVRWSNKTXJBYH]{2}):(.*)/; | |
| const HASH_ONLY_PREFIX_RE = /^#([ZPMQVRWSNKTXJBYH]{2}):(.*)/; | |
| let nonEmpty = 0; | |
| let hashPrefixCount = 0; | |
| let hashOnlyPrefixCount = 0; | |
| let plusPrefixCount = 0; | |
| for (const line of lines) { | |
| if (line.length === 0) continue; | |
| nonEmpty++; | |
| if (HASHLINE_PREFIX_RE.test(line)) hashPrefixCount++; | |
| else if (HASH_ONLY_PREFIX_RE.test(line)) hashOnlyPrefixCount++; | |
| if (/^\+(?!\+)/.test(line)) plusPrefixCount++; | |
| } | |
| // Strip hashline prefixes if all non-empty lines have them | |
| if (nonEmpty > 0 && hashPrefixCount === nonEmpty) { | |
| return lines.map((line) => { | |
| const m = HASHLINE_PREFIX_RE.exec(line); | |
| return m ? m[3] : line; | |
| }); | |
| } | |
| if (nonEmpty > 0 && hashOnlyPrefixCount === nonEmpty) { | |
| return lines.map((line) => { | |
| const m = HASH_ONLY_PREFIX_RE.exec(line); | |
| return m ? m[2] : line; | |
| }); | |
| } | |
| // Strip diff '+' markers if majority have them | |
| if (nonEmpty > 0 && plusPrefixCount / nonEmpty >= 0.5) { | |
| return lines.map((line) => { | |
| if (/^\+(?!\+)/.test(line)) return line.slice(1); | |
| return line; | |
| }); | |
| } | |
| return lines; | |
| } | |
| function parseEditLines(input: string[] | string | null | undefined): string[] { | |
| if (input === null || input === undefined) return []; | |
| if (typeof input === "string") { | |
| if (input === "") return [""]; | |
| const lines = input.split("\n"); | |
| // Trim trailing empty from string split | |
| if (lines.length > 0 && lines[lines.length - 1] === "") { | |
| lines.pop(); | |
| } | |
| return stripNewLinePrefixes(lines); | |
| } | |
| return stripNewLinePrefixes(input); | |
| } | |
| const MIN_AUTOCORRECT_LENGTH = 2; | |
| function shouldAutocorrect(line: string, otherLine: string): boolean { | |
| if (!line || line !== otherLine) return false; | |
| line = line.trim(); | |
| if (line.length < MIN_AUTOCORRECT_LENGTH) { | |
| return line.endsWith("}") || line.endsWith(")"); | |
| } | |
| return true; | |
| } | |
/**
 * Apply a batch of hashline edits to `text`.
 *
 * Pipeline: (1) validate every anchor's hash against the current content and
 * collect ALL mismatches before any mutation; (2) auto-correct escaped "\t"
 * indentation; (3) drop exact-duplicate edits; (4) sort edits bottom-up so
 * line numbers stay valid while splicing; (5) apply.
 *
 * @returns Edited text, the lowest changed line number (if any), and
 *   auto-correction warnings.
 * @throws Error on out-of-range lines, inverted ranges, or hash mismatches
 *   (the mismatch message includes refreshed LINE#ID tags).
 */
function applyHashlineEdits(
  text: string,
  edits: HashlineEdit[],
): {
  result: string;
  firstChangedLine: number | undefined;
  warnings: string[];
} {
  if (edits.length === 0) {
    return { result: text, firstChangedLine: undefined, warnings: [] };
  }
  const fileLines = text.split("\n");
  // Snapshot used for no-op detection in single-line replaces below.
  const originalFileLines = [...fileLines];
  let firstChangedLine: number | undefined;
  const warnings: string[] = [];
  // Pre-validate: collect all hash mismatches before mutating
  const mismatches: HashMismatch[] = [];
  function validateRef(ref: Anchor): boolean {
    if (ref.line < 1 || ref.line > fileLines.length) {
      throw new Error(`Line ${ref.line} does not exist (file has ${fileLines.length} lines)`);
    }
    const actualHash = computeLineHash(ref.line, fileLines[ref.line - 1]);
    if (actualHash === ref.hash) {
      return true;
    }
    mismatches.push({ line: ref.line, expected: ref.hash, actual: actualHash });
    return false;
  }
  for (const edit of edits) {
    switch (edit.op) {
      case "replace": {
        if (edit.pos) {
          if (edit.end) {
            // Validate both ends even when the first fails, so the error
            // reports every stale anchor at once.
            const startValid = validateRef(edit.pos);
            const endValid = validateRef(edit.end);
            if (!startValid || !endValid) continue;
            if (edit.pos.line > edit.end.line) {
              throw new Error(`Range start line ${edit.pos.line} must be <= end line ${edit.end.line}`);
            }
          } else {
            if (!validateRef(edit.pos)) continue;
          }
        }
        break;
      }
      case "append": {
        if (edit.pos && !validateRef(edit.pos)) continue;
        if (edit.lines.length === 0) {
          // Empty payload on an insert means "insert one blank line".
          edit.lines = [""];
        }
        break;
      }
      case "prepend": {
        if (edit.pos && !validateRef(edit.pos)) continue;
        if (edit.lines.length === 0) {
          edit.lines = [""];
        }
        break;
      }
    }
  }
  if (mismatches.length > 0) {
    throw new Error(formatMismatchMessage(mismatches, fileLines));
  }
  // Auto-correct escaped tab indentation
  for (const edit of edits) {
    if (edit.lines.length === 0) continue;
    const hasEscapedTabs = edit.lines.some((line) => line.includes("\\t"));
    if (!hasEscapedTabs) continue;
    // If real tabs are also present, the literal "\t" text is likely
    // intentional content — leave it alone.
    const hasRealTabs = edit.lines.some((line) => line.includes("\t"));
    if (hasRealTabs) continue;
    let correctedCount = 0;
    const corrected = edit.lines.map((line) =>
      line.replace(/^((?:\\t)+)/, (escaped) => {
        correctedCount += escaped.length / 2;
        return "\t".repeat(escaped.length / 2);
      }),
    );
    if (correctedCount === 0) continue;
    edit.lines = corrected;
    warnings.push(
      `Auto-corrected escaped tab indentation: converted leading \\t sequence(s) to real tab characters`,
    );
  }
  // Deduplicate identical edits
  const seenEditKeys = new Map<string, number>();
  const dedupIndices = new Set<number>();
  for (let i = 0; i < edits.length; i++) {
    const edit = edits[i];
    let lineKey: string;
    switch (edit.op) {
      case "replace":
        lineKey = edit.end ? `r:${edit.pos!.line}:${edit.end.line}` : `s:${edit.pos!.line}`;
        break;
      case "append":
        lineKey = edit.pos ? `i:${edit.pos.line}` : "ieof";
        break;
      case "prepend":
        lineKey = edit.pos ? `ib:${edit.pos.line}` : "ibef";
        break;
    }
    const dstKey = `${lineKey}:${edit.lines.join("\n")}`;
    if (seenEditKeys.has(dstKey)) {
      dedupIndices.add(i);
    } else {
      seenEditKeys.set(dstKey, i);
    }
  }
  if (dedupIndices.size > 0) {
    // Remove duplicates back-to-front so remaining indices stay valid.
    for (let i = edits.length - 1; i >= 0; i--) {
      if (dedupIndices.has(i)) edits.splice(i, 1);
    }
  }
  // Sort bottom-up
  const annotated = edits.map((edit, idx) => {
    let sortLine: number;
    let precedence: number;
    switch (edit.op) {
      case "replace":
        sortLine = edit.end ? edit.end.line : edit.pos!.line;
        precedence = 0;
        break;
      case "append":
        sortLine = edit.pos ? edit.pos.line : fileLines.length + 1;
        precedence = 1;
        break;
      case "prepend":
        sortLine = edit.pos ? edit.pos.line : 0;
        precedence = 2;
        break;
    }
    return { edit, idx, sortLine, precedence };
  });
  // Highest line first; replace before append before prepend at the same
  // line; original submission order breaks any remaining ties.
  annotated.sort((a, b) => b.sortLine - a.sortLine || a.precedence - b.precedence || a.idx - b.idx);
  function trackFirstChanged(line: number): void {
    if (firstChangedLine === undefined || line < firstChangedLine) {
      firstChangedLine = line;
    }
  }
  // Apply edits bottom-up
  for (const { edit } of annotated) {
    switch (edit.op) {
      case "replace": {
        if (!edit.end) {
          // Single line replace
          const origLines = originalFileLines.slice(edit.pos!.line - 1, edit.pos!.line);
          if (origLines.length === edit.lines.length && origLines.every((l, i) => l === edit.lines[i])) {
            break; // noop
          }
          fileLines.splice(edit.pos!.line - 1, 1, ...edit.lines);
          trackFirstChanged(edit.pos!.line);
        } else {
          // Range replace
          const count = edit.end.line - edit.pos!.line + 1;
          const newLines = [...edit.lines];
          // Auto-correct trailing duplication
          const trailingLine = newLines[newLines.length - 1]?.trimEnd();
          const nextSurviving = fileLines[edit.end.line]?.trimEnd();
          if (
            shouldAutocorrect(trailingLine, nextSurviving) &&
            fileLines[edit.end.line - 1]?.trimEnd() !== trailingLine
          ) {
            newLines.pop();
            warnings.push(
              `Auto-corrected range replace: removed trailing replacement line "${trailingLine}" that duplicated next surviving line`,
            );
          }
          // Auto-correct leading duplication
          const leadingLine = newLines[0]?.trimEnd();
          const prevSurviving = fileLines[edit.pos!.line - 2]?.trimEnd();
          if (
            shouldAutocorrect(leadingLine, prevSurviving) &&
            fileLines[edit.pos!.line - 1]?.trimEnd() !== leadingLine
          ) {
            newLines.shift();
            warnings.push(
              `Auto-corrected range replace: removed leading replacement line "${leadingLine}" that duplicated preceding surviving line`,
            );
          }
          fileLines.splice(edit.pos!.line - 1, count, ...newLines);
          trackFirstChanged(edit.pos!.line);
        }
        break;
      }
      case "append": {
        if (edit.lines.length === 0) break;
        if (edit.pos) {
          fileLines.splice(edit.pos.line, 0, ...edit.lines);
          trackFirstChanged(edit.pos.line + 1);
        } else {
          // Append at EOF; a file that is a single empty line is replaced
          // rather than extended, avoiding a stray leading blank line.
          if (fileLines.length === 1 && fileLines[0] === "") {
            fileLines.splice(0, 1, ...edit.lines);
            trackFirstChanged(1);
          } else {
            fileLines.splice(fileLines.length, 0, ...edit.lines);
            trackFirstChanged(fileLines.length - edit.lines.length + 1);
          }
        }
        break;
      }
      case "prepend": {
        if (edit.lines.length === 0) break;
        if (edit.pos) {
          fileLines.splice(edit.pos.line - 1, 0, ...edit.lines);
          trackFirstChanged(edit.pos.line);
        } else {
          if (fileLines.length === 1 && fileLines[0] === "") {
            fileLines.splice(0, 1, ...edit.lines);
          } else {
            fileLines.splice(0, 0, ...edit.lines);
          }
          trackFirstChanged(1);
        }
        break;
      }
    }
  }
  return {
    result: fileLines.join("\n"),
    firstChangedLine,
    warnings,
  };
}
| // ═══════════════════════════════════════════════════════════════════════════ | |
| // Unified diff generation (matches built-in edit tool output) | |
| // ═══════════════════════════════════════════════════════════════════════════ | |
| /** | |
| * Compute the longest common subsequence (LCS) table for two line arrays. | |
| * Returns a 2D array where lcs[i][j] = length of LCS of old[0..i-1] and new[0..j-1]. | |
| * Uses O(min(m,n)) space via two-row optimization for the table, | |
| * then reconstructs the full edit script. | |
| */ | |
| function computeLineEdits( | |
| oldLines: string[], | |
| newLines: string[], | |
| ): Array<{ type: "keep" | "remove" | "add"; oldIdx?: number; newIdx?: number }> { | |
| const n = oldLines.length; | |
| const m = newLines.length; | |
| // Myers-style or simple LCS — for correctness and simplicity, use DP with O(n*m). | |
| // For very large files this could be slow, but it matches the built-in tool's behavior. | |
| // We use a two-row DP for space efficiency, then backtrack. | |
| // For backtracking we need the full table — but that's O(n*m) space. | |
| // Instead, use the Hirschberg approach or just build the table for reasonable sizes. | |
| // For files up to ~5000 lines this is fine. | |
| // Build full LCS table for backtracking | |
| const dp: Uint32Array[] = new Array(n + 1); | |
| for (let i = 0; i <= n; i++) { | |
| dp[i] = new Uint32Array(m + 1); | |
| } | |
| for (let i = 1; i <= n; i++) { | |
| for (let j = 1; j <= m; j++) { | |
| if (oldLines[i - 1] === newLines[j - 1]) { | |
| dp[i][j] = dp[i - 1][j - 1] + 1; | |
| } else { | |
| dp[i][j] = Math.max(dp[i - 1][j], dp[i][j - 1]); | |
| } | |
| } | |
| } | |
| // Backtrack to find the edit script | |
| const edits: Array<{ type: "keep" | "remove" | "add"; oldIdx?: number; newIdx?: number }> = []; | |
| let i = n; | |
| let j = m; | |
| while (i > 0 || j > 0) { | |
| if (i > 0 && j > 0 && oldLines[i - 1] === newLines[j - 1]) { | |
| edits.push({ type: "keep", oldIdx: i - 1, newIdx: j - 1 }); | |
| i--; | |
| j--; | |
| } else if (j > 0 && (i === 0 || dp[i][j - 1] >= dp[i - 1][j])) { | |
| edits.push({ type: "add", newIdx: j - 1 }); | |
| j--; | |
| } else { | |
| edits.push({ type: "remove", oldIdx: i - 1 }); | |
| i--; | |
| } | |
| } | |
| edits.reverse(); | |
| return edits; | |
| } | |
/**
 * Render a compact line-numbered diff between two file contents.
 *
 * Output lines look like "+NNN text" / "-NNN text" (space-prefixed for kept
 * context); runs of hidden unchanged lines collapse to a single "..." row.
 * `contextLines` unchanged lines are kept on each side of every change.
 *
 * @returns The diff text, the first changed 1-indexed line number (if any),
 *   and counts of added/removed lines.
 */
function generateDiff(
  oldContent: string,
  newContent: string,
  contextLines: number = 4,
): { diff: string; firstChangedLine: number | undefined; addedLines: number; removedLines: number } {
  const oldLines = oldContent.split("\n");
  const newLines = newContent.split("\n");
  // Gutter width: widest line number on either side, for right alignment.
  const maxLineNum = Math.max(oldLines.length, newLines.length);
  const lineNumWidth = String(maxLineNum).length;
  const edits = computeLineEdits(oldLines, newLines);
  // Mark which edit indices are changes (add/remove)
  const isChange = edits.map((e) => e.type !== "keep");
  // For each edit, determine if it's within `contextLines` of a change
  const showLine = new Array(edits.length).fill(false);
  let lastChangeIdx = -Infinity;
  // Forward pass: mark lines after changes
  for (let i = 0; i < edits.length; i++) {
    if (isChange[i]) {
      lastChangeIdx = i;
      showLine[i] = true;
    } else if (i - lastChangeIdx <= contextLines) {
      showLine[i] = true;
    }
  }
  // Backward pass: mark lines before changes
  let nextChangeIdx = Infinity;
  for (let i = edits.length - 1; i >= 0; i--) {
    if (isChange[i]) {
      nextChangeIdx = i;
    } else if (nextChangeIdx - i <= contextLines) {
      showLine[i] = true;
    }
  }
  const output: string[] = [];
  let firstChangedLine: number | undefined;
  let addedLines = 0;
  let removedLines = 0;
  let inEllipsis = false;
  for (let i = 0; i < edits.length; i++) {
    const edit = edits[i];
    if (!showLine[i]) {
      // Collapse each hidden run into exactly one "..." marker row.
      if (!inEllipsis) {
        output.push(` ${"".padStart(lineNumWidth, " ")} ...`);
        inEllipsis = true;
      }
      continue;
    }
    inEllipsis = false;
    switch (edit.type) {
      case "keep": {
        const lineNum = String((edit.oldIdx ?? 0) + 1).padStart(lineNumWidth, " ");
        output.push(` ${lineNum} ${oldLines[edit.oldIdx!]}`);
        break;
      }
      case "remove": {
        // Removed lines are numbered by their position in the OLD file.
        const lineNum = String((edit.oldIdx ?? 0) + 1).padStart(lineNumWidth, " ");
        output.push(`-${lineNum} ${oldLines[edit.oldIdx!]}`);
        removedLines++;
        if (firstChangedLine === undefined) {
          firstChangedLine = (edit.oldIdx ?? 0) + 1;
        }
        break;
      }
      case "add": {
        // Added lines are numbered by their position in the NEW file.
        const lineNum = String((edit.newIdx ?? 0) + 1).padStart(lineNumWidth, " ");
        output.push(`+${lineNum} ${newLines[edit.newIdx!]}`);
        addedLines++;
        if (firstChangedLine === undefined) {
          firstChangedLine = (edit.newIdx ?? 0) + 1;
        }
        break;
      }
    }
  }
  return { diff: output.join("\n"), firstChangedLine, addedLines, removedLines };
}
| // ═══════════════════════════════════════════════════════════════════════════ | |
| // Image detection helpers | |
| // ═══════════════════════════════════════════════════════════════════════════ | |
| const IMAGE_EXTENSIONS = new Set([".jpg", ".jpeg", ".png", ".gif", ".webp", ".bmp", ".svg", ".ico"]); | |
| function isImagePath(filePath: string): boolean { | |
| return IMAGE_EXTENSIONS.has(extname(filePath).toLowerCase()); | |
| } | |
| // ═══════════════════════════════════════════════════════════════════════════ | |
| // Extension entry point | |
| // ═══════════════════════════════════════════════════════════════════════════ | |
/**
 * Extension entry point. Registers a hashline-aware `read` tool (each line
 * tagged `LINE#ID:`), a hashline-based `edit` tool that validates those tags
 * before mutating files, and a session-start notification.
 */
export default function (pi: ExtensionAPI) {
  // ─── Override read tool to add hashline tags ──────────────────────────
  const readSchema = Type.Object({
    path: Type.String({ description: "Path to the file to read (relative or absolute)" }),
    offset: Type.Optional(Type.Number({ description: "Line number to start reading from (1-indexed)" })),
    limit: Type.Optional(Type.Number({ description: "Maximum number of lines to read" })),
  });
  pi.registerTool({
    name: "read",
    label: "Read (hashline)",
    description: `Read the contents of a file. Each line is tagged with LINE#ID for use with the edit tool. For text files, output is truncated to ${DEFAULT_MAX_LINES} lines or ${DEFAULT_MAX_BYTES / 1024}KB (whichever is hit first). Use offset/limit for large files. When you need the full file, continue with offset until complete.`,
    promptSnippet: "Read file contents with LINE#ID hashline tags for precise editing",
    promptGuidelines: [
      "Every text line returned includes a LINE#ID prefix (e.g. 1#VR:content). Use these tags with the edit tool.",
      "Images (jpg, png, gif, webp) are returned as plain text descriptions — use a different tool for image viewing.",
    ],
    parameters: readSchema,
    async execute(_toolCallId, params, signal, _onUpdate, ctx) {
      const { path, offset, limit } = params;
      // Strip leading @ (some models add it)
      const cleanPath = path.startsWith("@") ? path.slice(1) : path;
      const absolutePath = resolve(ctx.cwd, cleanPath);
      if (signal?.aborted) {
        return { content: [{ type: "text" as const, text: "Operation aborted" }], details: {} };
      }
      // Fail fast with a friendly message when the file is unreadable.
      try {
        await access(absolutePath, constants.R_OK);
      } catch {
        return {
          content: [{ type: "text" as const, text: `File not found: ${cleanPath}` }],
          details: {},
          isError: true,
        };
      }
      // If it's an image, read as binary and return as image content
      if (isImagePath(absolutePath)) {
        try {
          const buffer = await readFile(absolutePath);
          const base64 = buffer.toString("base64");
          const ext = extname(absolutePath).toLowerCase();
          // Only these types are returned inline; other image extensions
          // (e.g. .svg, .ico) fall through to a plain-text placeholder.
          const mimeMap: Record<string, string> = {
            ".jpg": "image/jpeg",
            ".jpeg": "image/jpeg",
            ".png": "image/png",
            ".gif": "image/gif",
            ".webp": "image/webp",
          };
          const mimeType = mimeMap[ext];
          if (mimeType) {
            return {
              content: [
                { type: "text" as const, text: `Read image file [${mimeType}]` },
                { type: "image" as const, data: base64, mimeType } as any,
              ],
              details: {},
            };
          }
          return {
            content: [{ type: "text" as const, text: `[Image file: ${cleanPath}]` }],
            details: {},
          };
        } catch (imgErr: any) {
          return {
            content: [{ type: "text" as const, text: `Error reading image: ${imgErr.message}` }],
            details: {},
            isError: true,
          };
        }
      }
      try {
        const buffer = await readFile(absolutePath);
        const textContent = buffer.toString("utf-8");
        const allLines = textContent.split("\n");
        const totalFileLines = allLines.length;
        // offset is 1-indexed for callers; startLine is the 0-based index.
        const startLine = offset ? Math.max(0, offset - 1) : 0;
        const startLineDisplay = startLine + 1;
        if (startLine >= allLines.length) {
          return {
            content: [{ type: "text" as const, text: `Offset ${offset} is beyond end of file (${allLines.length} lines total)` }],
            details: {},
            isError: true,
          };
        }
        let selectedLines: string[];
        if (limit !== undefined) {
          const endLine = Math.min(startLine + limit, allLines.length);
          selectedLines = allLines.slice(startLine, endLine);
        } else {
          selectedLines = allLines.slice(startLine);
        }
        // Format with hashline tags
        const hashlined = selectedLines
          .map((line, i) => {
            const num = startLineDisplay + i;
            return `${formatLineTag(num, line)}:${line}`;
          })
          .join("\n");
        // Apply truncation
        const truncation = truncateHead(hashlined, {
          maxLines: DEFAULT_MAX_LINES,
          maxBytes: DEFAULT_MAX_BYTES,
        });
        let text = truncation.content;
        // Tell the model how to continue paging when output was cut short.
        if (truncation.truncated) {
          text += `\n\n[Showing lines ${startLineDisplay}-${startLineDisplay + truncation.outputLines - 1} of ${totalFileLines} (${formatSize(truncation.outputBytes)} of ${formatSize(truncation.totalBytes)}). Use offset=${startLineDisplay + truncation.outputLines} to continue.]`;
        } else if (offset || limit) {
          const shownLines = selectedLines.length;
          const lastLine = startLineDisplay + shownLines - 1;
          if (lastLine < totalFileLines) {
            text += `\n\n[Showing lines ${startLineDisplay}-${lastLine} of ${totalFileLines}. Use offset=${lastLine + 1} to continue.]`;
          }
        }
        return {
          content: [{ type: "text" as const, text }],
          details: { truncation: truncation.truncated ? truncation : undefined },
        };
      } catch (error: any) {
        return {
          content: [{ type: "text" as const, text: `Error reading file: ${error.message}` }],
          details: {},
          isError: true,
        };
      }
    },
  });
  // ─── Override edit tool with hashline-based editing ─────────────────── 
  const anchorSchema = Type.String({
    description: 'Line reference from read output, e.g. "5#VR"',
  });
  const editOpSchema = Type.Object({
    op: StringEnum(["replace", "append", "prepend"] as const),
    pos: Type.Optional(anchorSchema),
    end: Type.Optional(anchorSchema),
    lines: Type.Union([
      Type.Array(Type.String()),
      Type.String(),
      Type.Null(),
    ], { description: "Replacement lines (array of strings), or null/[] to delete" }),
  });
  const hashlineEditSchema = Type.Object({
    path: Type.String({ description: "Path to the file to edit" }),
    edits: Type.Array(editOpSchema, { description: "Array of edit operations" }),
  });
  pi.registerTool({
    name: "edit",
    label: "Edit (hashline)",
    description: `Applies precise, surgical file edits by referencing LINE#ID tags from read output. Each tag uniquely identifies a line, so edits remain stable even when lines shift. Read the file first to get fresh tags. Submit one edit call per file with all operations batched.`,
    promptSnippet: "Edit files using LINE#ID hashline references from read output",
    promptGuidelines: [
      "Read the file first to get LINE#ID tags. Every tag MUST be copied exactly from the most recent read output.",
      "Use replace with pos (and optional end for ranges) to change existing lines.",
      "Use append (after) or prepend (before) to insert new lines relative to an anchor.",
      "lines: null or [] deletes lines. lines: [\"\"] inserts a blank line.",
      "Batch all edits for one file in a single call. Tags shift after each edit, so multiple calls require re-reading.",
      "lines entries MUST be literal file content with indentation copied exactly. If the file uses tabs, use real tab characters.",
      "Do NOT use this tool to reformat or reindent — run the project's formatter instead.",
      "When changing block content, replace the entire body span, not just one line inside it.",
      "Block boundaries travel together: for { header / body / closer }, either replace only the body (pos=first body, end=last body) or the whole block (pos=header, end=closer, re-emit all in lines).",
    ],
    parameters: hashlineEditSchema,
    async execute(_toolCallId, params, signal, _onUpdate, ctx) {
      const { path, edits: rawEdits } = params;
      const cleanPath = path.startsWith("@") ? path.slice(1) : path;
      const absolutePath = resolve(ctx.cwd, cleanPath);
      if (signal?.aborted) {
        return { content: [{ type: "text" as const, text: "Operation aborted" }], details: {} };
      }
      // Check file exists
      try {
        await access(absolutePath, constants.R_OK | constants.W_OK);
      } catch {
        // If file doesn't exist, check if this is a create-by-edit
        return {
          content: [{ type: "text" as const, text: `File not found or not writable: ${cleanPath}` }],
          details: {},
          isError: true,
        };
      }
      try {
        const buffer = await readFile(absolutePath);
        const rawContent = buffer.toString("utf-8");
        const originalContent = rawContent;
        // Parse edits
        const edits: HashlineEdit[] = rawEdits.map((raw: any) => {
          const parsed: HashlineEdit = {
            op: raw.op,
            lines: parseEditLines(raw.lines),
          };
          if (raw.pos) {
            parsed.pos = parseTag(raw.pos);
          }
          if (raw.end) {
            parsed.end = parseTag(raw.end);
          }
          return parsed;
        });
        // Throws on hash mismatch / bad anchors before any write happens.
        const { result: newContent, firstChangedLine, warnings } = applyHashlineEdits(originalContent, edits);
        if (originalContent === newContent) {
          return {
            content: [{ type: "text" as const, text: `No changes made to ${cleanPath}. The edits produced identical content.` }],
            details: { diff: "" },
          };
        }
        await writeFile(absolutePath, newContent, "utf-8");
        // Generate diff
        const { diff, firstChangedLine: diffFirstChanged, addedLines, removedLines } = generateDiff(originalContent, newContent);
        let resultText = `Applied ${edits.length} edit(s) to ${cleanPath}`;
        if (addedLines > 0 || removedLines > 0) {
          resultText += ` (+${addedLines} -${removedLines} lines)`;
        }
        if (warnings.length > 0) {
          resultText += `\n\nWarnings:\n${warnings.map((w) => ` ⚠ ${w}`).join("\n")}`;
        }
        resultText += `\n\n${diff}`;
        return {
          content: [{ type: "text" as const, text: resultText }],
          details: { diff, firstChangedLine: firstChangedLine ?? diffFirstChanged },
        };
      } catch (error: any) {
        return {
          content: [{ type: "text" as const, text: `Edit failed: ${error.message}` }],
          details: {},
          isError: true,
        };
      }
    },
  });
  // ─── Notify on load ────────────────────────────────────────────────── 
  pi.on("session_start", async (_event, ctx) => {
    ctx.ui.notify("Hashline edit mode active — read returns LINE#ID tags, edit uses hashline references", "info");
  });
}
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment