diff --git a/package.json b/package.json
index 9efbe511..1ee0285c 100644
--- a/package.json
+++ b/package.json
@@ -56,18 +56,14 @@
"@clack/prompts": "^0.11.0",
"@code-yeongyu/comment-checker": "^0.6.1",
"@modelcontextprotocol/sdk": "^1.25.1",
- "@openauthjs/openauth": "^0.4.3",
"@opencode-ai/plugin": "^1.1.19",
"@opencode-ai/sdk": "^1.1.19",
"commander": "^14.0.2",
"detect-libc": "^2.0.0",
- "hono": "^4.10.4",
"js-yaml": "^4.1.1",
"jsonc-parser": "^3.3.1",
- "open": "^11.0.0",
"picocolors": "^1.1.1",
"picomatch": "^4.0.2",
- "xdg-basedir": "^5.1.0",
"zod": "^4.1.8"
},
"devDependencies": {
diff --git a/src/agents/build-prompt.ts b/src/agents/build-prompt.ts
deleted file mode 100644
index f1b09523..00000000
--- a/src/agents/build-prompt.ts
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * OpenCode's default build agent system prompt.
- *
- * This prompt enables FULL EXECUTION mode for the build agent, allowing file
- * modifications, command execution, and system changes while focusing on
- * implementation and execution.
- *
- * Inspired by OpenCode's build agent behavior.
- *
- * @see https://github.com/sst/opencode/blob/6f9bea4e1f3d139feefd0f88de260b04f78caaef/packages/opencode/src/session/prompt/build-switch.txt
- * @see https://github.com/sst/opencode/blob/6f9bea4e1f3d139feefd0f88de260b04f78caaef/packages/opencode/src/agent/agent.ts#L118-L125
- */
-export const BUILD_SYSTEM_PROMPT = `
-# Build Mode - System Reminder
-
-BUILD MODE ACTIVE - you are in EXECUTION phase. Your responsibility is to:
-- Implement features and make code changes
-- Execute commands and run tests
-- Fix bugs and refactor code
-- Deploy and build systems
-- Make all necessary file modifications
-
-You have FULL permissions to edit files, run commands, and make system changes.
-This is the implementation phase - execute decisively and thoroughly.
-
----
-
-## Responsibility
-
-Your current responsibility is to implement, build, and execute. You should:
-- Write and modify code to accomplish the user's goals
-- Run tests and builds to verify your changes
-- Fix errors and issues that arise
-- Use all available tools to complete the task efficiently
-- Delegate to specialized agents when appropriate for better results
-
-**NOTE:** You should ask the user for clarification when requirements are ambiguous,
-but once the path is clear, execute confidently. The goal is to deliver working,
-tested, production-ready solutions.
-
----
-
-## Important
-
-The user wants you to execute and implement. You SHOULD make edits, run necessary
-tools, and make changes to accomplish the task. Use your full capabilities to
-deliver excellent results.
-
-`
-
-/**
- * OpenCode's default build agent permission configuration.
- *
- * Allows the build agent full execution permissions:
- * - edit: "ask" - Can modify files with confirmation
- * - bash: "ask" - Can execute commands with confirmation
- * - webfetch: "allow" - Can fetch web content
- *
- * This provides balanced permissions - powerful but with safety checks.
- *
- * @see https://github.com/sst/opencode/blob/6f9bea4e1f3d139feefd0f88de260b04f78caaef/packages/opencode/src/agent/agent.ts#L57-L68
- * @see https://github.com/sst/opencode/blob/6f9bea4e1f3d139feefd0f88de260b04f78caaef/packages/opencode/src/agent/agent.ts#L118-L125
- */
-export const BUILD_PERMISSION = {
- edit: "ask" as const,
- bash: "ask" as const,
- webfetch: "allow" as const,
-}
diff --git a/src/agents/plan-prompt.ts b/src/agents/plan-prompt.ts
deleted file mode 100644
index 3f699da6..00000000
--- a/src/agents/plan-prompt.ts
+++ /dev/null
@@ -1,162 +0,0 @@
-/**
- * OhMyOpenCode Plan Agent System Prompt
- *
- * A streamlined planner that:
- * - SKIPS user dialogue/Q&A (no user questioning)
- * - KEEPS context gathering via explore/librarian agents
- * - Uses Metis ONLY for AI slop guardrails
- * - Outputs plan directly to user (no file creation)
- *
- * For the full Prometheus experience with user dialogue, use "Prometheus (Planner)" agent.
- */
-export const PLAN_SYSTEM_PROMPT = `
-# Plan Mode - System Reminder
-
-## ABSOLUTE CONSTRAINTS (NON-NEGOTIABLE)
-
-### 1. NO IMPLEMENTATION - PLANNING ONLY
-You are a PLANNER, NOT an executor. You must NEVER:
-- Start implementing ANY task
-- Write production code
-- Execute the work yourself
-- "Get started" on any implementation
-- Begin coding even if user asks
-
-Your ONLY job is to CREATE THE PLAN. Implementation is done by OTHER agents AFTER you deliver the plan.
-If user says "implement this" or "start working", you respond: "I am the plan agent. I will create a detailed work plan for execution by other agents."
-
-### 2. READ-ONLY FILE ACCESS
-You may NOT create or edit any files. You can only READ files for context gathering.
-- Reading files for analysis: ALLOWED
-- ANY file creation or edits: STRICTLY FORBIDDEN
-
-### 3. PLAN OUTPUT
-Your deliverable is a structured work plan delivered directly in your response.
-You do NOT deliver code. You do NOT deliver implementations. You deliver PLANS.
-
-ZERO EXCEPTIONS to these constraints.
-
-
-You are a strategic planner. You bring foresight and structure to complex work.
-
-## Your Mission
-
-Create structured work plans that enable efficient execution by AI agents.
-
-## Workflow (Execute Phases Sequentially)
-
-### Phase 1: Context Gathering (Parallel)
-
-Launch **in parallel**:
-
-**Explore agents** (3-5 parallel):
-\`\`\`
-Task(subagent_type="explore", prompt="Find [specific aspect] in codebase...")
-\`\`\`
-- Similar implementations
-- Project patterns and conventions
-- Related test files
-- Architecture/structure
-
-**Librarian agents** (2-3 parallel):
-\`\`\`
-Task(subagent_type="librarian", prompt="Find documentation for [library/pattern]...")
-\`\`\`
-- Framework docs for relevant features
-- Best practices for the task type
-
-### Phase 2: AI Slop Guardrails
-
-Call \`Metis (Plan Consultant)\` with gathered context to identify guardrails:
-
-\`\`\`
-Task(
- subagent_type="Metis (Plan Consultant)",
- prompt="Based on this context, identify AI slop guardrails:
-
- User Request: {user's original request}
- Codebase Context: {findings from Phase 1}
-
- Generate:
- 1. AI slop patterns to avoid (over-engineering, unnecessary abstractions, verbose comments)
- 2. Common AI mistakes for this type of task
- 3. Project-specific conventions that must be followed
- 4. Explicit 'MUST NOT DO' guardrails"
-)
-\`\`\`
-
-### Phase 3: Plan Generation
-
-Generate a structured plan with:
-
-1. **Core Objective** - What we're achieving (1-2 sentences)
-2. **Concrete Deliverables** - Exact files/endpoints/features
-3. **Definition of Done** - Acceptance criteria
-4. **Must Have** - Required elements
-5. **Must NOT Have** - Forbidden patterns (from Metis guardrails)
-6. **Task Breakdown** - Sequential/parallel task flow
-7. **References** - Existing code to follow
-
-## Key Principles
-
-1. **Infer intent from context** - Use codebase patterns and common practices
-2. **Define concrete deliverables** - Exact outputs, not vague goals
-3. **Clarify what NOT to do** - Most important for preventing AI mistakes
-4. **References over instructions** - Point to existing code
-5. **Verifiable acceptance criteria** - Commands with expected outputs
-6. **Implementation + Test = ONE task** - NEVER separate
-7. **Parallelizability is MANDATORY** - Enable multi-agent execution
-`
-
-/**
- * OpenCode's default plan agent permission configuration.
- *
- * Restricts the plan agent to read-only operations:
- * - edit: "deny" - No file modifications allowed
- * - bash: Only read-only commands (ls, grep, git log, etc.)
- * - webfetch: "allow" - Can fetch web content for research
- *
- * @see https://github.com/sst/opencode/blob/db2abc1b2c144f63a205f668bd7267e00829d84a/packages/opencode/src/agent/agent.ts#L63-L107
- */
-export const PLAN_PERMISSION = {
- edit: "deny" as const,
- bash: {
- "cut*": "allow" as const,
- "diff*": "allow" as const,
- "du*": "allow" as const,
- "file *": "allow" as const,
- "find * -delete*": "ask" as const,
- "find * -exec*": "ask" as const,
- "find * -fprint*": "ask" as const,
- "find * -fls*": "ask" as const,
- "find * -fprintf*": "ask" as const,
- "find * -ok*": "ask" as const,
- "find *": "allow" as const,
- "git diff*": "allow" as const,
- "git log*": "allow" as const,
- "git show*": "allow" as const,
- "git status*": "allow" as const,
- "git branch": "allow" as const,
- "git branch -v": "allow" as const,
- "grep*": "allow" as const,
- "head*": "allow" as const,
- "less*": "allow" as const,
- "ls*": "allow" as const,
- "more*": "allow" as const,
- "pwd*": "allow" as const,
- "rg*": "allow" as const,
- "sort --output=*": "ask" as const,
- "sort -o *": "ask" as const,
- "sort*": "allow" as const,
- "stat*": "allow" as const,
- "tail*": "allow" as const,
- "tree -o *": "ask" as const,
- "tree*": "allow" as const,
- "uniq*": "allow" as const,
- "wc*": "allow" as const,
- "whereis*": "allow" as const,
- "which*": "allow" as const,
- "*": "ask" as const,
- },
- webfetch: "allow" as const,
-}
diff --git a/src/hooks/anthropic-context-window-limit-recovery/pruning-executor.ts b/src/hooks/anthropic-context-window-limit-recovery/pruning-executor.ts
deleted file mode 100644
index 376c602f..00000000
--- a/src/hooks/anthropic-context-window-limit-recovery/pruning-executor.ts
+++ /dev/null
@@ -1,125 +0,0 @@
-import type { DynamicContextPruningConfig } from "../../config"
-import type { PruningState, PruningResult } from "./pruning-types"
-import { executeDeduplication } from "./pruning-deduplication"
-import { executeSupersedeWrites } from "./pruning-supersede"
-import { executePurgeErrors } from "./pruning-purge-errors"
-import { applyPruning } from "./pruning-storage"
-import { log } from "../../shared/logger"
-
-const DEFAULT_PROTECTED_TOOLS = new Set([
- "task",
- "todowrite",
- "todoread",
- "lsp_rename",
- "session_read",
- "session_write",
- "session_search",
-])
-
-function createPruningState(): PruningState {
- return {
- toolIdsToPrune: new Set(),
- currentTurn: 0,
- fileOperations: new Map(),
- toolSignatures: new Map(),
- erroredTools: new Map(),
- }
-}
-
-export async function executeDynamicContextPruning(
- sessionID: string,
- config: DynamicContextPruningConfig,
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
- client: any
-): Promise<PruningResult> {
- const state = createPruningState()
-
- const protectedTools = new Set([
- ...DEFAULT_PROTECTED_TOOLS,
- ...(config.protected_tools || []),
- ])
-
- log("[pruning-executor] starting DCP", {
- sessionID,
- notification: config.notification,
- turnProtection: config.turn_protection,
- })
-
- let dedupCount = 0
- let supersedeCount = 0
- let purgeCount = 0
-
- if (config.strategies?.deduplication?.enabled !== false) {
- dedupCount = executeDeduplication(
- sessionID,
- state,
- { enabled: true },
- protectedTools
- )
- }
-
- if (config.strategies?.supersede_writes?.enabled !== false) {
- supersedeCount = executeSupersedeWrites(
- sessionID,
- state,
- {
- enabled: true,
- aggressive: config.strategies?.supersede_writes?.aggressive || false,
- },
- protectedTools
- )
- }
-
- if (config.strategies?.purge_errors?.enabled !== false) {
- purgeCount = executePurgeErrors(
- sessionID,
- state,
- {
- enabled: true,
- turns: config.strategies?.purge_errors?.turns || 5,
- },
- protectedTools
- )
- }
-
- const totalPruned = state.toolIdsToPrune.size
- const tokensSaved = await applyPruning(sessionID, state)
-
- log("[pruning-executor] DCP complete", {
- totalPruned,
- tokensSaved,
- deduplication: dedupCount,
- supersede: supersedeCount,
- purge: purgeCount,
- })
-
- const result: PruningResult = {
- itemsPruned: totalPruned,
- totalTokensSaved: tokensSaved,
- strategies: {
- deduplication: dedupCount,
- supersedeWrites: supersedeCount,
- purgeErrors: purgeCount,
- },
- }
-
- if (config.notification !== "off" && totalPruned > 0) {
- const message =
- config.notification === "detailed"
- ? `Pruned ${totalPruned} tool outputs (~${Math.round(tokensSaved / 1000)}k tokens). Dedup: ${dedupCount}, Supersede: ${supersedeCount}, Purge: ${purgeCount}`
- : `Pruned ${totalPruned} tool outputs (~${Math.round(tokensSaved / 1000)}k tokens)`
-
- await client.tui
- .showToast({
- body: {
- title: "Dynamic Context Pruning",
- message,
- variant: "success",
- duration: 3000,
- },
- })
- .catch(() => {})
- }
-
- return result
-}
diff --git a/src/hooks/anthropic-context-window-limit-recovery/pruning-purge-errors.ts b/src/hooks/anthropic-context-window-limit-recovery/pruning-purge-errors.ts
deleted file mode 100644
index 0cb36d1e..00000000
--- a/src/hooks/anthropic-context-window-limit-recovery/pruning-purge-errors.ts
+++ /dev/null
@@ -1,152 +0,0 @@
-import { existsSync, readdirSync, readFileSync } from "node:fs"
-import { join } from "node:path"
-import type { PruningState, ErroredToolCall } from "./pruning-types"
-import { estimateTokens } from "./pruning-types"
-import { log } from "../../shared/logger"
-import { MESSAGE_STORAGE } from "../../features/hook-message-injector"
-
-export interface PurgeErrorsConfig {
- enabled: boolean
- turns: number
- protectedTools?: string[]
-}
-
-interface ToolPart {
- type: string
- callID?: string
- tool?: string
- state?: {
- input?: unknown
- output?: string
- status?: string
- }
-}
-
-interface MessagePart {
- type: string
- parts?: ToolPart[]
-}
-
-function getMessageDir(sessionID: string): string | null {
- if (!existsSync(MESSAGE_STORAGE)) return null
-
- const directPath = join(MESSAGE_STORAGE, sessionID)
- if (existsSync(directPath)) return directPath
-
- for (const dir of readdirSync(MESSAGE_STORAGE)) {
- const sessionPath = join(MESSAGE_STORAGE, dir, sessionID)
- if (existsSync(sessionPath)) return sessionPath
- }
-
- return null
-}
-
-function readMessages(sessionID: string): MessagePart[] {
- const messageDir = getMessageDir(sessionID)
- if (!messageDir) return []
-
- const messages: MessagePart[] = []
-
- try {
- const files = readdirSync(messageDir).filter(f => f.endsWith(".json"))
- for (const file of files) {
- const content = readFileSync(join(messageDir, file), "utf-8")
- const data = JSON.parse(content)
- if (data.parts) {
- messages.push(data)
- }
- }
- } catch {
- return []
- }
-
- return messages
-}
-
-export function executePurgeErrors(
- sessionID: string,
- state: PruningState,
- config: PurgeErrorsConfig,
- protectedTools: Set<string>
-): number {
- if (!config.enabled) return 0
-
- const messages = readMessages(sessionID)
-
- let currentTurn = 0
-
- for (const msg of messages) {
- if (!msg.parts) continue
-
- for (const part of msg.parts) {
- if (part.type === "step-start") {
- currentTurn++
- }
- }
- }
-
- state.currentTurn = currentTurn
-
- let turnCounter = 0
- let prunedCount = 0
- let tokensSaved = 0
-
- for (const msg of messages) {
- if (!msg.parts) continue
-
- for (const part of msg.parts) {
- if (part.type === "step-start") {
- turnCounter++
- continue
- }
-
- if (part.type !== "tool" || !part.callID || !part.tool) continue
-
- if (protectedTools.has(part.tool)) continue
-
- if (config.protectedTools?.includes(part.tool)) continue
-
- if (state.toolIdsToPrune.has(part.callID)) continue
-
- if (part.state?.status !== "error") continue
-
- const turnAge = currentTurn - turnCounter
-
- if (turnAge >= config.turns) {
- state.toolIdsToPrune.add(part.callID)
- prunedCount++
-
- const input = part.state.input
- if (input) {
- tokensSaved += estimateTokens(JSON.stringify(input))
- }
-
- const errorInfo: ErroredToolCall = {
- callID: part.callID,
- toolName: part.tool,
- turn: turnCounter,
- errorAge: turnAge,
- }
-
- state.erroredTools.set(part.callID, errorInfo)
-
- log("[pruning-purge-errors] pruned old error", {
- tool: part.tool,
- callID: part.callID,
- turn: turnCounter,
- errorAge: turnAge,
- threshold: config.turns,
- })
- }
- }
- }
-
- log("[pruning-purge-errors] complete", {
- prunedCount,
- tokensSaved,
- currentTurn,
- threshold: config.turns,
- })
-
- return prunedCount
-}
diff --git a/src/hooks/anthropic-context-window-limit-recovery/pruning-storage.ts b/src/hooks/anthropic-context-window-limit-recovery/pruning-storage.ts
deleted file mode 100644
index 462e1d50..00000000
--- a/src/hooks/anthropic-context-window-limit-recovery/pruning-storage.ts
+++ /dev/null
@@ -1,101 +0,0 @@
-import { existsSync, readdirSync, readFileSync, writeFileSync } from "node:fs"
-import { join } from "node:path"
-import type { PruningState } from "./pruning-types"
-import { estimateTokens } from "./pruning-types"
-import { log } from "../../shared/logger"
-import { MESSAGE_STORAGE } from "../../features/hook-message-injector"
-
-function getMessageDir(sessionID: string): string | null {
- if (!existsSync(MESSAGE_STORAGE)) return null
-
- const directPath = join(MESSAGE_STORAGE, sessionID)
- if (existsSync(directPath)) return directPath
-
- for (const dir of readdirSync(MESSAGE_STORAGE)) {
- const sessionPath = join(MESSAGE_STORAGE, dir, sessionID)
- if (existsSync(sessionPath)) return sessionPath
- }
-
- return null
-}
-
-interface ToolPart {
- type: string
- callID?: string
- tool?: string
- state?: {
- input?: unknown
- output?: string
- status?: string
- }
-}
-
-interface MessageData {
- parts?: ToolPart[]
- [key: string]: unknown
-}
-
-export async function applyPruning(
- sessionID: string,
- state: PruningState
-): Promise<number> {
- const messageDir = getMessageDir(sessionID)
- if (!messageDir) {
- log("[pruning-storage] message dir not found", { sessionID })
- return 0
- }
-
- let totalTokensSaved = 0
- let filesModified = 0
-
- try {
- const files = readdirSync(messageDir).filter(f => f.endsWith(".json"))
-
- for (const file of files) {
- const filePath = join(messageDir, file)
- const content = readFileSync(filePath, "utf-8")
- const data: MessageData = JSON.parse(content)
-
- if (!data.parts) continue
-
- let modified = false
-
- for (const part of data.parts) {
- if (part.type !== "tool" || !part.callID) continue
-
- if (!state.toolIdsToPrune.has(part.callID)) continue
-
- if (part.state?.input) {
- const inputStr = JSON.stringify(part.state.input)
- totalTokensSaved += estimateTokens(inputStr)
- part.state.input = { __pruned: true, reason: "DCP" }
- modified = true
- }
-
- if (part.state?.output) {
- totalTokensSaved += estimateTokens(part.state.output)
- part.state.output = "[Content pruned by Dynamic Context Pruning]"
- modified = true
- }
- }
-
- if (modified) {
- writeFileSync(filePath, JSON.stringify(data, null, 2), "utf-8")
- filesModified++
- }
- }
- } catch (error) {
- log("[pruning-storage] error applying pruning", {
- sessionID,
- error: String(error),
- })
- }
-
- log("[pruning-storage] applied pruning", {
- sessionID,
- filesModified,
- totalTokensSaved,
- })
-
- return totalTokensSaved
-}
diff --git a/src/hooks/anthropic-context-window-limit-recovery/pruning-supersede.ts b/src/hooks/anthropic-context-window-limit-recovery/pruning-supersede.ts
deleted file mode 100644
index 0a75d805..00000000
--- a/src/hooks/anthropic-context-window-limit-recovery/pruning-supersede.ts
+++ /dev/null
@@ -1,212 +0,0 @@
-import { existsSync, readdirSync, readFileSync } from "node:fs"
-import { join } from "node:path"
-import type { PruningState, FileOperation } from "./pruning-types"
-import { estimateTokens } from "./pruning-types"
-import { log } from "../../shared/logger"
-import { MESSAGE_STORAGE } from "../../features/hook-message-injector"
-
-export interface SupersedeWritesConfig {
- enabled: boolean
- aggressive: boolean
-}
-
-interface ToolPart {
- type: string
- callID?: string
- tool?: string
- state?: {
- input?: unknown
- output?: string
- }
-}
-
-interface MessagePart {
- type: string
- parts?: ToolPart[]
-}
-
-function getMessageDir(sessionID: string): string | null {
- if (!existsSync(MESSAGE_STORAGE)) return null
-
- const directPath = join(MESSAGE_STORAGE, sessionID)
- if (existsSync(directPath)) return directPath
-
- for (const dir of readdirSync(MESSAGE_STORAGE)) {
- const sessionPath = join(MESSAGE_STORAGE, dir, sessionID)
- if (existsSync(sessionPath)) return sessionPath
- }
-
- return null
-}
-
-function readMessages(sessionID: string): MessagePart[] {
- const messageDir = getMessageDir(sessionID)
- if (!messageDir) return []
-
- const messages: MessagePart[] = []
-
- try {
- const files = readdirSync(messageDir).filter(f => f.endsWith(".json"))
- for (const file of files) {
- const content = readFileSync(join(messageDir, file), "utf-8")
- const data = JSON.parse(content)
- if (data.parts) {
- messages.push(data)
- }
- }
- } catch {
- return []
- }
-
- return messages
-}
-
-function extractFilePath(toolName: string, input: unknown): string | null {
- if (!input || typeof input !== "object") return null
-
- const inputObj = input as Record<string, unknown>
-
- if (toolName === "write" || toolName === "edit" || toolName === "read") {
- if (typeof inputObj.filePath === "string") {
- return inputObj.filePath
- }
- }
-
- return null
-}
-
-export function executeSupersedeWrites(
- sessionID: string,
- state: PruningState,
- config: SupersedeWritesConfig,
- protectedTools: Set<string>
-): number {
- if (!config.enabled) return 0
-
- const messages = readMessages(sessionID)
- const writesByFile = new Map<string, FileOperation[]>()
- const readsByFile = new Map<string, number[]>()
-
- let currentTurn = 0
-
- for (const msg of messages) {
- if (!msg.parts) continue
-
- for (const part of msg.parts) {
- if (part.type === "step-start") {
- currentTurn++
- continue
- }
-
- if (part.type !== "tool" || !part.callID || !part.tool) continue
-
- if (protectedTools.has(part.tool)) continue
-
- if (state.toolIdsToPrune.has(part.callID)) continue
-
- const filePath = extractFilePath(part.tool, part.state?.input)
- if (!filePath) continue
-
- if (part.tool === "write" || part.tool === "edit") {
- if (!writesByFile.has(filePath)) {
- writesByFile.set(filePath, [])
- }
- writesByFile.get(filePath)!.push({
- callID: part.callID,
- tool: part.tool,
- filePath,
- turn: currentTurn,
- })
-
- if (!state.fileOperations.has(filePath)) {
- state.fileOperations.set(filePath, [])
- }
- state.fileOperations.get(filePath)!.push({
- callID: part.callID,
- tool: part.tool,
- filePath,
- turn: currentTurn,
- })
- } else if (part.tool === "read") {
- if (!readsByFile.has(filePath)) {
- readsByFile.set(filePath, [])
- }
- readsByFile.get(filePath)!.push(currentTurn)
- }
- }
- }
-
- let prunedCount = 0
- let tokensSaved = 0
-
- for (const [filePath, writes] of writesByFile) {
- const reads = readsByFile.get(filePath) || []
-
- if (config.aggressive) {
- for (const write of writes) {
- const superseded = reads.some(readTurn => readTurn > write.turn)
- if (superseded) {
- state.toolIdsToPrune.add(write.callID)
- prunedCount++
-
- const input = findToolInput(messages, write.callID)
- if (input) {
- tokensSaved += estimateTokens(JSON.stringify(input))
- }
-
- log("[pruning-supersede] pruned superseded write", {
- tool: write.tool,
- callID: write.callID,
- turn: write.turn,
- filePath,
- })
- }
- }
- } else {
- if (writes.length > 1) {
- for (const write of writes.slice(0, -1)) {
- const superseded = reads.some(readTurn => readTurn > write.turn)
- if (superseded) {
- state.toolIdsToPrune.add(write.callID)
- prunedCount++
-
- const input = findToolInput(messages, write.callID)
- if (input) {
- tokensSaved += estimateTokens(JSON.stringify(input))
- }
-
- log("[pruning-supersede] pruned superseded write (conservative)", {
- tool: write.tool,
- callID: write.callID,
- turn: write.turn,
- filePath,
- })
- }
- }
- }
- }
- }
-
- log("[pruning-supersede] complete", {
- prunedCount,
- tokensSaved,
- filesTracked: writesByFile.size,
- mode: config.aggressive ? "aggressive" : "conservative",
- })
-
- return prunedCount
-}
-
-function findToolInput(messages: MessagePart[], callID: string): unknown | null {
- for (const msg of messages) {
- if (!msg.parts) continue
-
- for (const part of msg.parts) {
- if (part.type === "tool" && part.callID === callID && part.state?.input) {
- return part.state.input
- }
- }
- }
-
- return null
-}
diff --git a/src/hooks/comment-checker/constants.ts b/src/hooks/comment-checker/constants.ts
deleted file mode 100644
index 066964b8..00000000
--- a/src/hooks/comment-checker/constants.ts
+++ /dev/null
@@ -1,83 +0,0 @@
-export const BDD_KEYWORDS = new Set([
- "given",
- "when",
- "then",
- "arrange",
- "act",
- "assert",
- "when & then",
- "when&then",
-])
-
-export const TYPE_CHECKER_PREFIXES = [
- "type:",
- "noqa",
- "pyright:",
- "ruff:",
- "mypy:",
- "pylint:",
- "flake8:",
- "pyre:",
- "pytype:",
- "eslint-disable",
- "eslint-enable",
- "eslint-ignore",
- "prettier-ignore",
- "ts-ignore",
- "ts-expect-error",
- "ts-nocheck",
- "clippy::",
- "allow(",
- "deny(",
- "warn(",
- "forbid(",
- "nolint",
- "go:generate",
- "go:build",
- "go:embed",
- "coverage:",
- "c8 ignore",
- "biome-ignore",
- "region",
- "endregion",
-]
-
-export const HOOK_MESSAGE_HEADER = `COMMENT/DOCSTRING DETECTED - IMMEDIATE ACTION REQUIRED
-
-Your recent changes contain comments or docstrings, which triggered this hook.
-You need to take immediate action. You must follow the conditions below.
-(Listed in priority order - you must always act according to this priority order)
-
-CRITICAL WARNING: This hook message MUST NEVER be ignored, even if you receive it multiple times.
-You MUST provide corresponding explanation or action for EACH occurrence of this message.
-Ignoring this message or failing to respond appropriately is strictly prohibited.
-
-PRIORITY-BASED ACTION GUIDELINES:
-
-1. This is a comment/docstring that already existed before
-\t-> Explain to the user that this is an existing comment/docstring and proceed (justify it)
-
-2. This is a newly written comment: but it's in given, when, then format
-\t-> Tell the user it's a BDD comment and proceed (justify it)
-\t-> Note: This applies to comments only, not docstrings
-
-3. This is a newly written comment/docstring: but it's a necessary comment/docstring
-\t-> Tell the user why this comment/docstring is absolutely necessary and proceed (justify it)
-\t-> Examples of necessary comments: complex algorithms, security-related, performance optimization, regex, mathematical formulas
-\t-> Examples of necessary docstrings: public API documentation, complex module/class interfaces
-\t-> IMPORTANT: Most docstrings are unnecessary if the code is self-explanatory. Only keep truly essential ones.
-
-4. This is a newly written comment/docstring: but it's an unnecessary comment/docstring
-\t-> Apologize to the user and remove the comment/docstring.
-\t-> Make the code itself clearer so it can be understood without comments/docstrings.
-\t-> For verbose docstrings: refactor code to be self-documenting instead of adding lengthy explanations.
-
-CODE SMELL WARNING: Using comments as visual separators (e.g., "// =========", "# ---", "// *** Section ***")
-is a code smell. If you need separators, your file is too long or poorly organized.
-Refactor into smaller modules or use proper code organization instead of comment-based section dividers.
-
-MANDATORY REQUIREMENT: You must acknowledge this hook message and take one of the above actions.
-Review in the above priority order and take the corresponding action EVERY TIME this appears.
-
-Detected comments/docstrings:
-`
diff --git a/src/hooks/comment-checker/filters/bdd.ts b/src/hooks/comment-checker/filters/bdd.ts
deleted file mode 100644
index f4e22e38..00000000
--- a/src/hooks/comment-checker/filters/bdd.ts
+++ /dev/null
@@ -1,21 +0,0 @@
-import type { CommentInfo, FilterResult } from "../types"
-import { BDD_KEYWORDS } from "../constants"
-
-function stripCommentPrefix(text: string): string {
- let stripped = text.trim().toLowerCase()
- const prefixes = ["#", "//", "--", "/*", "*/"]
- for (const prefix of prefixes) {
- if (stripped.startsWith(prefix)) {
- stripped = stripped.slice(prefix.length).trim()
- }
- }
- return stripped
-}
-
-export function filterBddComments(comment: CommentInfo): FilterResult {
- const normalized = stripCommentPrefix(comment.text)
- if (BDD_KEYWORDS.has(normalized)) {
- return { shouldSkip: true, reason: `BDD keyword: ${normalized}` }
- }
- return { shouldSkip: false }
-}
diff --git a/src/hooks/comment-checker/filters/directive.ts b/src/hooks/comment-checker/filters/directive.ts
deleted file mode 100644
index d8312160..00000000
--- a/src/hooks/comment-checker/filters/directive.ts
+++ /dev/null
@@ -1,24 +0,0 @@
-import type { CommentInfo, FilterResult } from "../types"
-import { TYPE_CHECKER_PREFIXES } from "../constants"
-
-function stripCommentPrefix(text: string): string {
- let stripped = text.trim().toLowerCase()
- const prefixes = ["#", "//", "/*", "--"]
- for (const prefix of prefixes) {
- if (stripped.startsWith(prefix)) {
- stripped = stripped.slice(prefix.length).trim()
- }
- }
- stripped = stripped.replace(/^@/, "")
- return stripped
-}
-
-export function filterDirectiveComments(comment: CommentInfo): FilterResult {
- const normalized = stripCommentPrefix(comment.text)
- for (const prefix of TYPE_CHECKER_PREFIXES) {
- if (normalized.startsWith(prefix.toLowerCase())) {
- return { shouldSkip: true, reason: `Directive: ${prefix}` }
- }
- }
- return { shouldSkip: false }
-}
diff --git a/src/hooks/comment-checker/filters/docstring.ts b/src/hooks/comment-checker/filters/docstring.ts
deleted file mode 100644
index d30abd2b..00000000
--- a/src/hooks/comment-checker/filters/docstring.ts
+++ /dev/null
@@ -1,12 +0,0 @@
-import type { CommentInfo, FilterResult } from "../types"
-
-export function filterDocstringComments(comment: CommentInfo): FilterResult {
- if (comment.isDocstring) {
- return { shouldSkip: true, reason: "Docstring" }
- }
- const trimmed = comment.text.trimStart()
- if (trimmed.startsWith("/**")) {
- return { shouldSkip: true, reason: "JSDoc/PHPDoc" }
- }
- return { shouldSkip: false }
-}
diff --git a/src/hooks/comment-checker/filters/index.ts b/src/hooks/comment-checker/filters/index.ts
deleted file mode 100644
index dcd81570..00000000
--- a/src/hooks/comment-checker/filters/index.ts
+++ /dev/null
@@ -1,26 +0,0 @@
-import type { CommentInfo, CommentFilter } from "../types"
-import { filterBddComments } from "./bdd"
-import { filterDirectiveComments } from "./directive"
-import { filterDocstringComments } from "./docstring"
-import { filterShebangComments } from "./shebang"
-
-export { filterBddComments, filterDirectiveComments, filterDocstringComments, filterShebangComments }
-
-const ALL_FILTERS: CommentFilter[] = [
- filterShebangComments,
- filterBddComments,
- filterDirectiveComments,
- filterDocstringComments,
-]
-
-export function applyFilters(comments: CommentInfo[]): CommentInfo[] {
- return comments.filter((comment) => {
- for (const filter of ALL_FILTERS) {
- const result = filter(comment)
- if (result.shouldSkip) {
- return false
- }
- }
- return true
- })
-}
diff --git a/src/hooks/comment-checker/filters/shebang.ts b/src/hooks/comment-checker/filters/shebang.ts
deleted file mode 100644
index 17c247b7..00000000
--- a/src/hooks/comment-checker/filters/shebang.ts
+++ /dev/null
@@ -1,9 +0,0 @@
-import type { CommentInfo, FilterResult } from "../types"
-
-export function filterShebangComments(comment: CommentInfo): FilterResult {
- const trimmed = comment.text.trimStart()
- if (trimmed.startsWith("#!")) {
- return { shouldSkip: true, reason: "Shebang" }
- }
- return { shouldSkip: false }
-}
diff --git a/src/hooks/comment-checker/output/formatter.ts b/src/hooks/comment-checker/output/formatter.ts
deleted file mode 100644
index b8eaaeac..00000000
--- a/src/hooks/comment-checker/output/formatter.ts
+++ /dev/null
@@ -1,11 +0,0 @@
-import type { FileComments } from "../types"
-import { HOOK_MESSAGE_HEADER } from "../constants"
-import { buildCommentsXml } from "./xml-builder"
-
-export function formatHookMessage(fileCommentsList: FileComments[]): string {
- if (fileCommentsList.length === 0) {
- return ""
- }
- const xml = buildCommentsXml(fileCommentsList)
- return `${HOOK_MESSAGE_HEADER}${xml}\n`
-}
diff --git a/src/hooks/comment-checker/output/index.ts b/src/hooks/comment-checker/output/index.ts
deleted file mode 100644
index 5cb01e82..00000000
--- a/src/hooks/comment-checker/output/index.ts
+++ /dev/null
@@ -1,2 +0,0 @@
-export { buildCommentsXml } from "./xml-builder"
-export { formatHookMessage } from "./formatter"
diff --git a/src/hooks/comment-checker/output/xml-builder.ts b/src/hooks/comment-checker/output/xml-builder.ts
deleted file mode 100644
index 38dc33dc..00000000
--- a/src/hooks/comment-checker/output/xml-builder.ts
+++ /dev/null
@@ -1,24 +0,0 @@
-import type { FileComments } from "../types"
-
-function escapeXml(text: string): string {
- return text
- .replace(/&/g, "&amp;")
- .replace(/</g, "&lt;")
- .replace(/>/g, "&gt;")
- .replace(/"/g, "&quot;")
- .replace(/'/g, "&#39;")
-}
-
-export function buildCommentsXml(fileCommentsList: FileComments[]): string {
- const lines: string[] = []
-
- for (const fc of fileCommentsList) {
- lines.push(`<file path="${fc.filePath}">`)
- for (const comment of fc.comments) {
- lines.push(`\t<comment>${escapeXml(comment.text)}</comment>`)
- }
- lines.push(`</file>`)
- }
-
- return lines.join("\n")
-}
diff --git a/src/hooks/index.ts b/src/hooks/index.ts
index 65b784d9..55e194b0 100644
--- a/src/hooks/index.ts
+++ b/src/hooks/index.ts
@@ -14,7 +14,6 @@ export { createThinkModeHook } from "./think-mode";
export { createClaudeCodeHooksHook } from "./claude-code-hooks";
export { createRulesInjectorHook } from "./rules-injector";
export { createBackgroundNotificationHook } from "./background-notification"
-export { createBackgroundCompactionHook } from "./background-compaction"
export { createAutoUpdateCheckerHook } from "./auto-update-checker";
export { createAgentUsageReminderHook } from "./agent-usage-reminder";
diff --git a/src/tools/ast-grep/napi.ts b/src/tools/ast-grep/napi.ts
deleted file mode 100644
index c8d3880b..00000000
--- a/src/tools/ast-grep/napi.ts
+++ /dev/null
@@ -1,116 +0,0 @@
-import { parse, Lang } from "@ast-grep/napi"
-import { NAPI_LANGUAGES } from "./constants"
-import type { NapiLanguage, AnalyzeResult, MetaVariable, Range } from "./types"
-
-const LANG_MAP: Record<NapiLanguage, Lang> = {
- html: Lang.Html,
- javascript: Lang.JavaScript,
- tsx: Lang.Tsx,
- css: Lang.Css,
- typescript: Lang.TypeScript,
-}
-
-export function parseCode(code: string, lang: NapiLanguage) {
- const parseLang = LANG_MAP[lang]
- if (!parseLang) {
- const supportedLangs = NAPI_LANGUAGES.join(", ")
- throw new Error(
- `Unsupported language for NAPI: "${lang}"\n` +
- `Supported languages: ${supportedLangs}\n\n` +
- `Use ast_grep_search for other languages (25 supported via CLI).`
- )
- }
- return parse(parseLang, code)
-}
-
-export function findPattern(root: ReturnType<typeof parseCode>, pattern: string) {
- return root.root().findAll(pattern)
-}
-
-function nodeToRange(node: ReturnType<ReturnType<typeof parseCode>["root"]>): Range {
- const range = node.range()
- return {
- start: { line: range.start.line, column: range.start.column },
- end: { line: range.end.line, column: range.end.column },
- }
-}
-
-function extractMetaVariablesFromPattern(pattern: string): string[] {
- const matches = pattern.match(/\$[A-Z_][A-Z0-9_]*/g) || []
- return [...new Set(matches.map((m) => m.slice(1)))]
-}
-
-export function extractMetaVariables(
- node: ReturnType<ReturnType<typeof parseCode>["root"]>,
- pattern: string
-): MetaVariable[] {
- const varNames = extractMetaVariablesFromPattern(pattern)
- const result: MetaVariable[] = []
-
- for (const name of varNames) {
- const match = node.getMatch(name)
- if (match) {
- result.push({
- name,
- text: match.text(),
- kind: String(match.kind()),
- })
- }
- }
-
- return result
-}
-
-export function analyzeCode(
- code: string,
- lang: NapiLanguage,
- pattern: string,
- shouldExtractMetaVars: boolean
-): AnalyzeResult[] {
- const root = parseCode(code, lang)
- const matches = findPattern(root, pattern)
-
- return matches.map((node) => ({
- text: node.text(),
- range: nodeToRange(node),
- kind: String(node.kind()),
- metaVariables: shouldExtractMetaVars ? extractMetaVariables(node, pattern) : [],
- }))
-}
-
-export function transformCode(
- code: string,
- lang: NapiLanguage,
- pattern: string,
- rewrite: string
-): { transformed: string; editCount: number } {
- const root = parseCode(code, lang)
- const matches = findPattern(root, pattern)
-
- if (matches.length === 0) {
- return { transformed: code, editCount: 0 }
- }
-
- const edits = matches.map((node) => {
- const metaVars = extractMetaVariables(node, pattern)
- let replacement = rewrite
-
- for (const mv of metaVars) {
- replacement = replacement.replace(new RegExp(`\\$${mv.name}`, "g"), mv.text)
- }
-
- return node.replace(replacement)
- })
-
- const transformed = root.root().commitEdits(edits)
- return { transformed, editCount: edits.length }
-}
-
-export function getRootInfo(code: string, lang: NapiLanguage): { kind: string; childCount: number } {
- const root = parseCode(code, lang)
- const rootNode = root.root()
- return {
- kind: String(rootNode.kind()),
- childCount: rootNode.children().length,
- }
-}
diff --git a/src/tools/interactive-bash/types.ts b/src/tools/interactive-bash/types.ts
deleted file mode 100644
index de90a408..00000000
--- a/src/tools/interactive-bash/types.ts
+++ /dev/null
@@ -1,3 +0,0 @@
-export interface InteractiveBashArgs {
- tmux_command: string
-}