Merge remote-tracking branch 'origin/dev' into refactor/modular-code-enforcement

# Conflicts:
#	src/features/background-agent/manager.ts
#	src/features/background-agent/spawner.ts
#	src/features/tmux-subagent/manager.ts
#	src/shared/model-availability.test.ts
#	src/shared/model-availability.ts
#	src/shared/model-resolution-pipeline.ts
#	src/tools/delegate-task/executor.ts
This commit is contained in:
YeonGyu-Kim 2026-02-08 21:38:58 +09:00
commit ce37924fd8
14 changed files with 3058 additions and 460 deletions

View File

@@ -1,6 +1,8 @@
import { describe, it, expect, beforeEach, mock, spyOn } from "bun:test"
import { resolveSession } from "./session-resolver"
import type { OpencodeClient } from "./types"
/// <reference types="bun-types" />
import { beforeEach, describe, expect, it, mock, spyOn } from "bun:test";
import { resolveSession } from "./session-resolver";
import type { OpencodeClient } from "./types";
const createMockClient = (overrides: {
getResult?: { error?: unknown; data?: { id: string } }
@ -58,7 +60,9 @@ describe("resolveSession", () => {
const result = resolveSession({ client: mockClient, sessionId })
// then
await expect(result).rejects.toThrow(`Session not found: ${sessionId}`)
await Promise.resolve(
expect(result).rejects.toThrow(`Session not found: ${sessionId}`)
)
expect(mockClient.session.get).toHaveBeenCalledWith({
path: { id: sessionId },
})
@ -77,7 +81,12 @@ describe("resolveSession", () => {
// then
expect(result).toBe("new-session-id")
expect(mockClient.session.create).toHaveBeenCalledWith({
body: { title: "oh-my-opencode run" },
body: {
title: "oh-my-opencode run",
permission: [
{ permission: "question", action: "deny", pattern: "*" },
],
},
})
expect(mockClient.session.get).not.toHaveBeenCalled()
})
@ -98,7 +107,12 @@ describe("resolveSession", () => {
expect(result).toBe("retried-session-id")
expect(mockClient.session.create).toHaveBeenCalledTimes(2)
expect(mockClient.session.create).toHaveBeenCalledWith({
body: { title: "oh-my-opencode run" },
body: {
title: "oh-my-opencode run",
permission: [
{ permission: "question", action: "deny", pattern: "*" },
],
},
})
})
@ -116,7 +130,9 @@ describe("resolveSession", () => {
const result = resolveSession({ client: mockClient })
// then
await expect(result).rejects.toThrow("Failed to create session after all retries")
await Promise.resolve(
expect(result).rejects.toThrow("Failed to create session after all retries")
)
expect(mockClient.session.create).toHaveBeenCalledTimes(3)
})
@ -134,7 +150,9 @@ describe("resolveSession", () => {
const result = resolveSession({ client: mockClient })
// then
await expect(result).rejects.toThrow("Failed to create session after all retries")
await Promise.resolve(
expect(result).rejects.toThrow("Failed to create session after all retries")
)
expect(mockClient.session.create).toHaveBeenCalledTimes(3)
})
})

View File

@ -19,14 +19,18 @@ export async function resolveSession(options: {
return sessionId
}
let lastError: unknown
for (let attempt = 1; attempt <= SESSION_CREATE_MAX_RETRIES; attempt++) {
const res = await client.session.create({
body: { title: "oh-my-opencode run" },
body: {
title: "oh-my-opencode run",
// In CLI run mode there's no TUI to answer questions.
permission: [
{ permission: "question", action: "deny" as const, pattern: "*" },
],
} as any,
})
if (res.error) {
lastError = res.error
console.error(
pc.yellow(`Session create attempt ${attempt}/${SESSION_CREATE_MAX_RETRIES} failed:`)
)
@ -44,9 +48,6 @@ export async function resolveSession(options: {
return res.data.id
}
lastError = new Error(
`Unexpected response: ${JSON.stringify(res, null, 2)}`
)
console.error(
pc.yellow(
`Session create attempt ${attempt}/${SESSION_CREATE_MAX_RETRIES}: No session ID returned`

View File

@ -1412,14 +1412,14 @@ describe("BackgroundManager - Non-blocking Queue Integration", () => {
let manager: BackgroundManager
let mockClient: ReturnType<typeof createMockClient>
function createMockClient() {
return {
session: {
create: async () => ({ data: { id: `ses_${crypto.randomUUID()}` } }),
get: async () => ({ data: { directory: "/test/dir" } }),
prompt: async () => ({}),
promptAsync: async () => ({}),
messages: async () => ({ data: [] }),
function createMockClient() {
return {
session: {
create: async (_args?: any) => ({ data: { id: `ses_${crypto.randomUUID()}` } }),
get: async () => ({ data: { directory: "/test/dir" } }),
prompt: async () => ({}),
promptAsync: async () => ({}),
messages: async () => ({ data: [] }),
todo: async () => ({ data: [] }),
status: async () => ({ data: {} }),
abort: async () => ({}),
@ -1520,6 +1520,55 @@ describe("BackgroundManager - Non-blocking Queue Integration", () => {
})
describe("task transitions pending→running when slot available", () => {
// Regression test: a child background session must inherit the parent
// session's permission rules, EXCEPT "question", which is always forced to
// deny — a background agent has no TUI through which a question could be answered.
test("should inherit parent session permission rules (and force deny question)", async () => {
// given
// Capture every session.create payload so the permission body can be inspected.
const createCalls: any[] = []
// Parent explicitly ALLOWS "question" — the child must still end up with deny.
const parentPermission = [
{ permission: "question", action: "allow" as const, pattern: "*" },
{ permission: "plan_enter", action: "deny" as const, pattern: "*" },
]
const customClient = {
session: {
create: async (args?: any) => {
createCalls.push(args)
return { data: { id: `ses_${crypto.randomUUID()}` } }
},
// session.get returns the parent's permission list that should be inherited.
get: async () => ({ data: { directory: "/test/dir", permission: parentPermission } }),
prompt: async () => ({}),
promptAsync: async () => ({}),
messages: async () => ({ data: [] }),
todo: async () => ({ data: [] }),
status: async () => ({ data: {} }),
abort: async () => ({}),
},
}
// Replace the suite-level manager with one wired to the recording client.
manager.shutdown()
manager = new BackgroundManager({ client: customClient, directory: tmpdir() } as unknown as PluginInput, {
defaultConcurrency: 5,
})
const input = {
description: "Test task",
prompt: "Do something",
agent: "test-agent",
parentSessionID: "parent-session",
parentMessageID: "parent-message",
}
// when
await manager.launch(input)
// Launch starts the task asynchronously; give the queue a beat to create the session.
await new Promise(resolve => setTimeout(resolve, 50))
// then
expect(createCalls).toHaveLength(1)
const permission = createCalls[0]?.body?.permission
// Inherited non-question rules are kept; "question" is rewritten to deny.
expect(permission).toEqual([
{ permission: "plan_enter", action: "deny", pattern: "*" },
{ permission: "question", action: "deny", pattern: "*" },
])
})
test("should transition first task to running immediately", async () => {
// given
const config = { defaultConcurrency: 5 }

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,65 @@
import { describe, test, expect } from "bun:test"
import { createTask, startTask } from "./spawner"
// Unit test for startTask's permission-inheritance behavior: the spawned child
// session must carry the parent's permission rules, with "question" always
// rewritten to deny (background agents cannot answer interactive questions).
describe("background-agent spawner.startTask", () => {
test("should inherit parent session permission rules (and force deny question)", async () => {
//#given
// Record session.create payloads so the permission body can be asserted on.
const createCalls: any[] = []
// Parent allows "question" — the child must still receive deny for it.
const parentPermission = [
{ permission: "question", action: "allow" as const, pattern: "*" },
{ permission: "plan_enter", action: "deny" as const, pattern: "*" },
]
const client = {
session: {
get: async () => ({ data: { directory: "/parent/dir", permission: parentPermission } }),
create: async (args?: any) => {
createCalls.push(args)
return { data: { id: "ses_child" } }
},
promptAsync: async () => ({}),
},
}
const task = createTask({
description: "Test task",
prompt: "Do work",
agent: "explore",
parentSessionID: "ses_parent",
parentMessageID: "msg_parent",
})
// Mirror the queue item shape the manager would normally hand to startTask.
const item = {
task,
input: {
description: task.description,
prompt: task.prompt,
agent: task.agent,
parentSessionID: task.parentSessionID,
parentMessageID: task.parentMessageID,
parentModel: task.parentModel,
parentAgent: task.parentAgent,
model: task.model,
},
}
// Minimal SpawnerContext stub; tmux disabled so no pane-spawn path runs.
const ctx = {
client,
directory: "/fallback",
concurrencyManager: { release: () => {} },
tmuxEnabled: false,
onTaskError: () => {},
}
//#when
await startTask(item as any, ctx as any)
//#then
expect(createCalls).toHaveLength(1)
// Non-question parent rules are kept; "question" is forced to deny.
expect(createCalls[0]?.body?.permission).toEqual([
{ permission: "plan_enter", action: "deny", pattern: "*" },
{ permission: "question", action: "deny", pattern: "*" },
])
})
})

View File

@ -1,4 +1,246 @@
export type { SpawnerContext } from "./spawner/spawner-context"
export { createTask } from "./spawner/task-factory"
export { startTask } from "./spawner/task-starter"
export { resumeTask } from "./spawner/task-resumer"
import type { BackgroundTask, LaunchInput, ResumeInput } from "./types"
import type { OpencodeClient, OnSubagentSessionCreated, QueueItem } from "./constants"
import { TMUX_CALLBACK_DELAY_MS } from "./constants"
import { log, getAgentToolRestrictions, promptWithModelSuggestionRetry } from "../../shared"
import { subagentSessions } from "../claude-code-session-state"
import { getTaskToastManager } from "../task-toast-manager"
import { isInsideTmux } from "../../shared/tmux"
import type { ConcurrencyManager } from "./concurrency"
/**
 * Dependencies handed to the task spawner functions (startTask / resumeTask).
 * Bundled so the spawner stays free of module-level state and is easy to stub in tests.
 */
export interface SpawnerContext {
// Opencode API client used to create/get sessions and send prompts.
client: OpencodeClient
// Fallback working directory when the parent session's directory cannot be read.
directory: string
// Tracks per-key concurrency slots; startTask releases a slot on create failure.
concurrencyManager: ConcurrencyManager
// Whether tmux pane spawning is enabled via config.
tmuxEnabled: boolean
// Optional callback fired after a subagent session is created (used to spawn a tmux pane).
onSubagentSessionCreated?: OnSubagentSessionCreated
// Invoked when the fire-and-forget prompt for a task rejects.
onTaskError: (task: BackgroundTask, error: Error) => void
}
/**
 * Build a fresh BackgroundTask record for a launch request.
 *
 * The task starts in the "pending" state with a creation timestamp; all
 * launch parameters are copied through verbatim. The id is a short random
 * handle of the form `bg_xxxxxxxx` (first 8 hex chars of a UUID).
 */
export function createTask(input: LaunchInput): BackgroundTask {
  // Short random suffix — enough to distinguish concurrent tasks in logs.
  const shortId = crypto.randomUUID().slice(0, 8)
  const {
    description,
    prompt,
    agent,
    parentSessionID,
    parentMessageID,
    parentModel,
    parentAgent,
    model,
  } = input
  const task: BackgroundTask = {
    id: `bg_${shortId}`,
    status: "pending",
    queuedAt: new Date(),
    description,
    prompt,
    agent,
    parentSessionID,
    parentMessageID,
    parentModel,
    parentAgent,
    model,
  }
  return task
}
/**
 * Start a queued background task: create a child session (inheriting the
 * parent's permissions, with "question" forced to deny), optionally spawn a
 * tmux pane for it, mark the task running, and fire off the agent prompt
 * without awaiting its completion.
 *
 * NOTE(review): this function RELEASES the concurrency slot on session-create
 * failure but never acquires one — presumably the caller acquired the slot
 * before enqueueing; confirm against the queue/manager code.
 *
 * @throws Error when session creation returns an error or rejects.
 */
export async function startTask(
item: QueueItem,
ctx: SpawnerContext
): Promise<void> {
const { task, input } = item
const { client, directory, concurrencyManager, tmuxEnabled, onSubagentSessionCreated, onTaskError } = ctx
log("[background-agent] Starting task:", {
taskId: task.id,
agent: input.agent,
model: input.model,
})
// Concurrency is keyed per explicit model ("provider/model") when one was
// requested, otherwise per agent name.
const concurrencyKey = input.model
? `${input.model.providerID}/${input.model.modelID}`
: input.agent
// Best-effort lookup of the parent session; a failure degrades to the
// context's fallback directory instead of aborting the launch.
const parentSession = await client.session.get({
path: { id: input.parentSessionID },
}).catch((err) => {
log(`[background-agent] Failed to get parent session: ${err}`)
return null
})
const parentDirectory = parentSession?.data?.directory ?? directory
log(`[background-agent] Parent dir: ${parentSession?.data?.directory}, using: ${parentDirectory}`)
// Inherit the parent's permission rules, dropping any "question" rule and
// appending an explicit deny — background agents have no UI to answer questions.
const inheritedPermission = (parentSession as any)?.data?.permission
const permissionRules = Array.isArray(inheritedPermission)
? inheritedPermission.filter((r: any) => r?.permission !== "question")
: []
permissionRules.push({ permission: "question", action: "deny" as const, pattern: "*" })
// Create the child session in the parent's directory. On any failure the
// concurrency slot is released before the error propagates.
const createResult = await client.session.create({
body: {
parentID: input.parentSessionID,
title: `Background: ${input.description}`,
permission: permissionRules,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
} as any,
query: {
directory: parentDirectory,
},
}).catch((error) => {
concurrencyManager.release(concurrencyKey)
throw error
})
if (createResult.error) {
concurrencyManager.release(concurrencyKey)
throw new Error(`Failed to create background session: ${createResult.error}`)
}
const sessionID = createResult.data.id
// Register the child so other hooks can recognize it as a subagent session.
subagentSessions.add(sessionID)
log("[background-agent] tmux callback check", {
hasCallback: !!onSubagentSessionCreated,
tmuxEnabled,
isInsideTmux: isInsideTmux(),
sessionID,
parentID: input.parentSessionID,
})
// Spawn a tmux pane for the session only when a callback is wired, tmux is
// enabled in config, AND we are actually running inside tmux.
if (onSubagentSessionCreated && tmuxEnabled && isInsideTmux()) {
log("[background-agent] Invoking tmux callback NOW", { sessionID })
await onSubagentSessionCreated({
sessionID,
parentID: input.parentSessionID,
title: input.description,
}).catch((err) => {
log("[background-agent] Failed to spawn tmux pane:", err)
})
log("[background-agent] tmux callback completed, waiting")
// Brief delay after pane spawn before prompting — see TMUX_CALLBACK_DELAY_MS.
await new Promise(r => setTimeout(r, TMUX_CALLBACK_DELAY_MS))
} else {
log("[background-agent] SKIP tmux callback - conditions not met")
}
// Flip the task to running and record its session + concurrency bookkeeping.
task.status = "running"
task.startedAt = new Date()
task.sessionID = sessionID
task.progress = {
toolCalls: 0,
lastUpdate: new Date(),
}
task.concurrencyKey = concurrencyKey
task.concurrencyGroup = concurrencyKey
log("[background-agent] Launching task:", { taskId: task.id, sessionID, agent: input.agent })
const toastManager = getTaskToastManager()
if (toastManager) {
toastManager.updateTask(task.id, "running")
}
log("[background-agent] Calling prompt (fire-and-forget) for launch with:", {
sessionID,
agent: input.agent,
model: input.model,
hasSkillContent: !!input.skillContent,
promptLength: input.prompt.length,
})
const launchModel = input.model
? { providerID: input.model.providerID, modelID: input.model.modelID }
: undefined
const launchVariant = input.model?.variant
// Fire-and-forget: the prompt is intentionally not awaited. Errors are routed
// to onTaskError rather than thrown, since startTask has already returned.
promptWithModelSuggestionRetry(client, {
path: { id: sessionID },
body: {
agent: input.agent,
...(launchModel ? { model: launchModel } : {}),
...(launchVariant ? { variant: launchVariant } : {}),
system: input.skillContent,
// Restrict tooling: no nested task spawning, no interactive questions.
tools: {
...getAgentToolRestrictions(input.agent),
task: false,
call_omo_agent: true,
question: false,
},
parts: [{ type: "text", text: input.prompt }],
},
}).catch((error) => {
log("[background-agent] promptAsync error:", error)
onTaskError(task, error instanceof Error ? error : new Error(String(error)))
})
}
/**
 * Resume a previously started (non-running) background task in its existing
 * session: re-acquire a concurrency slot, reset completion/error state, and
 * send the new prompt fire-and-forget.
 *
 * No-op (with a log) if the task is already running. Unlike startTask, this
 * function DOES acquire its own concurrency slot.
 *
 * @throws Error when the task has no sessionID to resume into.
 */
export async function resumeTask(
task: BackgroundTask,
input: ResumeInput,
ctx: Pick<SpawnerContext, "client" | "concurrencyManager" | "onTaskError">
): Promise<void> {
const { client, concurrencyManager, onTaskError } = ctx
if (!task.sessionID) {
throw new Error(`Task has no sessionID: ${task.id}`)
}
// Idempotence guard: resuming an already-running task is skipped silently.
if (task.status === "running") {
log("[background-agent] Resume skipped - task already running:", {
taskId: task.id,
sessionID: task.sessionID,
})
return
}
// Reuse the original concurrency group when known, else fall back to the agent name.
const concurrencyKey = task.concurrencyGroup ?? task.agent
await concurrencyManager.acquire(concurrencyKey)
task.concurrencyKey = concurrencyKey
task.concurrencyGroup = concurrencyKey
// Reset terminal state and re-point the task at its (possibly new) parent.
task.status = "running"
task.completedAt = undefined
task.error = undefined
task.parentSessionID = input.parentSessionID
task.parentMessageID = input.parentMessageID
task.parentModel = input.parentModel
task.parentAgent = input.parentAgent
task.startedAt = new Date()
// Preserve the prior tool-call count across the resume.
task.progress = {
toolCalls: task.progress?.toolCalls ?? 0,
lastUpdate: new Date(),
}
subagentSessions.add(task.sessionID)
const toastManager = getTaskToastManager()
if (toastManager) {
toastManager.addTask({
id: task.id,
description: task.description,
agent: task.agent,
isBackground: true,
})
}
log("[background-agent] Resuming task:", { taskId: task.id, sessionID: task.sessionID })
log("[background-agent] Resuming task - calling prompt (fire-and-forget) with:", {
sessionID: task.sessionID,
agent: task.agent,
model: task.model,
promptLength: input.prompt.length,
})
const resumeModel = task.model
? { providerID: task.model.providerID, modelID: task.model.modelID }
: undefined
const resumeVariant = task.model?.variant
// Fire-and-forget: errors surface via onTaskError, not by throwing here.
client.session.promptAsync({
path: { id: task.sessionID },
body: {
agent: task.agent,
...(resumeModel ? { model: resumeModel } : {}),
...(resumeVariant ? { variant: resumeVariant } : {}),
// Same restrictions as launch: no nested tasks, no interactive questions.
tools: {
...getAgentToolRestrictions(task.agent),
task: false,
call_omo_agent: true,
question: false,
},
parts: [{ type: "text", text: input.prompt }],
},
}).catch((error) => {
log("[background-agent] resume prompt error:", error)
onTaskError(task, error instanceof Error ? error : new Error(String(error)))
})
}

View File

@ -4,20 +4,23 @@ import type { TrackedSession, CapacityConfig } from "./types"
import {
isInsideTmux as defaultIsInsideTmux,
getCurrentPaneId as defaultGetCurrentPaneId,
POLL_INTERVAL_BACKGROUND_MS,
SESSION_MISSING_GRACE_MS,
SESSION_READY_POLL_INTERVAL_MS,
SESSION_READY_TIMEOUT_MS,
} from "../../shared/tmux"
import { log } from "../../shared"
import type { SessionMapping } from "./decision-engine"
import {
coerceSessionCreatedEvent,
handleSessionCreated,
handleSessionDeleted,
type SessionCreatedEvent,
} from "./event-handlers"
import { createSessionPollingController, type SessionPollingController } from "./polling"
import { cleanupTmuxSessions } from "./cleanup"
import { queryWindowState } from "./pane-state-querier"
import { decideSpawnActions, decideCloseAction, type SessionMapping } from "./decision-engine"
import { executeActions, executeAction } from "./action-executor"
import { TmuxPollingManager } from "./polling-manager"
type OpencodeClient = PluginInput["client"]
interface SessionCreatedEvent {
type: string
properties?: { info?: { id?: string; parentID?: string; title?: string } }
}
export interface TmuxUtilDeps {
isInsideTmux: () => boolean
getCurrentPaneId: () => string | undefined
@ -28,6 +31,13 @@ const defaultTmuxDeps: TmuxUtilDeps = {
getCurrentPaneId: defaultGetCurrentPaneId,
}
const SESSION_TIMEOUT_MS = 10 * 60 * 1000
// Stability detection constants (prevents premature closure - see issue #1330)
// Mirrors the proven pattern from background-agent/manager.ts
const MIN_STABILITY_TIME_MS = 10 * 1000 // Must run at least 10s before stability detection kicks in
const STABLE_POLLS_REQUIRED = 3 // 3 consecutive idle polls (~6s with 2s poll interval)
/**
* State-first Tmux Session Manager
*
@ -48,8 +58,7 @@ export class TmuxSessionManager {
private sessions = new Map<string, TrackedSession>()
private pendingSessions = new Set<string>()
private deps: TmuxUtilDeps
private polling: SessionPollingController
private pollingManager: TmuxPollingManager
constructor(ctx: PluginInput, tmuxConfig: TmuxConfig, deps: TmuxUtilDeps = defaultTmuxDeps) {
this.client = ctx.client
this.tmuxConfig = tmuxConfig
@ -57,15 +66,11 @@ export class TmuxSessionManager {
const defaultPort = process.env.OPENCODE_PORT ?? "4096"
this.serverUrl = ctx.serverUrl?.toString() ?? `http://localhost:${defaultPort}`
this.sourcePaneId = deps.getCurrentPaneId()
this.polling = createSessionPollingController({
client: this.client,
tmuxConfig: this.tmuxConfig,
serverUrl: this.serverUrl,
sourcePaneId: this.sourcePaneId,
sessions: this.sessions,
})
this.pollingManager = new TmuxPollingManager(
this.client,
this.sessions,
this.closeSessionById.bind(this)
)
log("[tmux-session-manager] initialized", {
configEnabled: this.tmuxConfig.enabled,
tmuxConfig: this.tmuxConfig,
@ -73,7 +78,6 @@ export class TmuxSessionManager {
sourcePaneId: this.sourcePaneId,
})
}
private isEnabled(): boolean {
return this.tmuxConfig.enabled && this.deps.isInsideTmux()
}
@ -93,58 +97,254 @@ export class TmuxSessionManager {
}))
}
/**
 * Poll the server's session-status map until the given session appears,
 * giving up after SESSION_READY_TIMEOUT_MS.
 *
 * @returns true once the session is listed; false on timeout. Individual
 *   status-call errors are logged and retried rather than propagated.
 */
private async waitForSessionReady(sessionId: string): Promise<boolean> {
const startTime = Date.now()
while (Date.now() - startTime < SESSION_READY_TIMEOUT_MS) {
try {
// Status endpoint returns a map keyed by session id; presence = ready.
const statusResult = await this.client.session.status({ path: undefined })
const allStatuses = (statusResult.data ?? {}) as Record<string, { type: string }>
if (allStatuses[sessionId]) {
log("[tmux-session-manager] session ready", {
sessionId,
status: allStatuses[sessionId].type,
waitedMs: Date.now() - startTime,
})
return true
}
} catch (err) {
// Transient status failures are tolerated; keep polling until timeout.
log("[tmux-session-manager] session status check error", { error: String(err) })
}
await new Promise((resolve) => setTimeout(resolve, SESSION_READY_POLL_INTERVAL_MS))
}
log("[tmux-session-manager] session ready timeout", {
sessionId,
timeoutMs: SESSION_READY_TIMEOUT_MS,
})
return false
}
// NOTE: Exposed (via `as any`) for test stability checks.
// Actual polling is owned by TmuxPollingManager.
// This thin wrapper only delegates one poll cycle; the `as any` cast reaches
// the polling manager's private pollSessions — TODO consider a public method.
private async pollSessions(): Promise<void> {
await (this.pollingManager as any).pollSessions()
}
async onSessionCreated(event: SessionCreatedEvent): Promise<void> {
await handleSessionCreated(
{
client: this.client,
tmuxConfig: this.tmuxConfig,
serverUrl: this.serverUrl,
sourcePaneId: this.sourcePaneId,
sessions: this.sessions,
pendingSessions: this.pendingSessions,
isInsideTmux: this.deps.isInsideTmux,
isEnabled: () => this.isEnabled(),
getCapacityConfig: () => this.getCapacityConfig(),
getSessionMappings: () => this.getSessionMappings(),
waitForSessionReady: (sessionId) => this.polling.waitForSessionReady(sessionId),
startPolling: () => this.polling.startPolling(),
},
event,
)
const enabled = this.isEnabled()
log("[tmux-session-manager] onSessionCreated called", {
enabled,
tmuxConfigEnabled: this.tmuxConfig.enabled,
isInsideTmux: this.deps.isInsideTmux(),
eventType: event.type,
infoId: event.properties?.info?.id,
infoParentID: event.properties?.info?.parentID,
})
if (!enabled) return
if (event.type !== "session.created") return
const info = event.properties?.info
if (!info?.id || !info?.parentID) return
const sessionId = info.id
const title = info.title ?? "Subagent"
if (this.sessions.has(sessionId) || this.pendingSessions.has(sessionId)) {
log("[tmux-session-manager] session already tracked or pending", { sessionId })
return
}
if (!this.sourcePaneId) {
log("[tmux-session-manager] no source pane id")
return
}
this.pendingSessions.add(sessionId)
try {
const state = await queryWindowState(this.sourcePaneId)
if (!state) {
log("[tmux-session-manager] failed to query window state")
return
}
log("[tmux-session-manager] window state queried", {
windowWidth: state.windowWidth,
mainPane: state.mainPane?.paneId,
agentPaneCount: state.agentPanes.length,
agentPanes: state.agentPanes.map((p) => p.paneId),
})
const decision = decideSpawnActions(
state,
sessionId,
title,
this.getCapacityConfig(),
this.getSessionMappings()
)
log("[tmux-session-manager] spawn decision", {
canSpawn: decision.canSpawn,
reason: decision.reason,
actionCount: decision.actions.length,
actions: decision.actions.map((a) => {
if (a.type === "close") return { type: "close", paneId: a.paneId }
if (a.type === "replace") return { type: "replace", paneId: a.paneId, newSessionId: a.newSessionId }
return { type: "spawn", sessionId: a.sessionId }
}),
})
if (!decision.canSpawn) {
log("[tmux-session-manager] cannot spawn", { reason: decision.reason })
return
}
const result = await executeActions(
decision.actions,
{ config: this.tmuxConfig, serverUrl: this.serverUrl, windowState: state }
)
for (const { action, result: actionResult } of result.results) {
if (action.type === "close" && actionResult.success) {
this.sessions.delete(action.sessionId)
log("[tmux-session-manager] removed closed session from cache", {
sessionId: action.sessionId,
})
}
if (action.type === "replace" && actionResult.success) {
this.sessions.delete(action.oldSessionId)
log("[tmux-session-manager] removed replaced session from cache", {
oldSessionId: action.oldSessionId,
newSessionId: action.newSessionId,
})
}
}
if (result.success && result.spawnedPaneId) {
const sessionReady = await this.waitForSessionReady(sessionId)
if (!sessionReady) {
log("[tmux-session-manager] session not ready after timeout, tracking anyway", {
sessionId,
paneId: result.spawnedPaneId,
})
}
const now = Date.now()
this.sessions.set(sessionId, {
sessionId,
paneId: result.spawnedPaneId,
description: title,
createdAt: new Date(now),
lastSeenAt: new Date(now),
})
log("[tmux-session-manager] pane spawned and tracked", {
sessionId,
paneId: result.spawnedPaneId,
sessionReady,
})
this.pollingManager.startPolling()
} else {
log("[tmux-session-manager] spawn failed", {
success: result.success,
results: result.results.map((r) => ({
type: r.action.type,
success: r.result.success,
error: r.result.error,
})),
})
}
} finally {
this.pendingSessions.delete(sessionId)
}
}
async onSessionDeleted(event: { sessionID: string }): Promise<void> {
await handleSessionDeleted(
{
tmuxConfig: this.tmuxConfig,
serverUrl: this.serverUrl,
sourcePaneId: this.sourcePaneId,
sessions: this.sessions,
isEnabled: () => this.isEnabled(),
getSessionMappings: () => this.getSessionMappings(),
stopPolling: () => this.polling.stopPolling(),
},
event,
)
if (!this.isEnabled()) return
if (!this.sourcePaneId) return
const tracked = this.sessions.get(event.sessionID)
if (!tracked) return
log("[tmux-session-manager] onSessionDeleted", { sessionId: event.sessionID })
const state = await queryWindowState(this.sourcePaneId)
if (!state) {
this.sessions.delete(event.sessionID)
return
}
const closeAction = decideCloseAction(state, event.sessionID, this.getSessionMappings())
if (closeAction) {
await executeAction(closeAction, { config: this.tmuxConfig, serverUrl: this.serverUrl, windowState: state })
}
this.sessions.delete(event.sessionID)
if (this.sessions.size === 0) {
this.pollingManager.stopPolling()
}
}
/**
 * Close the tmux pane tracked for a session and drop it from the cache.
 * No-op when the session is not tracked. Stops polling once no tracked
 * sessions remain.
 */
private async closeSessionById(sessionId: string): Promise<void> {
const tracked = this.sessions.get(sessionId)
if (!tracked) return
log("[tmux-session-manager] closing session pane", {
sessionId,
paneId: tracked.paneId,
})
// Window state is needed to execute the close action; without a source pane
// (or if the query fails) we only drop the cache entry.
const state = this.sourcePaneId ? await queryWindowState(this.sourcePaneId) : null
if (state) {
await executeAction(
{ type: "close", paneId: tracked.paneId, sessionId },
{ config: this.tmuxConfig, serverUrl: this.serverUrl, windowState: state }
)
}
this.sessions.delete(sessionId)
// Last tracked session gone — no reason to keep the poll loop alive.
if (this.sessions.size === 0) {
this.pollingManager.stopPolling()
}
}
createEventHandler(): (input: { event: { type: string; properties?: unknown } }) => Promise<void> {
return async (input) => {
await this.onSessionCreated(coerceSessionCreatedEvent(input.event))
await this.onSessionCreated(input.event as SessionCreatedEvent)
}
}
async pollSessions(): Promise<void> {
return this.polling.pollSessions()
}
async cleanup(): Promise<void> {
await cleanupTmuxSessions({
tmuxConfig: this.tmuxConfig,
serverUrl: this.serverUrl,
sourcePaneId: this.sourcePaneId,
sessions: this.sessions,
stopPolling: () => this.polling.stopPolling(),
})
this.pollingManager.stopPolling()
if (this.sessions.size > 0) {
log("[tmux-session-manager] closing all panes", { count: this.sessions.size })
const state = this.sourcePaneId ? await queryWindowState(this.sourcePaneId) : null
if (state) {
const closePromises = Array.from(this.sessions.values()).map((s) =>
executeAction(
{ type: "close", paneId: s.paneId, sessionId: s.sessionId },
{ config: this.tmuxConfig, serverUrl: this.serverUrl, windowState: state }
).catch((err) =>
log("[tmux-session-manager] cleanup error for pane", {
paneId: s.paneId,
error: String(err),
}),
),
)
await Promise.all(closePromises)
}
this.sessions.clear()
}
log("[tmux-session-manager] cleanup complete")
}
}

View File

@@ -0,0 +1,66 @@
/// <reference types="bun-types" />
import { describe, expect, mock, test } from "bun:test"
// Guard mock: the implementation must never go through execSync (shell
// interpolation) — any call fails the test immediately.
const execSyncMock = mock(() => {
throw new Error("execSync should not be called")
})
// Stub git invocations: return canned numstat/porcelain output and reject
// anything other than the two expected subcommands.
const execFileSyncMock = mock((file: string, args: string[], _opts: { cwd?: string }) => {
if (file !== "git") throw new Error(`unexpected file: ${file}`)
const subcommand = args[0]
if (subcommand === "diff") {
return "1\t2\tfile.ts\n"
}
if (subcommand === "status") {
return " M file.ts\n"
}
throw new Error(`unexpected args: ${args.join(" ")}`)
})
// Swap node:child_process for the mocks BEFORE the module under test loads it.
mock.module("node:child_process", () => ({
execSync: execSyncMock,
execFileSync: execFileSyncMock,
}))
// Dynamic import so the mocked child_process module is picked up.
const { collectGitDiffStats } = await import("./collect-git-diff-stats")
describe("collectGitDiffStats", () => {
test("uses execFileSync with arg arrays (no shell injection)", () => {
//#given
// A directory name containing shell metacharacters — safe only if passed
// as cwd, never interpolated into a command string.
const directory = "/tmp/safe-repo;touch /tmp/pwn"
//#when
const result = collectGitDiffStats(directory)
//#then
expect(execSyncMock).not.toHaveBeenCalled()
expect(execFileSyncMock).toHaveBeenCalledTimes(2)
const [firstCallFile, firstCallArgs, firstCallOpts] = execFileSyncMock.mock
.calls[0]! as unknown as [string, string[], { cwd?: string }]
expect(firstCallFile).toBe("git")
expect(firstCallArgs).toEqual(["diff", "--numstat", "HEAD"])
expect(firstCallOpts.cwd).toBe(directory)
// The directory must travel via opts.cwd, not inside the argument vector.
expect(firstCallArgs.join(" ")).not.toContain(directory)
const [secondCallFile, secondCallArgs, secondCallOpts] = execFileSyncMock.mock
.calls[1]! as unknown as [string, string[], { cwd?: string }]
expect(secondCallFile).toBe("git")
expect(secondCallArgs).toEqual(["status", "--porcelain"])
expect(secondCallOpts.cwd).toBe(directory)
expect(secondCallArgs.join(" ")).not.toContain(directory)
// Canned git output parsed into one modified-file stat entry.
expect(result).toEqual([
{
path: "file.ts",
added: 1,
removed: 2,
status: "modified",
},
])
})
})

View File

@ -8,30 +8,32 @@ export function migrateConfigFile(
configPath: string,
rawConfig: Record<string, unknown>
): boolean {
// Work on a deep copy — only apply changes to rawConfig if file write succeeds
const copy = structuredClone(rawConfig)
let needsWrite = false
// Load previously applied migrations
const existingMigrations = Array.isArray(rawConfig._migrations)
? new Set(rawConfig._migrations as string[])
const existingMigrations = Array.isArray(copy._migrations)
? new Set(copy._migrations as string[])
: new Set<string>()
const allNewMigrations: string[] = []
if (rawConfig.agents && typeof rawConfig.agents === "object") {
const { migrated, changed } = migrateAgentNames(rawConfig.agents as Record<string, unknown>)
if (copy.agents && typeof copy.agents === "object") {
const { migrated, changed } = migrateAgentNames(copy.agents as Record<string, unknown>)
if (changed) {
rawConfig.agents = migrated
copy.agents = migrated
needsWrite = true
}
}
// Migrate model versions in agents (skip already-applied migrations)
if (rawConfig.agents && typeof rawConfig.agents === "object") {
if (copy.agents && typeof copy.agents === "object") {
const { migrated, changed, newMigrations } = migrateModelVersions(
rawConfig.agents as Record<string, unknown>,
copy.agents as Record<string, unknown>,
existingMigrations
)
if (changed) {
rawConfig.agents = migrated
copy.agents = migrated
needsWrite = true
log("Migrated model versions in agents config")
}
@ -39,13 +41,13 @@ export function migrateConfigFile(
}
// Migrate model versions in categories (skip already-applied migrations)
if (rawConfig.categories && typeof rawConfig.categories === "object") {
if (copy.categories && typeof copy.categories === "object") {
const { migrated, changed, newMigrations } = migrateModelVersions(
rawConfig.categories as Record<string, unknown>,
copy.categories as Record<string, unknown>,
existingMigrations
)
if (changed) {
rawConfig.categories = migrated
copy.categories = migrated
needsWrite = true
log("Migrated model versions in categories config")
}
@ -56,20 +58,20 @@ export function migrateConfigFile(
if (allNewMigrations.length > 0) {
const updatedMigrations = Array.from(existingMigrations)
updatedMigrations.push(...allNewMigrations)
rawConfig._migrations = updatedMigrations
copy._migrations = updatedMigrations
needsWrite = true
}
if (rawConfig.omo_agent) {
rawConfig.sisyphus_agent = rawConfig.omo_agent
delete rawConfig.omo_agent
if (copy.omo_agent) {
copy.sisyphus_agent = copy.omo_agent
delete copy.omo_agent
needsWrite = true
}
if (rawConfig.disabled_agents && Array.isArray(rawConfig.disabled_agents)) {
if (copy.disabled_agents && Array.isArray(copy.disabled_agents)) {
const migrated: string[] = []
let changed = false
for (const agent of rawConfig.disabled_agents as string[]) {
for (const agent of copy.disabled_agents as string[]) {
const newAgent = AGENT_NAME_MAP[agent.toLowerCase()] ?? AGENT_NAME_MAP[agent] ?? agent
if (newAgent !== agent) {
changed = true
@ -77,15 +79,15 @@ export function migrateConfigFile(
migrated.push(newAgent)
}
if (changed) {
rawConfig.disabled_agents = migrated
copy.disabled_agents = migrated
needsWrite = true
}
}
if (rawConfig.disabled_hooks && Array.isArray(rawConfig.disabled_hooks)) {
const { migrated, changed, removed } = migrateHookNames(rawConfig.disabled_hooks as string[])
if (copy.disabled_hooks && Array.isArray(copy.disabled_hooks)) {
const { migrated, changed, removed } = migrateHookNames(copy.disabled_hooks as string[])
if (changed) {
rawConfig.disabled_hooks = migrated
copy.disabled_hooks = migrated
needsWrite = true
}
if (removed.length > 0) {
@ -99,13 +101,25 @@ export function migrateConfigFile(
try {
const timestamp = new Date().toISOString().replace(/[:.]/g, "-")
const backupPath = `${configPath}.bak.${timestamp}`
fs.copyFileSync(configPath, backupPath)
try {
fs.copyFileSync(configPath, backupPath)
} catch {
// Original file may not exist yet — skip backup
}
fs.writeFileSync(configPath, JSON.stringify(rawConfig, null, 2) + "\n", "utf-8")
fs.writeFileSync(configPath, JSON.stringify(copy, null, 2) + "\n", "utf-8")
log(`Migrated config file: ${configPath} (backup: ${backupPath})`)
} catch (err) {
log(`Failed to write migrated config to ${configPath}:`, err)
// File write failed — rawConfig is untouched, preserving user's original values
return false
}
// File write succeeded — apply changes to the original rawConfig
for (const key of Object.keys(rawConfig)) {
delete rawConfig[key]
}
Object.assign(rawConfig, copy)
}
return needsWrite

View File

@ -5,198 +5,174 @@ import { tmpdir } from "os"
import { join } from "path"
let __resetModelCache: () => void
let fetchAvailableModels: (
client?: unknown,
options?: { connectedProviders?: string[] | null },
) => Promise<Set<string>>
let fetchAvailableModels: (client?: unknown, options?: { connectedProviders?: string[] | null }) => Promise<Set<string>>
let fuzzyMatchModel: (target: string, available: Set<string>, providers?: string[]) => string | null
let isModelAvailable: (targetModel: string, availableModels: Set<string>) => boolean
let getConnectedProviders: (client: unknown) => Promise<string[]>
beforeAll(async () => {
;({
__resetModelCache,
fetchAvailableModels,
fuzzyMatchModel,
isModelAvailable,
getConnectedProviders,
} = await import("./model-availability"))
;({
__resetModelCache,
fetchAvailableModels,
fuzzyMatchModel,
isModelAvailable,
getConnectedProviders,
} = await import("./model-availability"))
})
describe("fetchAvailableModels", () => {
let tempDir: string
let tempDir: string
let originalXdgCache: string | undefined
beforeEach(() => {
__resetModelCache()
tempDir = mkdtempSync(join(tmpdir(), "opencode-test-"))
beforeEach(() => {
__resetModelCache()
tempDir = mkdtempSync(join(tmpdir(), "opencode-test-"))
originalXdgCache = process.env.XDG_CACHE_HOME
process.env.XDG_CACHE_HOME = tempDir
})
})
afterEach(() => {
if (originalXdgCache !== undefined) {
afterEach(() => {
if (originalXdgCache !== undefined) {
process.env.XDG_CACHE_HOME = originalXdgCache
} else {
delete process.env.XDG_CACHE_HOME
}
rmSync(tempDir, { recursive: true, force: true })
})
rmSync(tempDir, { recursive: true, force: true })
})
function writeModelsCache(data: Record<string, any>) {
const cacheDir = join(tempDir, "opencode")
require("fs").mkdirSync(cacheDir, { recursive: true })
writeFileSync(join(cacheDir, "models.json"), JSON.stringify(data))
}
function writeModelsCache(data: Record<string, any>) {
const cacheDir = join(tempDir, "opencode")
require("fs").mkdirSync(cacheDir, { recursive: true })
writeFileSync(join(cacheDir, "models.json"), JSON.stringify(data))
}
it("#given cache file with models #when fetchAvailableModels called with connectedProviders #then returns Set of model IDs", async () => {
writeModelsCache({
openai: { id: "openai", models: { "gpt-5.2": { id: "gpt-5.2" } } },
anthropic: {
id: "anthropic",
models: { "claude-opus-4-6": { id: "claude-opus-4-6" } },
},
google: { id: "google", models: { "gemini-3-pro": { id: "gemini-3-pro" } } },
})
it("#given cache file with models #when fetchAvailableModels called with connectedProviders #then returns Set of model IDs", async () => {
writeModelsCache({
openai: { id: "openai", models: { "gpt-5.2": { id: "gpt-5.2" } } },
anthropic: { id: "anthropic", models: { "claude-opus-4-6": { id: "claude-opus-4-6" } } },
google: { id: "google", models: { "gemini-3-pro": { id: "gemini-3-pro" } } },
})
const result = await fetchAvailableModels(undefined, {
connectedProviders: ["openai", "anthropic", "google"],
})
const result = await fetchAvailableModels(undefined, {
connectedProviders: ["openai", "anthropic", "google"]
})
expect(result).toBeInstanceOf(Set)
expect(result.size).toBe(3)
expect(result.has("openai/gpt-5.2")).toBe(true)
expect(result.has("anthropic/claude-opus-4-6")).toBe(true)
expect(result.has("google/gemini-3-pro")).toBe(true)
})
expect(result).toBeInstanceOf(Set)
expect(result.size).toBe(3)
expect(result.has("openai/gpt-5.2")).toBe(true)
expect(result.has("anthropic/claude-opus-4-6")).toBe(true)
expect(result.has("google/gemini-3-pro")).toBe(true)
})
it("#given connectedProviders unknown #when fetchAvailableModels called without options #then returns empty Set", async () => {
writeModelsCache({
openai: { id: "openai", models: { "gpt-5.2": { id: "gpt-5.2" } } },
})
it("#given connectedProviders unknown #when fetchAvailableModels called without options #then returns empty Set", async () => {
writeModelsCache({
openai: { id: "openai", models: { "gpt-5.2": { id: "gpt-5.2" } } },
})
const result = await fetchAvailableModels()
const result = await fetchAvailableModels()
expect(result).toBeInstanceOf(Set)
expect(result.size).toBe(0)
})
expect(result).toBeInstanceOf(Set)
expect(result.size).toBe(0)
})
it("#given connectedProviders unknown but client can list #when fetchAvailableModels called with client #then returns models from API filtered by connected providers", async () => {
const client = {
provider: {
list: async () => ({ data: { connected: ["openai"] } }),
},
model: {
list: async () => ({
data: [
{ id: "gpt-5.3-codex", provider: "openai" },
{ id: "gemini-3-pro", provider: "google" },
],
}),
},
}
it("#given connectedProviders unknown but client can list #when fetchAvailableModels called with client #then returns models from API filtered by connected providers", async () => {
const client = {
provider: {
list: async () => ({ data: { connected: ["openai"] } }),
},
model: {
list: async () => ({
data: [
{ id: "gpt-5.3-codex", provider: "openai" },
{ id: "gemini-3-pro", provider: "google" },
],
}),
},
}
const result = await fetchAvailableModels(client)
const result = await fetchAvailableModels(client)
expect(result).toBeInstanceOf(Set)
expect(result.has("openai/gpt-5.3-codex")).toBe(true)
expect(result.has("google/gemini-3-pro")).toBe(false)
})
expect(result).toBeInstanceOf(Set)
expect(result.has("openai/gpt-5.3-codex")).toBe(true)
expect(result.has("google/gemini-3-pro")).toBe(false)
})
it("#given cache file not found #when fetchAvailableModels called with connectedProviders #then returns empty Set", async () => {
const result = await fetchAvailableModels(undefined, {
connectedProviders: ["openai"],
})
it("#given cache file not found #when fetchAvailableModels called with connectedProviders #then returns empty Set", async () => {
const result = await fetchAvailableModels(undefined, { connectedProviders: ["openai"] })
expect(result).toBeInstanceOf(Set)
expect(result.size).toBe(0)
})
expect(result).toBeInstanceOf(Set)
expect(result.size).toBe(0)
})
it("#given cache missing but client can list #when fetchAvailableModels called with connectedProviders #then returns models from API", async () => {
const client = {
provider: {
list: async () => ({ data: { connected: ["openai", "google"] } }),
},
model: {
list: async () => ({
data: [
{ id: "gpt-5.3-codex", provider: "openai" },
{ id: "gemini-3-pro", provider: "google" },
],
}),
},
}
it("#given cache missing but client can list #when fetchAvailableModels called with connectedProviders #then returns models from API", async () => {
const client = {
provider: {
list: async () => ({ data: { connected: ["openai", "google"] } }),
},
model: {
list: async () => ({
data: [
{ id: "gpt-5.3-codex", provider: "openai" },
{ id: "gemini-3-pro", provider: "google" },
],
}),
},
}
const result = await fetchAvailableModels(client, {
connectedProviders: ["openai", "google"],
})
const result = await fetchAvailableModels(client, { connectedProviders: ["openai", "google"] })
expect(result).toBeInstanceOf(Set)
expect(result.has("openai/gpt-5.3-codex")).toBe(true)
expect(result.has("google/gemini-3-pro")).toBe(true)
})
expect(result).toBeInstanceOf(Set)
expect(result.has("openai/gpt-5.3-codex")).toBe(true)
expect(result.has("google/gemini-3-pro")).toBe(true)
})
it("#given cache read twice #when second call made with same providers #then reads fresh each time", async () => {
writeModelsCache({
openai: { id: "openai", models: { "gpt-5.2": { id: "gpt-5.2" } } },
anthropic: {
id: "anthropic",
models: { "claude-opus-4-6": { id: "claude-opus-4-6" } },
},
})
it("#given cache read twice #when second call made with same providers #then reads fresh each time", async () => {
writeModelsCache({
openai: { id: "openai", models: { "gpt-5.2": { id: "gpt-5.2" } } },
anthropic: { id: "anthropic", models: { "claude-opus-4-6": { id: "claude-opus-4-6" } } },
})
const result1 = await fetchAvailableModels(undefined, {
connectedProviders: ["openai"],
})
const result2 = await fetchAvailableModels(undefined, {
connectedProviders: ["openai"],
})
const result1 = await fetchAvailableModels(undefined, { connectedProviders: ["openai"] })
const result2 = await fetchAvailableModels(undefined, { connectedProviders: ["openai"] })
expect(result1.size).toBe(result2.size)
expect(result1.has("openai/gpt-5.2")).toBe(true)
})
expect(result1.size).toBe(result2.size)
expect(result1.has("openai/gpt-5.2")).toBe(true)
})
it("#given empty providers in cache #when fetchAvailableModels called with connectedProviders #then returns empty Set", async () => {
writeModelsCache({})
it("#given empty providers in cache #when fetchAvailableModels called with connectedProviders #then returns empty Set", async () => {
writeModelsCache({})
const result = await fetchAvailableModels(undefined, {
connectedProviders: ["openai"],
})
const result = await fetchAvailableModels(undefined, { connectedProviders: ["openai"] })
expect(result).toBeInstanceOf(Set)
expect(result.size).toBe(0)
})
expect(result).toBeInstanceOf(Set)
expect(result.size).toBe(0)
})
it("#given cache file with various providers #when fetchAvailableModels called with all providers #then extracts all IDs correctly", async () => {
writeModelsCache({
openai: {
id: "openai",
models: { "gpt-5.3-codex": { id: "gpt-5.3-codex" } },
},
anthropic: {
id: "anthropic",
models: { "claude-sonnet-4-5": { id: "claude-sonnet-4-5" } },
},
google: {
id: "google",
models: { "gemini-3-flash": { id: "gemini-3-flash" } },
},
opencode: { id: "opencode", models: { "gpt-5-nano": { id: "gpt-5-nano" } } },
})
it("#given cache file with various providers #when fetchAvailableModels called with all providers #then extracts all IDs correctly", async () => {
writeModelsCache({
openai: { id: "openai", models: { "gpt-5.3-codex": { id: "gpt-5.3-codex" } } },
anthropic: { id: "anthropic", models: { "claude-sonnet-4-5": { id: "claude-sonnet-4-5" } } },
google: { id: "google", models: { "gemini-3-flash": { id: "gemini-3-flash" } } },
opencode: { id: "opencode", models: { "gpt-5-nano": { id: "gpt-5-nano" } } },
})
const result = await fetchAvailableModels(undefined, {
connectedProviders: ["openai", "anthropic", "google", "opencode"],
})
const result = await fetchAvailableModels(undefined, {
connectedProviders: ["openai", "anthropic", "google", "opencode"]
})
expect(result.size).toBe(4)
expect(result.has("openai/gpt-5.3-codex")).toBe(true)
expect(result.has("anthropic/claude-sonnet-4-5")).toBe(true)
expect(result.has("google/gemini-3-flash")).toBe(true)
expect(result.has("opencode/gpt-5-nano")).toBe(true)
})
expect(result.size).toBe(4)
expect(result.has("openai/gpt-5.3-codex")).toBe(true)
expect(result.has("anthropic/claude-sonnet-4-5")).toBe(true)
expect(result.has("google/gemini-3-flash")).toBe(true)
expect(result.has("opencode/gpt-5-nano")).toBe(true)
})
})
describe("fuzzyMatchModel", () => {
// given available models from multiple providers
// when searching for a substring match
// then return the matching model
it("should match substring in model name", () => {
const available = new Set([
"openai/gpt-5.2",
@ -207,6 +183,9 @@ describe("fuzzyMatchModel", () => {
expect(result).toBe("openai/gpt-5.2")
})
// given available model with preview suffix
// when searching with provider-prefixed base model
// then return preview model
it("should match preview suffix for gemini-3-flash", () => {
const available = new Set(["google/gemini-3-flash-preview"])
const result = fuzzyMatchModel(
@ -217,6 +196,9 @@ describe("fuzzyMatchModel", () => {
expect(result).toBe("google/gemini-3-flash-preview")
})
// given available models with partial matches
// when searching for a substring
// then return exact match if it exists
it("should prefer exact match over substring match", () => {
const available = new Set([
"openai/gpt-5.2",
@ -227,6 +209,9 @@ describe("fuzzyMatchModel", () => {
expect(result).toBe("openai/gpt-5.2")
})
// given available models with multiple substring matches
// when searching for a substring
// then return the shorter model name (more specific)
it("should prefer shorter model name when multiple matches exist", () => {
const available = new Set([
"openai/gpt-5.2-ultra",
@ -236,6 +221,9 @@ describe("fuzzyMatchModel", () => {
expect(result).toBe("openai/gpt-5.2-ultra")
})
// given available models with claude variants
// when searching for claude-opus
// then return matching claude-opus model
it("should match claude-opus to claude-opus-4-6", () => {
const available = new Set([
"anthropic/claude-opus-4-6",
@ -245,6 +233,9 @@ describe("fuzzyMatchModel", () => {
expect(result).toBe("anthropic/claude-opus-4-6")
})
// given available models from multiple providers
// when providers filter is specified
// then only search models from specified providers
it("should filter by provider when providers array is given", () => {
const available = new Set([
"openai/gpt-5.2",
@ -255,6 +246,9 @@ describe("fuzzyMatchModel", () => {
expect(result).toBe("openai/gpt-5.2")
})
// given available models from multiple providers
// when providers filter excludes matching models
// then return null
it("should return null when provider filter excludes all matches", () => {
const available = new Set([
"openai/gpt-5.2",
@ -264,6 +258,9 @@ describe("fuzzyMatchModel", () => {
expect(result).toBeNull()
})
// given available models
// when no substring match exists
// then return null
it("should return null when no match found", () => {
const available = new Set([
"openai/gpt-5.2",
@ -273,6 +270,9 @@ describe("fuzzyMatchModel", () => {
expect(result).toBeNull()
})
// given available models with different cases
// when searching with different case
// then match case-insensitively
it("should match case-insensitively", () => {
const available = new Set([
"openai/gpt-5.2",
@ -282,6 +282,9 @@ describe("fuzzyMatchModel", () => {
expect(result).toBe("openai/gpt-5.2")
})
// given available models with exact match and longer variants
// when searching for exact match
// then return exact match first
it("should prioritize exact match over longer variants", () => {
const available = new Set([
"anthropic/claude-opus-4-6",
@ -291,6 +294,9 @@ describe("fuzzyMatchModel", () => {
expect(result).toBe("anthropic/claude-opus-4-6")
})
// given available models with similar model IDs (e.g., glm-4.7 and glm-4.7-free)
// when searching for the longer variant (glm-4.7-free)
// then return exact model ID match, not the shorter one
it("should prefer exact model ID match over shorter substring match", () => {
const available = new Set([
"zai-coding-plan/glm-4.7",
@ -300,6 +306,9 @@ describe("fuzzyMatchModel", () => {
expect(result).toBe("zai-coding-plan/glm-4.7-free")
})
// given available models with similar model IDs
// when searching for the shorter variant
// then return the shorter match (existing behavior preserved)
it("should still prefer shorter match when searching for shorter variant", () => {
const available = new Set([
"zai-coding-plan/glm-4.7",
@ -309,12 +318,21 @@ describe("fuzzyMatchModel", () => {
expect(result).toBe("zai-coding-plan/glm-4.7")
})
// given same model ID from multiple providers
// when searching for exact model ID
// then return shortest full string (preserves tie-break behavior)
it("should use shortest tie-break when multiple providers have same model ID", () => {
const available = new Set(["opencode/gpt-5.2", "openai/gpt-5.2"])
const available = new Set([
"opencode/gpt-5.2",
"openai/gpt-5.2",
])
const result = fuzzyMatchModel("gpt-5.2", available)
expect(result).toBe("openai/gpt-5.2")
})
// given available models with multiple providers
// when multiple providers are specified
// then search all specified providers
it("should search all specified providers", () => {
const available = new Set([
"openai/gpt-5.2",
@ -325,12 +343,21 @@ describe("fuzzyMatchModel", () => {
expect(result).toBe("openai/gpt-5.2")
})
// given available models with provider prefix
// when searching with provider filter
// then only match models with correct provider prefix
it("should only match models with correct provider prefix", () => {
const available = new Set(["openai/gpt-5.2", "anthropic/gpt-something"])
const available = new Set([
"openai/gpt-5.2",
"anthropic/gpt-something",
])
const result = fuzzyMatchModel("gpt", available, ["openai"])
expect(result).toBe("openai/gpt-5.2")
})
// given empty available set
// when searching
// then return null
it("should return null for empty available set", () => {
const available = new Set<string>()
const result = fuzzyMatchModel("gpt", available)
@ -339,13 +366,16 @@ describe("fuzzyMatchModel", () => {
})
describe("getConnectedProviders", () => {
// given SDK client with connected providers
// when provider.list returns data
// then returns connected array
it("should return connected providers from SDK", async () => {
const mockClient = {
provider: {
list: async () => ({
data: { connected: ["anthropic", "opencode", "google"] },
}),
},
data: { connected: ["anthropic", "opencode", "google"] }
})
}
}
const result = await getConnectedProviders(mockClient)
@ -353,13 +383,14 @@ describe("getConnectedProviders", () => {
expect(result).toEqual(["anthropic", "opencode", "google"])
})
// given SDK client
// when provider.list throws error
// then returns empty array
it("should return empty array on SDK error", async () => {
const mockClient = {
provider: {
list: async () => {
throw new Error("Network error")
},
},
list: async () => { throw new Error("Network error") }
}
}
const result = await getConnectedProviders(mockClient)
@ -367,11 +398,14 @@ describe("getConnectedProviders", () => {
expect(result).toEqual([])
})
// given SDK client with empty connected array
// when provider.list returns empty
// then returns empty array
it("should return empty array when no providers connected", async () => {
const mockClient = {
provider: {
list: async () => ({ data: { connected: [] } }),
},
list: async () => ({ data: { connected: [] } })
}
}
const result = await getConnectedProviders(mockClient)
@ -379,6 +413,9 @@ describe("getConnectedProviders", () => {
expect(result).toEqual([])
})
// given SDK client without provider.list method
// when getConnectedProviders called
// then returns empty array
it("should return empty array when client.provider.list not available", async () => {
const mockClient = {}
@ -387,17 +424,23 @@ describe("getConnectedProviders", () => {
expect(result).toEqual([])
})
// given null client
// when getConnectedProviders called
// then returns empty array
it("should return empty array for null client", async () => {
const result = await getConnectedProviders(null)
expect(result).toEqual([])
})
// given SDK client with missing data.connected
// when provider.list returns without connected field
// then returns empty array
it("should return empty array when data.connected is undefined", async () => {
const mockClient = {
provider: {
list: async () => ({ data: {} }),
},
list: async () => ({ data: {} })
}
}
const result = await getConnectedProviders(mockClient)
@ -432,6 +475,9 @@ describe("fetchAvailableModels with connected providers filtering", () => {
writeFileSync(join(cacheDir, "models.json"), JSON.stringify(data))
}
// given cache with multiple providers
// when connectedProviders specifies one provider
// then only returns models from that provider
it("should filter models by connected providers", async () => {
writeModelsCache({
openai: { models: { "gpt-5.2": { id: "gpt-5.2" } } },
@ -440,7 +486,7 @@ describe("fetchAvailableModels with connected providers filtering", () => {
})
const result = await fetchAvailableModels(undefined, {
connectedProviders: ["anthropic"],
connectedProviders: ["anthropic"]
})
expect(result.size).toBe(1)
@ -449,6 +495,9 @@ describe("fetchAvailableModels with connected providers filtering", () => {
expect(result.has("google/gemini-3-pro")).toBe(false)
})
// given cache with multiple providers
// when connectedProviders specifies multiple providers
// then returns models from all specified providers
it("should filter models by multiple connected providers", async () => {
writeModelsCache({
openai: { models: { "gpt-5.2": { id: "gpt-5.2" } } },
@ -457,7 +506,7 @@ describe("fetchAvailableModels with connected providers filtering", () => {
})
const result = await fetchAvailableModels(undefined, {
connectedProviders: ["anthropic", "google"],
connectedProviders: ["anthropic", "google"]
})
expect(result.size).toBe(2)
@ -466,6 +515,9 @@ describe("fetchAvailableModels with connected providers filtering", () => {
expect(result.has("openai/gpt-5.2")).toBe(false)
})
// given cache with models
// when connectedProviders is empty array
// then returns empty set
it("should return empty set when connectedProviders is empty", async () => {
writeModelsCache({
openai: { models: { "gpt-5.2": { id: "gpt-5.2" } } },
@ -473,12 +525,15 @@ describe("fetchAvailableModels with connected providers filtering", () => {
})
const result = await fetchAvailableModels(undefined, {
connectedProviders: [],
connectedProviders: []
})
expect(result.size).toBe(0)
})
// given cache with models
// when connectedProviders is undefined (no options)
// then returns empty set (triggers fallback in resolver)
it("should return empty set when connectedProviders not specified", async () => {
writeModelsCache({
openai: { models: { "gpt-5.2": { id: "gpt-5.2" } } },
@ -490,18 +545,24 @@ describe("fetchAvailableModels with connected providers filtering", () => {
expect(result.size).toBe(0)
})
// given cache with models
// when connectedProviders contains provider not in cache
// then returns empty set for that provider
it("should handle provider not in cache gracefully", async () => {
writeModelsCache({
openai: { models: { "gpt-5.2": { id: "gpt-5.2" } } },
})
const result = await fetchAvailableModels(undefined, {
connectedProviders: ["azure"],
connectedProviders: ["azure"]
})
expect(result.size).toBe(0)
})
// given cache with models and mixed connected providers
// when some providers exist in cache and some don't
// then returns models only from matching providers
it("should return models from providers that exist in both cache and connected list", async () => {
writeModelsCache({
openai: { models: { "gpt-5.2": { id: "gpt-5.2" } } },
@ -509,31 +570,39 @@ describe("fetchAvailableModels with connected providers filtering", () => {
})
const result = await fetchAvailableModels(undefined, {
connectedProviders: ["anthropic", "azure", "unknown"],
connectedProviders: ["anthropic", "azure", "unknown"]
})
expect(result.size).toBe(1)
expect(result.has("anthropic/claude-opus-4-6")).toBe(true)
})
// given filtered fetch
// when called twice with different filters
// then does NOT use cache (dynamic per-session)
it("should not cache filtered results", async () => {
writeModelsCache({
openai: { models: { "gpt-5.2": { id: "gpt-5.2" } } },
anthropic: { models: { "claude-opus-4-6": { id: "claude-opus-4-6" } } },
})
// First call with anthropic
const result1 = await fetchAvailableModels(undefined, {
connectedProviders: ["anthropic"],
connectedProviders: ["anthropic"]
})
expect(result1.size).toBe(1)
// Second call with openai - should work, not cached
const result2 = await fetchAvailableModels(undefined, {
connectedProviders: ["openai"],
connectedProviders: ["openai"]
})
expect(result2.size).toBe(1)
expect(result2.has("openai/gpt-5.2")).toBe(true)
})
// given connectedProviders unknown
// when called twice without connectedProviders
// then always returns empty set (triggers fallback)
it("should return empty set when connectedProviders unknown", async () => {
writeModelsCache({
openai: { models: { "gpt-5.2": { id: "gpt-5.2" } } },
@ -567,19 +636,13 @@ describe("fetchAvailableModels with provider-models cache (whitelist-filtered)",
rmSync(tempDir, { recursive: true, force: true })
})
function writeProviderModelsCache(data: {
models: Record<string, string[] | any[]>
connected: string[]
}) {
function writeProviderModelsCache(data: { models: Record<string, string[] | any[]>; connected: string[] }) {
const cacheDir = join(tempDir, "oh-my-opencode")
require("fs").mkdirSync(cacheDir, { recursive: true })
writeFileSync(
join(cacheDir, "provider-models.json"),
JSON.stringify({
...data,
updatedAt: new Date().toISOString(),
}),
)
writeFileSync(join(cacheDir, "provider-models.json"), JSON.stringify({
...data,
updatedAt: new Date().toISOString()
}))
}
function writeModelsCache(data: Record<string, any>) {
@ -588,21 +651,24 @@ describe("fetchAvailableModels with provider-models cache (whitelist-filtered)",
writeFileSync(join(cacheDir, "models.json"), JSON.stringify(data))
}
// given provider-models cache exists (whitelist-filtered)
// when fetchAvailableModels called
// then uses provider-models cache instead of models.json
it("should prefer provider-models cache over models.json", async () => {
writeProviderModelsCache({
models: {
opencode: ["glm-4.7-free", "gpt-5-nano"],
anthropic: ["claude-opus-4-6"],
anthropic: ["claude-opus-4-6"]
},
connected: ["opencode", "anthropic"],
connected: ["opencode", "anthropic"]
})
writeModelsCache({
opencode: { models: { "glm-4.7-free": {}, "gpt-5-nano": {}, "gpt-5.2": {} } },
anthropic: { models: { "claude-opus-4-6": {}, "claude-sonnet-4-5": {} } },
anthropic: { models: { "claude-opus-4-6": {}, "claude-sonnet-4-5": {} } }
})
const result = await fetchAvailableModels(undefined, {
connectedProviders: ["opencode", "anthropic"],
connectedProviders: ["opencode", "anthropic"]
})
expect(result.size).toBe(3)
@ -613,9 +679,13 @@ describe("fetchAvailableModels with provider-models cache (whitelist-filtered)",
expect(result.has("anthropic/claude-sonnet-4-5")).toBe(false)
})
// given provider-models cache exists but has no models (API failure)
// when fetchAvailableModels called
// then falls back to models.json so fuzzy matching can still work
it("should fall back to models.json when provider-models cache is empty", async () => {
writeProviderModelsCache({
models: {},
models: {
},
connected: ["google"],
})
writeModelsCache({
@ -625,22 +695,21 @@ describe("fetchAvailableModels with provider-models cache (whitelist-filtered)",
const availableModels = await fetchAvailableModels(undefined, {
connectedProviders: ["google"],
})
const match = fuzzyMatchModel(
"google/gemini-3-flash",
availableModels,
["google"],
)
const match = fuzzyMatchModel("google/gemini-3-flash", availableModels, ["google"])
expect(match).toBe("google/gemini-3-flash-preview")
})
// given only models.json exists (no provider-models cache)
// when fetchAvailableModels called
// then falls back to models.json (no whitelist filtering)
it("should fallback to models.json when provider-models cache not found", async () => {
writeModelsCache({
opencode: { models: { "glm-4.7-free": {}, "gpt-5-nano": {}, "gpt-5.2": {} } },
})
const result = await fetchAvailableModels(undefined, {
connectedProviders: ["opencode"],
connectedProviders: ["opencode"]
})
expect(result.size).toBe(3)
@ -649,18 +718,21 @@ describe("fetchAvailableModels with provider-models cache (whitelist-filtered)",
expect(result.has("opencode/gpt-5.2")).toBe(true)
})
// given provider-models cache with whitelist
// when connectedProviders filters to subset
// then only returns models from connected providers
it("should filter by connectedProviders even with provider-models cache", async () => {
writeProviderModelsCache({
models: {
opencode: ["glm-4.7-free"],
anthropic: ["claude-opus-4-6"],
google: ["gemini-3-pro"],
google: ["gemini-3-pro"]
},
connected: ["opencode", "anthropic", "google"],
connected: ["opencode", "anthropic", "google"]
})
const result = await fetchAvailableModels(undefined, {
connectedProviders: ["opencode"],
connectedProviders: ["opencode"]
})
expect(result.size).toBe(1)
@ -673,25 +745,15 @@ describe("fetchAvailableModels with provider-models cache (whitelist-filtered)",
writeProviderModelsCache({
models: {
ollama: [
{
id: "ministral-3:14b-32k-agent",
provider: "ollama",
context: 32768,
output: 8192,
},
{
id: "qwen3-coder:32k-agent",
provider: "ollama",
context: 32768,
output: 8192,
},
],
{ id: "ministral-3:14b-32k-agent", provider: "ollama", context: 32768, output: 8192 },
{ id: "qwen3-coder:32k-agent", provider: "ollama", context: 32768, output: 8192 }
]
},
connected: ["ollama"],
connected: ["ollama"]
})
const result = await fetchAvailableModels(undefined, {
connectedProviders: ["ollama"],
connectedProviders: ["ollama"]
})
expect(result.size).toBe(2)
@ -705,14 +767,14 @@ describe("fetchAvailableModels with provider-models cache (whitelist-filtered)",
anthropic: ["claude-opus-4-6", "claude-sonnet-4-5"],
ollama: [
{ id: "ministral-3:14b-32k-agent", provider: "ollama" },
{ id: "qwen3-coder:32k-agent", provider: "ollama" },
],
{ id: "qwen3-coder:32k-agent", provider: "ollama" }
]
},
connected: ["anthropic", "ollama"],
connected: ["anthropic", "ollama"]
})
const result = await fetchAvailableModels(undefined, {
connectedProviders: ["anthropic", "ollama"],
connectedProviders: ["anthropic", "ollama"]
})
expect(result.size).toBe(4)
@ -730,14 +792,14 @@ describe("fetchAvailableModels with provider-models cache (whitelist-filtered)",
{ provider: "ollama" },
{ id: "", provider: "ollama" },
null,
"string-model",
],
"string-model"
]
},
connected: ["ollama"],
connected: ["ollama"]
})
const result = await fetchAvailableModels(undefined, {
connectedProviders: ["ollama"],
connectedProviders: ["ollama"]
})
expect(result.size).toBe(2)
@ -749,10 +811,7 @@ describe("fetchAvailableModels with provider-models cache (whitelist-filtered)",
describe("isModelAvailable", () => {
it("returns true when model exists via fuzzy match", () => {
// given
const available = new Set([
"openai/gpt-5.3-codex",
"anthropic/claude-opus-4-6",
])
const available = new Set(["openai/gpt-5.3-codex", "anthropic/claude-opus-4-6"])
// when
const result = isModelAvailable("gpt-5.3-codex", available)

View File

@ -1,4 +1,358 @@
export { fetchAvailableModels, getConnectedProviders } from "./available-models-fetcher"
export { isAnyFallbackModelAvailable, isAnyProviderConnected } from "./fallback-model-availability"
export { __resetModelCache, isModelCacheAvailable } from "./model-cache-availability"
export { fuzzyMatchModel, isModelAvailable } from "./model-name-matcher"
import { existsSync, readFileSync } from "fs"
import { join } from "path"
import { log } from "./logger"
import { getOpenCodeCacheDir } from "./data-path"
import * as connectedProvidersCache from "./connected-providers-cache"
/**
* Fuzzy match a target model name against available models
*
* @param target - The model name or substring to search for (e.g., "gpt-5.2", "claude-opus")
* @param available - Set of available model names in format "provider/model-name"
* @param providers - Optional array of provider names to filter by (e.g., ["openai", "anthropic"])
* @returns The matched model name or null if no match found
*
* Matching priority:
* 1. Exact match (if exists)
* 2. Shorter model name (more specific)
*
* Matching is case-insensitive substring match.
* If providers array is given, only models starting with "provider/" are considered.
*
* @example
* const available = new Set(["openai/gpt-5.2", "openai/gpt-5.3-codex", "anthropic/claude-opus-4-6"])
* fuzzyMatchModel("gpt-5.2", available) // → "openai/gpt-5.2"
* fuzzyMatchModel("claude", available, ["openai"]) // → null (provider filter excludes anthropic)
*/
function normalizeModelName(name: string): string {
return name
.toLowerCase()
.replace(/claude-(opus|sonnet|haiku)-4-5/g, "claude-$1-4.5")
.replace(/claude-(opus|sonnet|haiku)-4\.5/g, "claude-$1-4.5")
}
/**
 * Fuzzy match a target model name against the set of available models.
 *
 * Matching is a case-insensitive, normalized substring comparison. When a
 * providers array is given, only models whose "provider/" prefix is listed
 * are considered. Ties are resolved by priority: exact full-string match,
 * then exact model-ID match (part after "provider/"), then shortest match.
 *
 * @param target - Model name or substring to search for
 * @param available - Set of "provider/model" identifiers
 * @param providers - Optional provider whitelist
 * @returns The matched "provider/model" string, or null when nothing matches
 */
export function fuzzyMatchModel(
  target: string,
  available: Set<string>,
  providers?: string[],
): string | null {
  log("[fuzzyMatchModel] called", { target, availableCount: available.size, providers })
  if (available.size === 0) {
    log("[fuzzyMatchModel] empty available set")
    return null
  }
  const normalizedTarget = normalizeModelName(target)

  // Restrict the candidate pool to the requested providers, if any.
  let pool = [...available]
  if (providers && providers.length > 0) {
    const allowed = new Set(providers)
    pool = pool.filter((model) => allowed.has(model.split("/")[0]))
    log("[fuzzyMatchModel] filtered by providers", { candidateCount: pool.length, candidates: pool.slice(0, 10) })
  }
  if (pool.length === 0) {
    log("[fuzzyMatchModel] no candidates after filter")
    return null
  }

  // Normalized case-insensitive substring match.
  const matches = pool.filter((model) => normalizeModelName(model).includes(normalizedTarget))
  log("[fuzzyMatchModel] substring matches", { targetNormalized: normalizedTarget, matchCount: matches.length, matches })
  if (matches.length === 0) {
    return null
  }

  // Priority 1: the whole "provider/model" string matches exactly.
  for (const model of matches) {
    if (normalizeModelName(model) === normalizedTarget) {
      log("[fuzzyMatchModel] exact match found", { exactMatch: model })
      return model
    }
  }

  // Shortest-string tiebreaker shared by priorities 2 and 3.
  const shortestOf = (models: string[]): string =>
    models.reduce((best, next) => (next.length < best.length ? next : best))

  // Priority 2: the model ID (everything after "provider/") matches exactly.
  // This lets "glm-4.7-free" beat "glm-4.7"; when several providers expose
  // the same ID (e.g. openai/gpt-5.2 and opencode/gpt-5.2), pick the shortest.
  const idMatches = matches.filter((model) => {
    const modelId = model.split("/").slice(1).join("/")
    return normalizeModelName(modelId) === normalizedTarget
  })
  if (idMatches.length > 0) {
    const result = shortestOf(idMatches)
    log("[fuzzyMatchModel] exact model ID match found", { result, candidateCount: idMatches.length })
    return result
  }

  // Priority 3: fall back to the shortest (most specific) partial match.
  const result = shortestOf(matches)
  log("[fuzzyMatchModel] shortest match", { result })
  return result
}
/**
 * Check if a target model is available (fuzzy match by model name, no provider filtering)
 *
 * @param targetModel - Model name to check (e.g., "gpt-5.3-codex")
 * @param availableModels - Set of available models in "provider/model" format
 * @returns true if model is available, false otherwise
 */
export function isModelAvailable(
  targetModel: string,
  availableModels: Set<string>,
): boolean {
  // Availability is simply "the fuzzy matcher found something".
  const matched = fuzzyMatchModel(targetModel, availableModels)
  return matched !== null
}
/**
 * Fetch the list of connected provider IDs from the OpenCode client.
 *
 * @param client - OpenCode client; only `provider.list` is used
 * @returns Connected provider IDs, or an empty array when the client lacks
 *   `provider.list` or the SDK call fails
 */
export async function getConnectedProviders(client: any): Promise<string[]> {
  // Guard: older/partial clients may not expose provider.list at all.
  if (!client?.provider?.list) {
    log("[getConnectedProviders] client.provider.list not available")
    return []
  }
  try {
    const response = await client.provider.list()
    const providers = response.data?.connected ?? []
    log("[getConnectedProviders] connected providers", { count: providers.length, providers })
    return providers
  } catch (err) {
    // Best-effort: an SDK failure degrades to "no providers known".
    log("[getConnectedProviders] SDK error", { error: String(err) })
    return []
  }
}
/**
 * Build the set of available models as "provider/model" strings.
 *
 * Resolution order:
 * 1. Use `options.connectedProviders` if supplied; otherwise ask the live
 *    client via `getConnectedProviders`.
 * 2. If connected providers are still unknown, return an unfiltered
 *    `client.model.list()` result — or an empty set so callers can fall back
 *    to their own resolution.
 * 3. With known providers, try (in order): the provider-models cache,
 *    the on-disk models.json cache, and finally `client.model.list()`
 *    filtered to connected providers.
 *
 * @param client - Optional OpenCode client; only `provider.list` /
 *   `model.list` are used (shape untyped here)
 * @param options - `connectedProviders`: known connected providers, or
 *   null/undefined when unknown
 * @returns Set of "provider/model" identifiers (possibly empty; never throws)
 */
export async function fetchAvailableModels(
  client?: any,
  options?: { connectedProviders?: string[] | null }
): Promise<Set<string>> {
  let connectedProviders = options?.connectedProviders ?? null
  let connectedProvidersUnknown = connectedProviders === null
  log("[fetchAvailableModels] CALLED", {
    connectedProvidersUnknown,
    connectedProviders: options?.connectedProviders
  })
  // Step 1: try to learn connected providers from the live client.
  if (connectedProvidersUnknown && client) {
    const liveConnected = await getConnectedProviders(client)
    if (liveConnected.length > 0) {
      connectedProviders = liveConnected
      connectedProvidersUnknown = false
      log("[fetchAvailableModels] connected providers fetched from client", { count: liveConnected.length })
    }
  }
  // Step 2: providers still unknown — return everything the client reports
  // (no provider filtering), or an empty set for fallback resolution.
  if (connectedProvidersUnknown) {
    if (client?.model?.list) {
      const modelSet = new Set<string>()
      try {
        const modelsResult = await client.model.list()
        const models = modelsResult.data ?? []
        for (const model of models) {
          // Skip entries missing either half of the "provider/id" pair.
          if (model?.provider && model?.id) {
            modelSet.add(`${model.provider}/${model.id}`)
          }
        }
        log("[fetchAvailableModels] fetched models from client without provider filter", {
          count: modelSet.size,
        })
        return modelSet
      } catch (err) {
        // Fall through to the empty-set return below.
        log("[fetchAvailableModels] client.model.list error", { error: String(err) })
      }
    }
    log("[fetchAvailableModels] connected providers unknown, returning empty set for fallback resolution")
    return new Set<string>()
  }
  const connectedProvidersList = connectedProviders ?? []
  const connectedSet = new Set(connectedProvidersList)
  const modelSet = new Set<string>()
  // Step 3a: provider-models cache (already whitelist-filtered upstream).
  const providerModelsCache = connectedProvidersCache.readProviderModelsCache()
  if (providerModelsCache) {
    const providerCount = Object.keys(providerModelsCache.models).length
    if (providerCount === 0) {
      log("[fetchAvailableModels] provider-models cache empty, falling back to models.json")
    } else {
      log("[fetchAvailableModels] using provider-models cache (whitelist-filtered)")
      const modelsByProvider = providerModelsCache.models as Record<string, Array<string | { id?: string }>>
      for (const [providerId, modelIds] of Object.entries(modelsByProvider)) {
        // Only keep providers the caller is actually connected to.
        if (!connectedSet.has(providerId)) {
          continue
        }
        for (const modelItem of modelIds) {
          // Handle both string[] (legacy) and object[] (with metadata) formats
          const modelId = typeof modelItem === 'string'
            ? modelItem
            : (modelItem as any)?.id
          if (modelId) {
            modelSet.add(`${providerId}/${modelId}`)
          }
        }
      }
      log("[fetchAvailableModels] parsed from provider-models cache", {
        count: modelSet.size,
        connectedProviders: connectedProvidersList.slice(0, 5)
      })
      if (modelSet.size > 0) {
        return modelSet
      }
      log("[fetchAvailableModels] provider-models cache produced no models for connected providers, falling back to models.json")
    }
  }
  // Step 3b: on-disk models.json cache.
  log("[fetchAvailableModels] provider-models cache not found, falling back to models.json")
  const cacheFile = join(getOpenCodeCacheDir(), "models.json")
  if (!existsSync(cacheFile)) {
    log("[fetchAvailableModels] models.json cache file not found, falling back to client")
  } else {
    try {
      const content = readFileSync(cacheFile, "utf-8")
      const data = JSON.parse(content) as Record<string, { id?: string; models?: Record<string, { id?: string }> }>
      const providerIds = Object.keys(data)
      log("[fetchAvailableModels] providers found in models.json", { count: providerIds.length, providers: providerIds.slice(0, 10) })
      for (const providerId of providerIds) {
        if (!connectedSet.has(providerId)) {
          continue
        }
        const provider = data[providerId]
        const models = provider?.models
        // Defensive: tolerate malformed provider entries in the cache file.
        if (!models || typeof models !== "object") continue
        for (const modelKey of Object.keys(models)) {
          modelSet.add(`${providerId}/${modelKey}`)
        }
      }
      log("[fetchAvailableModels] parsed models from models.json (NO whitelist filtering)", {
        count: modelSet.size,
        connectedProviders: connectedProvidersList.slice(0, 5)
      })
      if (modelSet.size > 0) {
        return modelSet
      }
    } catch (err) {
      // Unreadable/corrupt cache file: fall through to the client query.
      log("[fetchAvailableModels] error", { error: String(err) })
    }
  }
  // Step 3c: last resort — query the client and filter by connected providers.
  if (client?.model?.list) {
    try {
      const modelsResult = await client.model.list()
      const models = modelsResult.data ?? []
      for (const model of models) {
        if (!model?.provider || !model?.id) continue
        if (connectedSet.has(model.provider)) {
          modelSet.add(`${model.provider}/${model.id}`)
        }
      }
      log("[fetchAvailableModels] fetched models from client (filtered)", {
        count: modelSet.size,
        connectedProviders: connectedProvidersList.slice(0, 5),
      })
    } catch (err) {
      log("[fetchAvailableModels] client.model.list error", { error: String(err) })
    }
  }
  return modelSet
}
export function isAnyFallbackModelAvailable(
fallbackChain: Array<{ providers: string[]; model: string }>,
availableModels: Set<string>,
): boolean {
// If we have models, check them first
if (availableModels.size > 0) {
for (const entry of fallbackChain) {
const hasAvailableProvider = entry.providers.some((provider) => {
return fuzzyMatchModel(entry.model, availableModels, [provider]) !== null
})
if (hasAvailableProvider) {
return true
}
}
}
// Fallback: check if any provider in the chain is connected
// This handles race conditions where availableModels is empty or incomplete
// but we know the provider is connected.
const connectedProviders = connectedProvidersCache.readConnectedProvidersCache()
if (connectedProviders) {
const connectedSet = new Set(connectedProviders)
for (const entry of fallbackChain) {
if (entry.providers.some((p) => connectedSet.has(p))) {
log("[isAnyFallbackModelAvailable] model not in available set, but provider is connected", {
model: entry.model,
availableCount: availableModels.size,
})
return true
}
}
}
return false
}
export function isAnyProviderConnected(
providers: string[],
availableModels: Set<string>,
): boolean {
if (availableModels.size > 0) {
const providerSet = new Set(providers)
for (const model of availableModels) {
const [provider] = model.split("/")
if (providerSet.has(provider)) {
log("[isAnyProviderConnected] found model from required provider", { provider, model })
return true
}
}
}
const connectedProviders = connectedProvidersCache.readConnectedProvidersCache()
if (connectedProviders) {
const connectedSet = new Set(connectedProviders)
for (const provider of providers) {
if (connectedSet.has(provider)) {
log("[isAnyProviderConnected] provider connected via cache", { provider })
return true
}
}
}
return false
}
// Intentional no-op kept for API/test compatibility — presumably an earlier
// implementation held an in-memory model cache that tests reset via this
// hook; TODO(review): confirm remaining callers before removing.
export function __resetModelCache(): void {}
/**
 * Report whether any on-disk model cache exists.
 *
 * @returns true when either the provider-models cache or the models.json
 *   file in the OpenCode cache directory is present
 */
export function isModelCacheAvailable(): boolean {
  // Prefer the whitelist-filtered provider-models cache; fall back to the
  // raw models.json snapshot on disk.
  return (
    connectedProvidersCache.hasProviderModelsCache() ||
    existsSync(join(getOpenCodeCacheDir(), "models.json"))
  )
}

View File

@ -1,16 +1,37 @@
import { log } from "./logger"
import { readConnectedProvidersCache } from "./connected-providers-cache"
import * as connectedProvidersCache from "./connected-providers-cache"
import { fuzzyMatchModel } from "./model-availability"
import type {
ModelResolutionRequest,
ModelResolutionResult,
} from "./model-resolution-types"
import type { FallbackEntry } from "./model-requirements"
export type {
ModelResolutionProvenance,
ModelResolutionRequest,
ModelResolutionResult,
} from "./model-resolution-types"
export type ModelResolutionRequest = {
intent?: {
uiSelectedModel?: string
userModel?: string
categoryDefaultModel?: string
}
constraints: {
availableModels: Set<string>
connectedProviders?: string[] | null
}
policy?: {
fallbackChain?: FallbackEntry[]
systemDefaultModel?: string
}
}
export type ModelResolutionProvenance =
| "override"
| "category-default"
| "provider-fallback"
| "system-default"
export type ModelResolutionResult = {
model: string
provenance: ModelResolutionProvenance
variant?: string
attempted?: string[]
reason?: string
}
function normalizeModel(model?: string): string | undefined {
const trimmed = model?.trim()
@ -53,7 +74,7 @@ export function resolveModelPipeline(
return { model: match, provenance: "category-default", attempted }
}
} else {
const connectedProviders = readConnectedProvidersCache()
const connectedProviders = constraints.connectedProviders ?? connectedProvidersCache.readConnectedProvidersCache()
if (connectedProviders === null) {
log("Model resolved via category default (no cache, first run)", {
model: normalizedCategoryDefault,
@ -78,7 +99,7 @@ export function resolveModelPipeline(
if (fallbackChain && fallbackChain.length > 0) {
if (availableModels.size === 0) {
const connectedProviders = readConnectedProvidersCache()
const connectedProviders = constraints.connectedProviders ?? connectedProvidersCache.readConnectedProvidersCache()
const connectedSet = connectedProviders ? new Set(connectedProviders) : null
if (connectedSet === null) {

View File

@ -10,11 +10,27 @@ import * as connectedProvidersCache from "../../shared/connected-providers-cache
const SYSTEM_DEFAULT_MODEL = "anthropic/claude-sonnet-4-5"
const TEST_CONNECTED_PROVIDERS = ["anthropic", "google", "openai"]
const TEST_AVAILABLE_MODELS = new Set([
"anthropic/claude-opus-4-6",
"anthropic/claude-sonnet-4-5",
"anthropic/claude-haiku-4-5",
"google/gemini-3-pro",
"google/gemini-3-flash",
"openai/gpt-5.2",
"openai/gpt-5.3-codex",
])
function createTestAvailableModels(): Set<string> {
return new Set(TEST_AVAILABLE_MODELS)
}
describe("sisyphus-task", () => {
let cacheSpy: ReturnType<typeof spyOn>
let providerModelsSpy: ReturnType<typeof spyOn>
beforeEach(() => {
mock.restore()
__resetModelCache()
clearSkillCache()
__setTimingConfig({
@ -271,6 +287,8 @@ describe("sisyphus-task", () => {
const tool = createDelegateTask({
manager: mockManager,
client: mockClient,
connectedProvidersOverride: TEST_CONNECTED_PROVIDERS,
availableModelsOverride: createTestAvailableModels(),
})
const toolContext = {
@ -324,6 +342,8 @@ describe("sisyphus-task", () => {
const tool = createDelegateTask({
manager: mockManager,
client: mockClient,
connectedProvidersOverride: TEST_CONNECTED_PROVIDERS,
availableModelsOverride: createTestAvailableModels(),
})
const toolContext = {
@ -436,6 +456,8 @@ describe("sisyphus-task", () => {
const tool = createDelegateTask({
manager: mockManager,
client: mockClient,
connectedProvidersOverride: TEST_CONNECTED_PROVIDERS,
availableModelsOverride: createTestAvailableModels(),
})
const metadataCalls: Array<{ title?: string; metadata?: Record<string, unknown> }> = []
@ -727,6 +749,8 @@ describe("sisyphus-task", () => {
userCategories: {
ultrabrain: { model: "openai/gpt-5.2", variant: "xhigh" },
},
connectedProvidersOverride: TEST_CONNECTED_PROVIDERS,
availableModelsOverride: createTestAvailableModels(),
})
const toolContext = {
@ -790,6 +814,8 @@ describe("sisyphus-task", () => {
const tool = createDelegateTask({
manager: mockManager,
client: mockClient,
connectedProvidersOverride: TEST_CONNECTED_PROVIDERS,
availableModelsOverride: createTestAvailableModels(),
})
const toolContext = {
@ -1950,6 +1976,8 @@ describe("sisyphus-task", () => {
client: mockClient,
// userCategories: undefined - use DEFAULT_CATEGORIES only
// sisyphusJuniorModel: undefined
connectedProvidersOverride: null,
availableModelsOverride: new Set(),
})
const toolContext = {
@ -2013,6 +2041,8 @@ describe("sisyphus-task", () => {
userCategories: {
"fallback-test": { model: "anthropic/claude-opus-4-6" },
},
connectedProvidersOverride: TEST_CONNECTED_PROVIDERS,
availableModelsOverride: createTestAvailableModels(),
})
const toolContext = {
@ -2072,6 +2102,8 @@ describe("sisyphus-task", () => {
manager: mockManager,
client: mockClient,
sisyphusJuniorModel: "anthropic/claude-sonnet-4-5",
connectedProvidersOverride: TEST_CONNECTED_PROVIDERS,
availableModelsOverride: createTestAvailableModels(),
})
const toolContext = {
@ -2135,6 +2167,8 @@ describe("sisyphus-task", () => {
userCategories: {
ultrabrain: { model: "openai/gpt-5.3-codex" },
},
connectedProvidersOverride: TEST_CONNECTED_PROVIDERS,
availableModelsOverride: createTestAvailableModels(),
})
const toolContext = {
@ -2194,6 +2228,8 @@ describe("sisyphus-task", () => {
manager: mockManager,
client: mockClient,
sisyphusJuniorModel: "anthropic/claude-sonnet-4-5",
connectedProvidersOverride: TEST_CONNECTED_PROVIDERS,
availableModelsOverride: createTestAvailableModels(),
})
const toolContext = {
@ -3207,6 +3243,8 @@ describe("sisyphus-task", () => {
manager: mockManager,
client: mockClient,
// no agentOverrides
connectedProvidersOverride: TEST_CONNECTED_PROVIDERS,
availableModelsOverride: createTestAvailableModels(),
})
const toolContext = {

View File

@ -50,6 +50,15 @@ export interface DelegateTaskToolOptions {
manager: BackgroundManager
client: OpencodeClient
directory: string
/**
* Test hook: bypass global cache reads (Bun runs tests in parallel).
* If provided, resolveCategoryExecution/resolveSubagentExecution uses this instead of reading from disk cache.
*/
connectedProvidersOverride?: string[] | null
/**
* Test hook: bypass fetchAvailableModels() by providing an explicit available model set.
*/
availableModelsOverride?: Set<string>
userCategories?: CategoriesConfig
gitMasterConfig?: GitMasterConfig
sisyphusJuniorModel?: string