fix: use cache file for model availability instead of SDK calls

- Changed fetchAvailableModels to read from ~/.cache/opencode/models.json
- Prevents plugin startup hanging caused by SDK client.config.providers() call
- Updated doctor model-resolution check to show available models from cache
- Added cache info display: provider count, model count, refresh command
This commit is contained in:
justsisyphus 2026-01-23 14:09:37 +09:00
parent f4348885f2
commit ab3e622baa
5 changed files with 148 additions and 80 deletions

View File

@@ -115,8 +115,9 @@ describe("model-resolution check", () => {
// #then: Details should contain agent/category resolution info
expect(result.details).toBeDefined()
expect(result.details!.length).toBeGreaterThan(0)
// Should have Current Models header and sections
expect(result.details!.some((d) => d.includes("Current Models"))).toBe(true)
// Should have Available Models and Configured Models headers
expect(result.details!.some((d) => d.includes("Available Models"))).toBe(true)
expect(result.details!.some((d) => d.includes("Configured Models"))).toBe(true)
expect(result.details!.some((d) => d.includes("Agents:"))).toBe(true)
expect(result.details!.some((d) => d.includes("Categories:"))).toBe(true)
// Should have legend

View File

@@ -1,4 +1,4 @@
import { readFileSync } from "node:fs"
import { readFileSync, existsSync } from "node:fs"
import type { CheckResult, CheckDefinition } from "../types"
import { CHECK_IDS, CHECK_NAMES } from "../constants"
import { parseJsonc, detectConfigFile } from "../../../shared"
@@ -10,6 +10,38 @@ import {
import { homedir } from "node:os"
import { join } from "node:path"
/** Resolves the opencode cache directory, honoring XDG_CACHE_HOME when set. */
function getOpenCodeCacheDir(): string {
  // An empty XDG_CACHE_HOME falls through to the default, per the XDG spec.
  const base = process.env.XDG_CACHE_HOME || join(homedir(), ".cache")
  return join(base, "opencode")
}
/**
 * Summarizes the opencode models cache (models.json in the cache dir):
 * which providers it lists and the total number of models they expose.
 * A missing or unparsable cache file is reported as cacheExists: false
 * rather than throwing.
 */
function loadAvailableModels(): { providers: string[]; modelCount: number; cacheExists: boolean } {
  const cacheFile = join(getOpenCodeCacheDir(), "models.json")
  if (!existsSync(cacheFile)) {
    return { providers: [], modelCount: 0, cacheExists: false }
  }
  try {
    const data = JSON.parse(readFileSync(cacheFile, "utf-8")) as Record<string, { models?: Record<string, unknown> }>
    const providers = Object.keys(data)
    // Sum model keys across providers; entries without an object-valued
    // `models` field contribute nothing.
    const modelCount = providers.reduce((total, providerId) => {
      const models = data[providerId]?.models
      return models && typeof models === "object" ? total + Object.keys(models).length : total
    }, 0)
    return { providers, modelCount, cacheExists: true }
  } catch {
    // Corrupt JSON (or a null root) lands here; treat it like a missing cache.
    return { providers: [], modelCount: 0, cacheExists: false }
  }
}
const PACKAGE_NAME = "oh-my-opencode"
const USER_CONFIG_DIR = join(homedir(), ".config", "opencode")
const USER_CONFIG_BASE = join(USER_CONFIG_DIR, PACKAGE_NAME)
@@ -155,10 +187,28 @@ function getEffectiveVariant(requirement: ModelRequirement): string | undefined
return firstEntry?.variant ?? requirement.variant
}
function buildDetailsArray(info: ModelResolutionInfo): string[] {
/** Summary of the opencode models cache, as produced by loadAvailableModels(). */
interface AvailableModelsInfo {
  providers: string[] // provider IDs (top-level keys of models.json)
  modelCount: number // total model count summed across all providers
  cacheExists: boolean // false when the cache file is missing or unreadable
}
function buildDetailsArray(info: ModelResolutionInfo, available: AvailableModelsInfo): string[] {
const details: string[] = []
details.push("═══ Current Models ═══")
details.push("═══ Available Models (from cache) ═══")
details.push("")
if (available.cacheExists) {
details.push(` Providers: ${available.providers.length} (${available.providers.slice(0, 8).join(", ")}${available.providers.length > 8 ? "..." : ""})`)
details.push(` Total models: ${available.modelCount}`)
details.push(` Cache: ~/.cache/opencode/models.json`)
details.push(` Refresh: opencode models --refresh`)
} else {
details.push(" ⚠ Cache not found. Run 'opencode' to populate.")
}
details.push("")
details.push("═══ Configured Models ═══")
details.push("")
details.push("Agents:")
for (const agent of info.agents) {
@@ -182,6 +232,7 @@ function buildDetailsArray(info: ModelResolutionInfo): string[] {
export async function checkModelResolution(): Promise<CheckResult> {
const config = loadConfig() ?? {}
const info = getModelResolutionInfoWithOverrides(config)
const available = loadAvailableModels()
const agentCount = info.agents.length
const categoryCount = info.categories.length
@@ -190,12 +241,13 @@
const totalOverrides = agentOverrides + categoryOverrides
const overrideNote = totalOverrides > 0 ? ` (${totalOverrides} override${totalOverrides > 1 ? "s" : ""})` : ""
const cacheNote = available.cacheExists ? `, ${available.modelCount} available` : ", cache not found"
return {
name: CHECK_NAMES[CHECK_IDS.MODEL_RESOLUTION],
status: "pass",
message: `${agentCount} agents, ${categoryCount} categories${overrideNote}`,
details: buildDetailsArray(info),
status: available.cacheExists ? "pass" : "warn",
message: `${agentCount} agents, ${categoryCount} categories${overrideNote}${cacheNote}`,
details: buildDetailsArray(info, available),
}
}

View File

@@ -79,6 +79,7 @@ import { createModelCacheState, getModelLimit } from "./plugin-state";
import { createConfigHandler } from "./plugin-handlers";
const OhMyOpenCodePlugin: Plugin = async (ctx) => {
log("[OhMyOpenCodePlugin] ENTRY - plugin loading", { directory: ctx.directory })
// Start background tmux check immediately
startTmuxCheck();

View File

@@ -1,26 +1,43 @@
import { describe, it, expect, beforeEach } from "bun:test"
import { describe, it, expect, beforeEach, afterEach } from "bun:test"
import { mkdtempSync, writeFileSync, rmSync } from "fs"
import { tmpdir } from "os"
import { join } from "path"
import { fetchAvailableModels, fuzzyMatchModel, __resetModelCache } from "./model-availability"
describe("fetchAvailableModels", () => {
let mockClient: any
let tempDir: string
let originalXdgCache: string | undefined
beforeEach(() => {
__resetModelCache()
tempDir = mkdtempSync(join(tmpdir(), "opencode-test-"))
originalXdgCache = process.env.XDG_CACHE_HOME
process.env.XDG_CACHE_HOME = tempDir
})
it("#given API returns list of models #when fetchAvailableModels called #then returns Set of model IDs", async () => {
const mockModels = [
{ id: "openai/gpt-5.2", name: "GPT-5.2" },
{ id: "anthropic/claude-opus-4-5", name: "Claude Opus 4.5" },
{ id: "google/gemini-3-pro", name: "Gemini 3 Pro" },
]
mockClient = {
model: {
list: async () => mockModels,
},
afterEach(() => {
if (originalXdgCache !== undefined) {
process.env.XDG_CACHE_HOME = originalXdgCache
} else {
delete process.env.XDG_CACHE_HOME
}
rmSync(tempDir, { recursive: true, force: true })
})
const result = await fetchAvailableModels(mockClient)
function writeModelsCache(data: Record<string, any>) {
const cacheDir = join(tempDir, "opencode")
require("fs").mkdirSync(cacheDir, { recursive: true })
writeFileSync(join(cacheDir, "models.json"), JSON.stringify(data))
}
it("#given cache file with models #when fetchAvailableModels called #then returns Set of model IDs", async () => {
writeModelsCache({
openai: { id: "openai", models: { "gpt-5.2": { id: "gpt-5.2" } } },
anthropic: { id: "anthropic", models: { "claude-opus-4-5": { id: "claude-opus-4-5" } } },
google: { id: "google", models: { "gemini-3-pro": { id: "gemini-3-pro" } } },
})
const result = await fetchAvailableModels()
expect(result).toBeInstanceOf(Set)
expect(result.size).toBe(3)
@@ -29,77 +46,50 @@ describe("fetchAvailableModels", () => {
expect(result.has("google/gemini-3-pro")).toBe(true)
})
it("#given API fails #when fetchAvailableModels called #then returns empty Set without throwing", async () => {
mockClient = {
model: {
list: async () => {
throw new Error("API connection failed")
},
},
}
const result = await fetchAvailableModels(mockClient)
it("#given cache file not found #when fetchAvailableModels called #then returns empty Set", async () => {
const result = await fetchAvailableModels()
expect(result).toBeInstanceOf(Set)
expect(result.size).toBe(0)
})
it("#given API called twice #when second call made #then uses cached result without re-fetching", async () => {
let callCount = 0
const mockModels = [
{ id: "openai/gpt-5.2", name: "GPT-5.2" },
{ id: "anthropic/claude-opus-4-5", name: "Claude Opus 4.5" },
]
mockClient = {
model: {
list: async () => {
callCount++
return mockModels
},
},
}
it("#given cache read twice #when second call made #then uses cached result", async () => {
writeModelsCache({
openai: { id: "openai", models: { "gpt-5.2": { id: "gpt-5.2" } } },
anthropic: { id: "anthropic", models: { "claude-opus-4-5": { id: "claude-opus-4-5" } } },
})
const result1 = await fetchAvailableModels(mockClient)
const result2 = await fetchAvailableModels(mockClient)
const result1 = await fetchAvailableModels()
const result2 = await fetchAvailableModels()
expect(callCount).toBe(1)
expect(result1).toEqual(result2)
expect(result1.has("openai/gpt-5.2")).toBe(true)
})
it("#given empty model list from API #when fetchAvailableModels called #then returns empty Set", async () => {
mockClient = {
model: {
list: async () => [],
},
}
it("#given empty providers in cache #when fetchAvailableModels called #then returns empty Set", async () => {
writeModelsCache({})
const result = await fetchAvailableModels(mockClient)
const result = await fetchAvailableModels()
expect(result).toBeInstanceOf(Set)
expect(result.size).toBe(0)
})
it("#given API returns models with various formats #when fetchAvailableModels called #then extracts all IDs correctly", async () => {
const mockModels = [
{ id: "openai/gpt-5.2-codex", name: "GPT-5.2 Codex" },
{ id: "anthropic/claude-sonnet-4-5", name: "Claude Sonnet 4.5" },
{ id: "google/gemini-3-flash", name: "Gemini 3 Flash" },
{ id: "opencode/grok-code", name: "Grok Code" },
]
mockClient = {
model: {
list: async () => mockModels,
},
}
it("#given cache file with various providers #when fetchAvailableModels called #then extracts all IDs correctly", async () => {
writeModelsCache({
openai: { id: "openai", models: { "gpt-5.2-codex": { id: "gpt-5.2-codex" } } },
anthropic: { id: "anthropic", models: { "claude-sonnet-4-5": { id: "claude-sonnet-4-5" } } },
google: { id: "google", models: { "gemini-3-flash": { id: "gemini-3-flash" } } },
opencode: { id: "opencode", models: { "grok-code": { id: "grok-code" } } },
})
const result = await fetchAvailableModels(mockClient)
const result = await fetchAvailableModels()
expect(result.size).toBe(4)
expect(result.has("openai/gpt-5.2-codex")).toBe(true)
expect(result.has("anthropic/claude-sonnet-4-5")).toBe(true)
expect(result.has("google/gemini-3-flash")).toBe(true)
expect(result.has("opencode/grok-code")).toBe(true)
expect(result.has("opencode/grok-code")).toBe(true)
})
})

View File

@@ -3,6 +3,9 @@
* Supports substring matching with provider filtering and priority-based selection
*/
import { existsSync, readFileSync } from "fs"
import { homedir } from "os"
import { join } from "path"
import { log } from "./logger"
/**
@@ -90,33 +93,54 @@ export function fuzzyMatchModel(
let cachedModels: Set<string> | null = null
export async function fetchAvailableModels(client: any): Promise<Set<string>> {
function getOpenCodeCacheDir(): string {
const xdgCache = process.env.XDG_CACHE_HOME
if (xdgCache) return join(xdgCache, "opencode")
return join(homedir(), ".cache", "opencode")
}
export async function fetchAvailableModels(_client?: any): Promise<Set<string>> {
log("[fetchAvailableModels] CALLED")
if (cachedModels !== null) {
log("[fetchAvailableModels] returning cached models", { count: cachedModels.size, models: Array.from(cachedModels).slice(0, 20) })
return cachedModels
}
const modelSet = new Set<string>()
const cacheFile = join(getOpenCodeCacheDir(), "models.json")
log("[fetchAvailableModels] reading cache file", { cacheFile })
if (!existsSync(cacheFile)) {
log("[fetchAvailableModels] cache file not found, returning empty set")
return modelSet
}
try {
const models = await client.model.list()
const modelSet = new Set<string>()
const content = readFileSync(cacheFile, "utf-8")
const data = JSON.parse(content) as Record<string, { id?: string; models?: Record<string, { id?: string }> }>
log("[fetchAvailableModels] raw response", { isArray: Array.isArray(models), length: Array.isArray(models) ? models.length : 0, sample: Array.isArray(models) ? models.slice(0, 5) : models })
const providerIds = Object.keys(data)
log("[fetchAvailableModels] providers found", { count: providerIds.length, providers: providerIds.slice(0, 10) })
if (Array.isArray(models)) {
for (const model of models) {
if (model.id && typeof model.id === "string") {
modelSet.add(model.id)
}
for (const providerId of providerIds) {
const provider = data[providerId]
const models = provider?.models
if (!models || typeof models !== "object") continue
for (const modelKey of Object.keys(models)) {
modelSet.add(`${providerId}/${modelKey}`)
}
}
log("[fetchAvailableModels] parsed models", { count: modelSet.size, models: Array.from(modelSet) })
log("[fetchAvailableModels] parsed models", { count: modelSet.size, models: Array.from(modelSet).slice(0, 20) })
cachedModels = modelSet
return modelSet
} catch (err) {
log("[fetchAvailableModels] error", { error: String(err) })
return new Set<string>()
return modelSet
}
}