fix: improve model resolution with client API fallback and explicit model passing

- fetchAvailableModels now falls back to client.model.list() when cache is empty
- provider-models cache empty → models.json → client API (3-tier fallback)
- look-at tool explicitly passes registered agent's model to session.prompt
- Ensures multimodal-looker uses correctly resolved model (e.g., gemini-3-flash-preview)
- Add comprehensive tests for fuzzy matching and fallback scenarios
This commit is contained in:
justsisyphus 2026-01-30 16:57:13 +09:00
parent 2f7e188cb5
commit 80ee52fe3b
5 changed files with 298 additions and 46 deletions

View File

@ -59,6 +59,28 @@ describe("fetchAvailableModels", () => {
expect(result.size).toBe(0)
})
it("#given connectedProviders unknown but client can list #when fetchAvailableModels called with client #then returns models from API filtered by connected providers", async () => {
  // Stubbed client: provider.list reports only "openai" as connected,
  // while model.list advertises models from two different providers.
  const providerList = async () => ({ data: { connected: ["openai"] } })
  const modelList = async () => ({
    data: [
      { id: "gpt-5.2-codex", provider: "openai" },
      { id: "gemini-3-pro", provider: "google" },
    ],
  })
  const stubClient = {
    provider: { list: providerList },
    model: { list: modelList },
  }
  const result = await fetchAvailableModels(stubClient)
  // Only models from connected providers should survive the filter.
  expect(result).toBeInstanceOf(Set)
  expect(result.has("openai/gpt-5.2-codex")).toBe(true)
  expect(result.has("google/gemini-3-pro")).toBe(false)
})
it("#given cache file not found #when fetchAvailableModels called with connectedProviders #then returns empty Set", async () => {
const result = await fetchAvailableModels(undefined, { connectedProviders: ["openai"] })
@ -66,6 +88,28 @@ describe("fetchAvailableModels", () => {
expect(result.size).toBe(0)
})
it("#given cache missing but client can list #when fetchAvailableModels called with connectedProviders #then returns models from API", async () => {
  // No cache on disk; both providers are connected, so both models
  // returned by the client API should be present in the result.
  const stubClient = {
    provider: {
      list: async () => ({ data: { connected: ["openai", "google"] } }),
    },
    model: {
      list: async () => ({
        data: [
          { id: "gpt-5.2-codex", provider: "openai" },
          { id: "gemini-3-pro", provider: "google" },
        ],
      }),
    },
  }
  const connected = ["openai", "google"]
  const result = await fetchAvailableModels(stubClient, { connectedProviders: connected })
  expect(result).toBeInstanceOf(Set)
  expect(result.has("openai/gpt-5.2-codex")).toBe(true)
  expect(result.has("google/gemini-3-pro")).toBe(true)
})
it("#given cache read twice #when second call made with same providers #then reads fresh each time", async () => {
writeModelsCache({
openai: { id: "openai", models: { "gpt-5.2": { id: "gpt-5.2" } } },
@ -122,6 +166,19 @@ describe("fuzzyMatchModel", () => {
expect(result).toBe("openai/gpt-5.2")
})
// #given available model with preview suffix
// #when searching with provider-prefixed base model
// #then return preview model
it("should match preview suffix for gemini-3-flash", () => {
  // A base model id ("gemini-3-flash") should fuzzy-match the only
  // available variant carrying a "-preview" suffix.
  const availableModels = new Set(["google/gemini-3-flash-preview"])
  const matched = fuzzyMatchModel("google/gemini-3-flash", availableModels, ["google"])
  expect(matched).toBe("google/gemini-3-flash-preview")
})
// #given available models with partial matches
// #when searching for a substring
// #then return exact match if it exists
@ -569,6 +626,27 @@ describe("fetchAvailableModels with provider-models cache (whitelist-filtered)",
expect(result.has("anthropic/claude-sonnet-4-5")).toBe(false)
})
//#given provider-models cache exists but has no models (API failure)
//#when fetchAvailableModels called
//#then falls back to models.json so fuzzy matching can still work
it("should fall back to models.json when provider-models cache is empty", async () => {
  // Simulate an API failure that left the provider-models cache empty.
  writeProviderModelsCache({ models: {}, connected: ["google"] })
  // models.json still knows about the preview model.
  writeModelsCache({ google: { models: { "gemini-3-flash-preview": {} } } })

  const availableModels = await fetchAvailableModels(undefined, {
    connectedProviders: ["google"],
  })

  // The fallback data must be usable for fuzzy matching.
  const match = fuzzyMatchModel("google/gemini-3-flash", availableModels, ["google"])
  expect(match).toBe("google/gemini-3-flash-preview")
})
//#given only models.json exists (no provider-models cache)
//#when fetchAvailableModels called
//#then falls back to models.json (no whitelist filtering)

View File

@ -119,85 +119,144 @@ export async function getConnectedProviders(client: any): Promise<string[]> {
}
export async function fetchAvailableModels(
_client?: any,
client?: any,
options?: { connectedProviders?: string[] | null }
): Promise<Set<string>> {
const connectedProvidersUnknown = options?.connectedProviders === null || options?.connectedProviders === undefined
let connectedProviders = options?.connectedProviders ?? null
let connectedProvidersUnknown = connectedProviders === null
log("[fetchAvailableModels] CALLED", {
connectedProvidersUnknown,
connectedProviders: options?.connectedProviders
})
if (connectedProvidersUnknown && client) {
const liveConnected = await getConnectedProviders(client)
if (liveConnected.length > 0) {
connectedProviders = liveConnected
connectedProvidersUnknown = false
log("[fetchAvailableModels] connected providers fetched from client", { count: liveConnected.length })
}
}
if (connectedProvidersUnknown) {
if (client?.model?.list) {
const modelSet = new Set<string>()
try {
const modelsResult = await client.model.list()
const models = modelsResult.data ?? []
for (const model of models) {
if (model?.provider && model?.id) {
modelSet.add(`${model.provider}/${model.id}`)
}
}
log("[fetchAvailableModels] fetched models from client without provider filter", {
count: modelSet.size,
})
return modelSet
} catch (err) {
log("[fetchAvailableModels] client.model.list error", { error: String(err) })
}
}
log("[fetchAvailableModels] connected providers unknown, returning empty set for fallback resolution")
return new Set<string>()
}
const connectedProviders = options!.connectedProviders!
const connectedSet = new Set(connectedProviders)
const connectedProvidersList = connectedProviders ?? []
const connectedSet = new Set(connectedProvidersList)
const modelSet = new Set<string>()
const providerModelsCache = readProviderModelsCache()
if (providerModelsCache) {
log("[fetchAvailableModels] using provider-models cache (whitelist-filtered)")
for (const [providerId, modelIds] of Object.entries(providerModelsCache.models)) {
if (!connectedSet.has(providerId)) {
continue
const providerCount = Object.keys(providerModelsCache.models).length
if (providerCount === 0) {
log("[fetchAvailableModels] provider-models cache empty, falling back to models.json")
} else {
log("[fetchAvailableModels] using provider-models cache (whitelist-filtered)")
for (const [providerId, modelIds] of Object.entries(providerModelsCache.models)) {
if (!connectedSet.has(providerId)) {
continue
}
for (const modelId of modelIds) {
modelSet.add(`${providerId}/${modelId}`)
}
}
for (const modelId of modelIds) {
modelSet.add(`${providerId}/${modelId}`)
log("[fetchAvailableModels] parsed from provider-models cache", {
count: modelSet.size,
connectedProviders: connectedProvidersList.slice(0, 5)
})
if (modelSet.size > 0) {
return modelSet
}
log("[fetchAvailableModels] provider-models cache produced no models for connected providers, falling back to models.json")
}
log("[fetchAvailableModels] parsed from provider-models cache", {
count: modelSet.size,
connectedProviders: connectedProviders.slice(0, 5)
})
return modelSet
}
log("[fetchAvailableModels] provider-models cache not found, falling back to models.json")
const cacheFile = join(getOpenCodeCacheDir(), "models.json")
if (!existsSync(cacheFile)) {
log("[fetchAvailableModels] models.json cache file not found, returning empty set")
return modelSet
}
log("[fetchAvailableModels] models.json cache file not found, falling back to client")
} else {
try {
const content = readFileSync(cacheFile, "utf-8")
const data = JSON.parse(content) as Record<string, { id?: string; models?: Record<string, { id?: string }> }>
try {
const content = readFileSync(cacheFile, "utf-8")
const data = JSON.parse(content) as Record<string, { id?: string; models?: Record<string, { id?: string }> }>
const providerIds = Object.keys(data)
log("[fetchAvailableModels] providers found in models.json", { count: providerIds.length, providers: providerIds.slice(0, 10) })
const providerIds = Object.keys(data)
log("[fetchAvailableModels] providers found in models.json", { count: providerIds.length, providers: providerIds.slice(0, 10) })
for (const providerId of providerIds) {
if (!connectedSet.has(providerId)) {
continue
}
for (const providerId of providerIds) {
if (!connectedSet.has(providerId)) {
continue
const provider = data[providerId]
const models = provider?.models
if (!models || typeof models !== "object") continue
for (const modelKey of Object.keys(models)) {
modelSet.add(`${providerId}/${modelKey}`)
}
}
const provider = data[providerId]
const models = provider?.models
if (!models || typeof models !== "object") continue
log("[fetchAvailableModels] parsed models from models.json (NO whitelist filtering)", {
count: modelSet.size,
connectedProviders: connectedProvidersList.slice(0, 5)
})
for (const modelKey of Object.keys(models)) {
modelSet.add(`${providerId}/${modelKey}`)
if (modelSet.size > 0) {
return modelSet
}
} catch (err) {
log("[fetchAvailableModels] error", { error: String(err) })
}
log("[fetchAvailableModels] parsed models from models.json (NO whitelist filtering)", {
count: modelSet.size,
connectedProviders: connectedProviders.slice(0, 5)
})
return modelSet
} catch (err) {
log("[fetchAvailableModels] error", { error: String(err) })
return modelSet
}
if (client?.model?.list) {
try {
const modelsResult = await client.model.list()
const models = modelsResult.data ?? []
for (const model of models) {
if (!model?.provider || !model?.id) continue
if (connectedSet.has(model.provider)) {
modelSet.add(`${model.provider}/${model.id}`)
}
}
log("[fetchAvailableModels] fetched models from client (filtered)", {
count: modelSet.size,
connectedProviders: connectedProvidersList.slice(0, 5),
})
} catch (err) {
log("[fetchAvailableModels] client.model.list error", { error: String(err) })
}
}
return modelSet
}
export function __resetModelCache(): void {}

View File

@ -302,6 +302,36 @@ describe("sisyphus-task", () => {
expect(result).toBeNull()
})
test("blocks requiresModel when availability is known and missing the required model", () => {
  // #given availability is known but lacks the model the category requires
  const availableModels = new Set<string>(["anthropic/claude-opus-4-5"])
  // #when resolving the "deep" category
  const resolved = resolveCategoryConfig("deep", {
    systemDefaultModel: SYSTEM_DEFAULT_MODEL,
    availableModels,
  })
  // #then resolution is blocked
  expect(resolved).toBeNull()
})
test("blocks requiresModel when availability is empty", () => {
  // #given an empty availability set (nothing is known to be usable)
  const availableModels = new Set<string>()
  // #when resolving the "deep" category
  const resolved = resolveCategoryConfig("deep", {
    systemDefaultModel: SYSTEM_DEFAULT_MODEL,
    availableModels,
  })
  // #then resolution is blocked
  expect(resolved).toBeNull()
})
test("returns default model from DEFAULT_CATEGORIES for builtin category", () => {
// #given
const categoryName = "visual-engineering"

View File

@ -146,4 +146,62 @@ describe("look-at tool", () => {
expect(result).toContain("Network connection failed")
})
})
describe("createLookAt model passthrough", () => {
  // #given multimodal-looker agent has resolved model info
  // #when the LookAt tool is executed
  // #then the model info should be forwarded to session.prompt
  test("passes multimodal-looker model to session.prompt when available", async () => {
    // Captures the body that the tool sends to session.prompt.
    let promptBody: any
    const mockClient = {
      app: {
        // Agent registry: multimodal-looker is registered with a resolved model.
        agents: async () => ({
          data: [
            {
              name: "multimodal-looker",
              mode: "subagent",
              model: { providerID: "google", modelID: "gemini-3-flash" },
            },
          ],
        }),
      },
      session: {
        get: async () => ({ data: { directory: "/project" } }),
        create: async () => ({ data: { id: "ses_model_passthrough" } }),
        // Record the prompt body so the assertion below can inspect it.
        prompt: async (input: any) => {
          promptBody = input.body
          return { data: {} }
        },
        messages: async () => ({
          data: [
            { info: { role: "assistant", time: { created: 1 } }, parts: [{ type: "text", text: "done" }] },
          ],
        }),
      },
    }
    const tool = createLookAt({
      client: mockClient,
      directory: "/project",
    } as any)
    const toolContext = {
      sessionID: "parent-session",
      messageID: "parent-message",
      agent: "sisyphus",
      abort: new AbortController().signal,
    }
    await tool.execute(
      { file_path: "/test/file.png", goal: "analyze image" },
      toolContext
    )
    // The registered agent's model must have been passed through verbatim.
    expect(promptBody.model).toEqual({
      providerID: "google",
      modelID: "gemini-3-flash",
    })
  })
})
})

View File

@ -3,7 +3,7 @@ import { pathToFileURL } from "node:url"
import { tool, type PluginInput, type ToolDefinition } from "@opencode-ai/plugin"
import { LOOK_AT_DESCRIPTION, MULTIMODAL_LOOKER_AGENT } from "./constants"
import type { LookAtArgs } from "./types"
import { log } from "../../shared/logger"
import { findByNameCaseInsensitive, log, promptWithModelSuggestionRetry } from "../../shared"
interface LookAtArgsWithAlias extends LookAtArgs {
path?: string
@ -130,9 +130,34 @@ Original error: ${createResult.error}`
const sessionID = createResult.data.id
log(`[look_at] Created session: ${sessionID}`)
let agentModel: { providerID: string; modelID: string } | undefined
let agentVariant: string | undefined
try {
const agentsResult = await ctx.client.app?.agents?.()
type AgentInfo = {
name: string
mode?: "subagent" | "primary" | "all"
model?: { providerID: string; modelID: string }
variant?: string
}
const agents = ((agentsResult as { data?: AgentInfo[] })?.data ?? agentsResult) as AgentInfo[] | undefined
if (agents?.length) {
const matchedAgent = findByNameCaseInsensitive(agents, MULTIMODAL_LOOKER_AGENT)
if (matchedAgent?.model) {
agentModel = matchedAgent.model
}
if (matchedAgent?.variant) {
agentVariant = matchedAgent.variant
}
}
} catch (error) {
log("[look_at] Failed to resolve multimodal-looker model info", error)
}
log(`[look_at] Sending prompt with file passthrough to session ${sessionID}`)
try {
await ctx.client.session.prompt({
await promptWithModelSuggestionRetry(ctx.client, {
path: { id: sessionID },
body: {
agent: MULTIMODAL_LOOKER_AGENT,
@ -146,6 +171,8 @@ Original error: ${createResult.error}`
{ type: "text", text: prompt },
{ type: "file", mime: mimeType, url: pathToFileURL(args.file_path).href, filename },
],
...(agentModel ? { model: { providerID: agentModel.providerID, modelID: agentModel.modelID } } : {}),
...(agentVariant ? { variant: agentVariant } : {}),
},
})
} catch (promptError) {