fix(look-at): use synchronous prompt to fix race condition (#1620 regression)

PR #1620 migrated all prompt calls from session.prompt (blocking) to
session.promptAsync (fire-and-forget HTTP 204). This broke look_at which
needs the multimodal-looker response to be available immediately after
the prompt call returns.

Fix: add promptSyncWithModelSuggestionRetry() that uses session.prompt
(blocking) with model suggestion retry support. look_at now uses this
sync variant while all other callers keep using promptAsync.

- Add promptSyncWithModelSuggestionRetry to model-suggestion-retry.ts
- Switch look_at from promptWithModelSuggestionRetry to sync variant
- Add comprehensive tests for the new sync function
- No changes to other callers (delegate-task, background-agent)
This commit is contained in:
YeonGyu-Kim 2026-02-08 02:36:27 +09:00
parent 825a5e70f7
commit 3d4ed912d7
3 changed files with 167 additions and 3 deletions

View File

@ -1,5 +1,5 @@
import { describe, it, expect, mock } from "bun:test"
import { parseModelSuggestion, promptWithModelSuggestionRetry } from "./model-suggestion-retry"
import { parseModelSuggestion, promptWithModelSuggestionRetry, promptSyncWithModelSuggestionRetry } from "./model-suggestion-retry"
describe("parseModelSuggestion", () => {
describe("structured NamedError format", () => {
@ -377,3 +377,128 @@ describe("promptWithModelSuggestionRetry", () => {
expect(promptMock).toHaveBeenCalledTimes(1)
})
})
describe("promptSyncWithModelSuggestionRetry", () => {
	it("should use synchronous prompt (not promptAsync)", async () => {
		// given: a client exposing both the blocking and fire-and-forget endpoints
		const syncSpy = mock(() => Promise.resolve())
		const asyncSpy = mock(() => Promise.resolve())
		const fakeClient = { session: { prompt: syncSpy, promptAsync: asyncSpy } }

		// when: the sync wrapper is invoked
		await promptSyncWithModelSuggestionRetry(fakeClient as any, {
			path: { id: "session-1" },
			body: {
				parts: [{ type: "text", text: "hello" }],
				model: { providerID: "anthropic", modelID: "claude-sonnet-4" },
			},
		})

		// then: only the blocking endpoint is hit, never the async one
		expect(syncSpy).toHaveBeenCalledTimes(1)
		expect(asyncSpy).toHaveBeenCalledTimes(0)
	})

	it("should retry with suggested model on ProviderModelNotFoundError", async () => {
		// given: first call rejects with a model-not-found carrying a suggestion,
		// second call succeeds
		const syncSpy = mock()
			.mockRejectedValueOnce({
				name: "ProviderModelNotFoundError",
				data: {
					providerID: "anthropic",
					modelID: "claude-sonet-4",
					suggestions: ["claude-sonnet-4"],
				},
			})
			.mockResolvedValueOnce(undefined)
		const fakeClient = { session: { prompt: syncSpy } }

		// when: the sync wrapper is invoked with the misspelled model
		await promptSyncWithModelSuggestionRetry(fakeClient as any, {
			path: { id: "session-1" },
			body: {
				parts: [{ type: "text", text: "hello" }],
				model: { providerID: "anthropic", modelID: "claude-sonet-4" },
			},
		})

		// then: exactly two calls, and the second carries the suggested model
		expect(syncSpy).toHaveBeenCalledTimes(2)
		const secondCallArgs = syncSpy.mock.calls[1][0]
		expect(secondCallArgs.body.model).toEqual({
			providerID: "anthropic",
			modelID: "claude-sonnet-4",
		})
	})

	it("should throw original error when no suggestion available", async () => {
		// given: the prompt rejects with an error that is not a model suggestion
		const networkError = new Error("Connection refused")
		const syncSpy = mock().mockRejectedValueOnce(networkError)
		const fakeClient = { session: { prompt: syncSpy } }

		// when/then: the original error propagates unchanged and no retry happens
		await expect(
			promptSyncWithModelSuggestionRetry(fakeClient as any, {
				path: { id: "session-1" },
				body: {
					parts: [{ type: "text", text: "hello" }],
					model: { providerID: "anthropic", modelID: "claude-sonnet-4" },
				},
			})
		).rejects.toThrow("Connection refused")
		expect(syncSpy).toHaveBeenCalledTimes(1)
	})

	it("should throw when model-not-found but no model in original request", async () => {
		// given: a model-not-found rejection, but the request body carries no model
		const syncSpy = mock().mockRejectedValueOnce({
			name: "ProviderModelNotFoundError",
			data: {
				providerID: "anthropic",
				modelID: "claude-sonet-4",
				suggestions: ["claude-sonnet-4"],
			},
		})
		const fakeClient = { session: { prompt: syncSpy } }

		// when/then: without an original model there is nothing to retry with,
		// so the error propagates after a single attempt
		await expect(
			promptSyncWithModelSuggestionRetry(fakeClient as any, {
				path: { id: "session-1" },
				body: {
					parts: [{ type: "text", text: "hello" }],
				},
			})
		).rejects.toThrow()
		expect(syncSpy).toHaveBeenCalledTimes(1)
	})

	it("should pass all body fields through to prompt", async () => {
		// given: a prompt that resolves immediately
		const syncSpy = mock().mockResolvedValueOnce(undefined)
		const fakeClient = { session: { prompt: syncSpy } }

		// when: extra body fields accompany the request
		await promptSyncWithModelSuggestionRetry(fakeClient as any, {
			path: { id: "session-1" },
			body: {
				agent: "multimodal-looker",
				tools: { task: false },
				parts: [{ type: "text", text: "analyze" }],
				model: { providerID: "google", modelID: "gemini-3-flash" },
				variant: "max",
			},
		})

		// then: every field reaches the underlying prompt call untouched
		const firstCallArgs = syncSpy.mock.calls[0][0]
		expect(firstCallArgs.body.agent).toBe("multimodal-looker")
		expect(firstCallArgs.body.tools).toEqual({ task: false })
		expect(firstCallArgs.body.variant).toBe("max")
	})
})

View File

@ -88,3 +88,42 @@ export async function promptWithModelSuggestionRetry(
// model errors happen asynchronously server-side and cannot be caught here
await client.session.promptAsync(args as Parameters<typeof client.session.promptAsync>[0])
}
/**
 * Synchronous variant of promptWithModelSuggestionRetry.
 *
 * Uses `session.prompt` (blocking HTTP call that waits for the LLM response)
 * instead of `promptAsync` (fire-and-forget HTTP 204).
 *
 * Required by callers that need the response to be available immediately after
 * the call returns — e.g. look_at, which reads session messages right away.
 *
 * On a model-not-found error with a usable suggestion, the prompt is retried
 * exactly once with the suggested model substituted in; any other error (or a
 * request that carried no model to substitute against) is rethrown unchanged.
 */
export async function promptSyncWithModelSuggestionRetry(
	client: Client,
	args: PromptArgs,
): Promise<void> {
	try {
		await client.session.prompt(args as Parameters<typeof client.session.prompt>[0])
		return
	} catch (error) {
		const suggestion = parseModelSuggestion(error)
		const originalModel = args.body.model
		// No suggestion to act on, or no model to replace: propagate as-is.
		if (!suggestion || !originalModel) {
			throw error
		}
		log("[model-suggestion-retry] Model not found, retrying with suggestion", {
			original: `${suggestion.providerID}/${suggestion.modelID}`,
			suggested: suggestion.suggestion,
		})
		// Rebuild the request with only the model swapped for the suggestion.
		const retryArgs = {
			...args,
			body: {
				...args.body,
				model: {
					providerID: suggestion.providerID,
					modelID: suggestion.suggestion,
				},
			},
		}
		await client.session.prompt(retryArgs as Parameters<typeof client.session.prompt>[0])
	}
}

View File

@ -3,7 +3,7 @@ import { pathToFileURL } from "node:url"
import { tool, type PluginInput, type ToolDefinition } from "@opencode-ai/plugin"
import { LOOK_AT_DESCRIPTION, MULTIMODAL_LOOKER_AGENT } from "./constants"
import type { LookAtArgs } from "./types"
import { log, promptWithModelSuggestionRetry } from "../../shared"
import { log, promptSyncWithModelSuggestionRetry } from "../../shared"
interface LookAtArgsWithAlias extends LookAtArgs {
path?: string
@ -223,7 +223,7 @@ Original error: ${createResult.error}`
log(`[look_at] Sending prompt with ${isBase64Input ? "base64 image" : "file"} to session ${sessionID}`)
try {
await promptWithModelSuggestionRetry(ctx.client, {
await promptSyncWithModelSuggestionRetry(ctx.client, {
path: { id: sessionID },
body: {
agent: MULTIMODAL_LOOKER_AGENT,