Merge pull request #2248 from code-yeongyu/fix/pr-2080-model-format

fix: model format normalization and explicit config cache bypass
This commit is contained in:
YeonGyu-Kim 2026-03-03 00:48:04 +09:00 committed by GitHub
commit 025d2a3579
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 60 additions and 5 deletions

View File

@ -0,0 +1,25 @@
import { describe, expect, test } from "bun:test"
import { resolveModelPipeline } from "./model-resolution-pipeline"
describe("resolveModelPipeline", () => {
  test("does not return unused explicit user config metadata in override result", () => {
    // given: an explicit user model override and no available models
    const resolved = resolveModelPipeline({
      intent: { userModel: "openai/gpt-5.3-codex" },
      constraints: { availableModels: new Set<string>() },
    })

    // when: probing the result for the removed metadata field
    const carriesExplicitFlag = resolved
      ? Object.prototype.hasOwnProperty.call(resolved, "explicitUserConfig")
      : false

    // then: only model + provenance survive; the legacy flag is absent
    expect(resolved).toEqual({ model: "openai/gpt-5.3-codex", provenance: "override" })
    expect(carriesExplicitFlag).toBe(false)
  })
})

View File

@ -34,7 +34,6 @@ export type ModelResolutionResult = {
variant?: string variant?: string
attempted?: string[] attempted?: string[]
reason?: string reason?: string
explicitUserConfig?: boolean
} }
@ -56,7 +55,7 @@ export function resolveModelPipeline(
const normalizedUserModel = normalizeModel(intent?.userModel) const normalizedUserModel = normalizeModel(intent?.userModel)
if (normalizedUserModel) { if (normalizedUserModel) {
log("Model resolved via config override", { model: normalizedUserModel }) log("Model resolved via config override", { model: normalizedUserModel })
return { model: normalizedUserModel, provenance: "override", explicitUserConfig: true } return { model: normalizedUserModel, provenance: "override" }
} }
const normalizedCategoryDefault = normalizeModel(intent?.categoryDefaultModel) const normalizedCategoryDefault = normalizeModel(intent?.categoryDefaultModel)

View File

@ -4,6 +4,7 @@ import { resolveSubagentExecution } from "./subagent-resolver"
import type { DelegateTaskArgs } from "./types" import type { DelegateTaskArgs } from "./types"
import type { ExecutorContext } from "./executor-types" import type { ExecutorContext } from "./executor-types"
import * as logger from "../../shared/logger" import * as logger from "../../shared/logger"
import * as connectedProvidersCache from "../../shared/connected-providers-cache"
function createBaseArgs(overrides?: Partial<DelegateTaskArgs>): DelegateTaskArgs { function createBaseArgs(overrides?: Partial<DelegateTaskArgs>): DelegateTaskArgs {
return { return {
@ -79,4 +80,25 @@ describe("resolveSubagentExecution", () => {
error: "network timeout", error: "network timeout",
}) })
}) })
test("normalizes matched agent model string before returning categoryModel", async () => {
  //#given a stubbed provider-models cache and an agent whose model is a plain "provider/model" string
  const providerCacheStub = spyOn(connectedProvidersCache, "readProviderModelsCache").mockReturnValue({
    models: { openai: ["grok-3"] },
    connected: ["openai"],
    updatedAt: "2026-03-03T00:00:00.000Z",
  })
  const taskArgs = createBaseArgs({ subagent_type: "oracle" })
  const ctx = createExecutorContext(async () => [
    { name: "oracle", mode: "subagent", model: "openai/gpt-5.3-codex" },
  ])

  //#when resolving the subagent execution
  const resolution = await resolveSubagentExecution(taskArgs, ctx, "sisyphus", "deep")

  //#then the string model is normalized into a structured { providerID, modelID } categoryModel
  expect(resolution.error).toBeUndefined()
  expect(resolution.categoryModel).toEqual({ providerID: "openai", modelID: "gpt-5.3-codex" })
  providerCacheStub.mockRestore()
})
}) })

View File

@ -51,7 +51,11 @@ Create the work plan directly - that's your job as the planning agent.`,
try { try {
const agentsResult = await client.app.agents() const agentsResult = await client.app.agents()
type AgentInfo = { name: string; mode?: "subagent" | "primary" | "all"; model?: { providerID: string; modelID: string } } type AgentInfo = {
name: string
mode?: "subagent" | "primary" | "all"
model?: string | { providerID: string; modelID: string }
}
const agents = normalizeSDKResponse(agentsResult, [] as AgentInfo[], { const agents = normalizeSDKResponse(agentsResult, [] as AgentInfo[], {
preferResponseOnMissingData: true, preferResponseOnMissingData: true,
}) })
@ -99,7 +103,9 @@ Create the work plan directly - that's your job as the planning agent.`,
if (agentOverride?.model || agentRequirement || matchedAgent.model) { if (agentOverride?.model || agentRequirement || matchedAgent.model) {
const availableModels = await getAvailableModelsForDelegateTask(client) const availableModels = await getAvailableModelsForDelegateTask(client)
const normalizedMatchedModel = normalizeModelFormat(matchedAgent.model as Parameters<typeof normalizeModelFormat>[0]) const normalizedMatchedModel = matchedAgent.model
? normalizeModelFormat(matchedAgent.model)
: undefined
const matchedAgentModelStr = normalizedMatchedModel const matchedAgentModelStr = normalizedMatchedModel
? `${normalizedMatchedModel.providerID}/${normalizedMatchedModel.modelID}` ? `${normalizedMatchedModel.providerID}/${normalizedMatchedModel.modelID}`
: undefined : undefined
@ -122,7 +128,10 @@ Create the work plan directly - that's your job as the planning agent.`,
} }
if (!categoryModel && matchedAgent.model) { if (!categoryModel && matchedAgent.model) {
categoryModel = matchedAgent.model const normalizedMatchedModel = normalizeModelFormat(matchedAgent.model)
if (normalizedMatchedModel) {
categoryModel = normalizedMatchedModel
}
} }
} catch (error) { } catch (error) {
const errorMessage = error instanceof Error ? error.message : String(error) const errorMessage = error instanceof Error ? error.message : String(error)