diff --git a/CHANGELOG.md b/CHANGELOG.md
index 563313a2933..539662eb424 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -20,6 +20,7 @@ Docs: https://docs.openclaw.ai
 - Onboarding/CLI: restore terminal state without resuming paused `stdin`, so onboarding exits cleanly after choosing Web UI and the installer returns instead of appearing stuck.
 - Onboarding/Providers: add vLLM as an onboarding provider with model discovery, auth profile wiring, and non-interactive auth-choice validation. (#12577) Thanks @gejifeng.
 - Onboarding/Providers: preserve Hugging Face auth intent in auth-choice remapping (`tokenProvider=huggingface` with `authChoice=apiKey`) and skip env-override prompts when an explicit token is provided. (#13472) Thanks @Josephrp.
+- Models/Codex: resolve configured `openai-codex/gpt-5.3-codex-spark` through forward-compat fallback during `models list`, so it is not incorrectly tagged as missing when runtime resolution succeeds. (#15174) Thanks @loiie45e.
 - macOS Voice Wake: fix a crash in trigger trimming for CJK/Unicode transcripts by matching and slicing on original-string ranges instead of transformed-string indices. (#11052) Thanks @Flash-LHR.
 - Heartbeat: prevent scheduler silent-death races during runner reloads, preserve retry cooldown backoff under wake bursts, and prioritize user/action wake causes over interval/retry reasons when coalescing. (#15108) Thanks @joeykrug.
 - Outbound targets: fail closed for WhatsApp/Twitch/Google Chat fallback paths so invalid or missing targets are dropped instead of rerouted, and align resolver hints with strict target requirements. (#13578) Thanks @mcaxtr.
diff --git a/src/agents/pi-embedded-runner/model.ts b/src/agents/pi-embedded-runner/model.ts
index 2d45d1116f2..41e1f8baf10 100644
--- a/src/agents/pi-embedded-runner/model.ts
+++ b/src/agents/pi-embedded-runner/model.ts
@@ -40,11 +40,11 @@ function resolveOpenAICodexGpt53FallbackModel(
   if (normalizedProvider !== "openai-codex") {
     return undefined;
   }
-  const loweredModelId = trimmedModelId.toLowerCase();
-  if (
-    loweredModelId !== OPENAI_CODEX_GPT_53_MODEL_ID &&
-    loweredModelId !== OPENAI_CODEX_GPT_53_SPARK_MODEL_ID
-  ) {
+
+  const lower = trimmedModelId.toLowerCase();
+  const isGpt53 = lower === OPENAI_CODEX_GPT_53_MODEL_ID;
+  const isSpark = lower === OPENAI_CODEX_GPT_53_SPARK_MODEL_ID;
+  if (!isGpt53 && !isSpark) {
     return undefined;
   }
 
@@ -57,6 +57,8 @@ function resolveOpenAICodexGpt53FallbackModel(
     ...template,
     id: trimmedModelId,
     name: trimmedModelId,
+    // Spark is a low-latency variant: keep api/baseUrl from the template and mark it reasoning-capable.
+    ...(isSpark ? { reasoning: true } : {}),
   } as Model);
 }
 
diff --git a/src/commands/models/list.list-command.forward-compat.test.ts b/src/commands/models/list.list-command.forward-compat.test.ts
new file mode 100644
index 00000000000..2f7f6ec2719
--- /dev/null
+++ b/src/commands/models/list.list-command.forward-compat.test.ts
@@ -0,0 +1,95 @@
+import { describe, expect, it, vi } from "vitest";
+
+const mocks = vi.hoisted(() => {
+  const printModelTable = vi.fn();
+  return {
+    loadConfig: vi.fn().mockReturnValue({
+      agents: { defaults: { model: { primary: "openai-codex/gpt-5.3-codex-spark" } } },
+      models: { providers: {} },
+    }),
+    ensureAuthProfileStore: vi.fn().mockReturnValue({ version: 1, profiles: {}, order: {} }),
+    loadModelRegistry: vi.fn().mockResolvedValue({ models: [], availableKeys: new Set() }),
+    resolveConfiguredEntries: vi.fn().mockReturnValue({
+      entries: [
+        {
+          key: "openai-codex/gpt-5.3-codex-spark",
+          ref: { provider: "openai-codex", model: "gpt-5.3-codex-spark" },
+          tags: new Set(["configured"]),
+          aliases: [],
+        },
+      ],
+    }),
+    printModelTable,
+    resolveModel: vi.fn().mockReturnValue({
+      model: {
+        provider: "openai-codex",
+        id: "gpt-5.3-codex-spark",
+        name: "GPT-5.3 Codex Spark",
+        api: "openai-codex-responses",
+        baseUrl: "https://chatgpt.com/backend-api",
+        input: ["text"],
+        contextWindow: 272000,
+        maxTokens: 128000,
+        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+      },
+      error: undefined,
+      authStorage: {} as never,
+      modelRegistry: {} as never,
+    }),
+  };
+});
+
+vi.mock("../../config/config.js", () => ({
+  loadConfig: mocks.loadConfig,
+}));
+
+vi.mock("../../agents/auth-profiles.js", async (importOriginal) => {
+  const actual = await importOriginal();
+  return {
+    ...actual,
+    ensureAuthProfileStore: mocks.ensureAuthProfileStore,
+    listProfilesForProvider: vi.fn().mockReturnValue([]),
+  };
+});
+
+vi.mock("./list.registry.js", async (importOriginal) => {
+  const actual = await importOriginal();
+  return {
+    ...actual,
+    loadModelRegistry: mocks.loadModelRegistry,
+  };
+});
+
+vi.mock("./list.configured.js", () => ({
+  resolveConfiguredEntries: mocks.resolveConfiguredEntries,
+}));
+
+vi.mock("./list.table.js", () => ({
+  printModelTable: mocks.printModelTable,
+}));
+
+vi.mock("../../agents/pi-embedded-runner/model.js", () => ({
+  resolveModel: mocks.resolveModel,
+}));
+
+import { modelsListCommand } from "./list.list-command.js";
+
+describe("modelsListCommand forward-compat", () => {
+  it("does not mark configured codex spark as missing when resolveModel can build a fallback", async () => {
+    const runtime = { log: vi.fn(), error: vi.fn() };
+
+    await modelsListCommand({ json: true }, runtime as never);
+
+    expect(mocks.printModelTable).toHaveBeenCalled();
+    const rows = mocks.printModelTable.mock.calls[0]?.[0] as Array<{
+      key: string;
+      tags: string[];
+      missing: boolean;
+    }>;
+
+    const spark = rows.find((r) => r.key === "openai-codex/gpt-5.3-codex-spark");
+    expect(spark).toBeTruthy();
+    expect(spark?.missing).toBe(false);
+    expect(spark?.tags).not.toContain("missing");
+  });
+});
diff --git a/src/commands/models/list.list-command.ts b/src/commands/models/list.list-command.ts
index a5c5e987448..dcc8bf089ff 100644
--- a/src/commands/models/list.list-command.ts
+++ b/src/commands/models/list.list-command.ts
@@ -3,6 +3,7 @@ import type { RuntimeEnv } from "../../runtime.js";
 import type { ModelRow } from "./list.types.js";
 import { ensureAuthProfileStore } from "../../agents/auth-profiles.js";
 import { parseModelRef } from "../../agents/model-selection.js";
+import { resolveModel } from "../../agents/pi-embedded-runner/model.js";
 import { loadConfig } from "../../config/config.js";
 import { resolveConfiguredEntries } from "./list.configured.js";
 import { loadModelRegistry, toModelRow } from "./list.registry.js";
@@ -99,7 +100,13 @@ export async function modelsListCommand(
     if (providerFilter && entry.ref.provider.toLowerCase() !== providerFilter) {
       continue;
     }
-    const model = modelByKey.get(entry.key);
+    let model = modelByKey.get(entry.key);
+    if (!model) {
+      const resolved = resolveModel(entry.ref.provider, entry.ref.model, undefined, cfg);
+      if (resolved.model && !resolved.error) {
+        model = resolved.model;
+      }
+    }
     if (opts.local && model && !isLocalBaseUrl(model.baseUrl)) {
       continue;
     }