openai-codex: add gpt-5.3-codex-spark forward-compat model (#15174)

Merged via maintainer flow after rebase + local gates.

Prepared head SHA: 6cac87cbf9

Co-authored-by: loiie45e <15420100+loiie45e@users.noreply.github.com>
Co-authored-by: mbelinky <2406260+mbelinky@users.noreply.github.com>
Authored by loiie45e on 2026-02-13 23:21:07 +08:00 and committed via GitHub.
parent 96318641d8
commit 2e04630105
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 111 additions and 6 deletions

View File

@ -20,6 +20,7 @@ Docs: https://docs.openclaw.ai
- Onboarding/CLI: restore terminal state without resuming paused `stdin`, so onboarding exits cleanly after choosing Web UI and the installer returns instead of appearing stuck.
- Onboarding/Providers: add vLLM as an onboarding provider with model discovery, auth profile wiring, and non-interactive auth-choice validation. (#12577) Thanks @gejifeng.
- Onboarding/Providers: preserve Hugging Face auth intent in auth-choice remapping (`tokenProvider=huggingface` with `authChoice=apiKey`) and skip env-override prompts when an explicit token is provided. (#13472) Thanks @Josephrp.
- Models/Codex: resolve configured `openai-codex/gpt-5.3-codex-spark` through forward-compat fallback during `models list`, so it is not incorrectly tagged as missing when runtime resolution succeeds. (#15174) Thanks @loiie45e.
- macOS Voice Wake: fix a crash in trigger trimming for CJK/Unicode transcripts by matching and slicing on original-string ranges instead of transformed-string indices. (#11052) Thanks @Flash-LHR.
- Heartbeat: prevent scheduler silent-death races during runner reloads, preserve retry cooldown backoff under wake bursts, and prioritize user/action wake causes over interval/retry reasons when coalescing. (#15108) Thanks @joeykrug.
- Outbound targets: fail closed for WhatsApp/Twitch/Google Chat fallback paths so invalid or missing targets are dropped instead of rerouted, and align resolver hints with strict target requirements. (#13578) Thanks @mcaxtr.

View File

@ -40,11 +40,11 @@ function resolveOpenAICodexGpt53FallbackModel(
if (normalizedProvider !== "openai-codex") {
return undefined;
}
const loweredModelId = trimmedModelId.toLowerCase();
if (
loweredModelId !== OPENAI_CODEX_GPT_53_MODEL_ID &&
loweredModelId !== OPENAI_CODEX_GPT_53_SPARK_MODEL_ID
) {
const lower = trimmedModelId.toLowerCase();
const isGpt53 = lower === OPENAI_CODEX_GPT_53_MODEL_ID;
const isSpark = lower === OPENAI_CODEX_GPT_53_SPARK_MODEL_ID;
if (!isGpt53 && !isSpark) {
return undefined;
}
@ -57,6 +57,8 @@ function resolveOpenAICodexGpt53FallbackModel(
...template,
id: trimmedModelId,
name: trimmedModelId,
// Spark is a low-latency variant; keep api/baseUrl from template.
...(isSpark ? { reasoning: true } : {}),
} as Model<Api>);
}

View File

@ -0,0 +1,95 @@
// Regression test for #15174: a configured `openai-codex/gpt-5.3-codex-spark`
// entry must not be tagged "missing" by `models list` when the registry lacks
// the model but runtime resolution (forward-compat fallback) can still build it.
import { describe, expect, it, vi } from "vitest";
// vi.hoisted guarantees these mock fns exist before the hoisted vi.mock
// factories below execute (vi.mock calls are lifted above all imports).
const mocks = vi.hoisted(() => {
const printModelTable = vi.fn();
return {
// Config pins the spark model as the primary agent model, no provider overrides.
loadConfig: vi.fn().mockReturnValue({
agents: { defaults: { model: { primary: "openai-codex/gpt-5.3-codex-spark" } } },
models: { providers: {} },
}),
// Empty auth-profile store; listProfilesForProvider is mocked to [] below.
ensureAuthProfileStore: vi.fn().mockReturnValue({ version: 1, profiles: {}, order: {} }),
// Deliberately empty registry: the spark model is absent so the list command
// must fall through to resolveModel() rather than find it in modelByKey.
loadModelRegistry: vi.fn().mockResolvedValue({ models: [], availableKeys: new Set() }),
// One configured entry for the spark model — the subject under test.
resolveConfiguredEntries: vi.fn().mockReturnValue({
entries: [
{
key: "openai-codex/gpt-5.3-codex-spark",
ref: { provider: "openai-codex", model: "gpt-5.3-codex-spark" },
tags: new Set(["configured"]),
aliases: [],
},
],
}),
// Captured so the test can inspect the rows passed to the table printer.
printModelTable,
// resolveModel succeeds (error: undefined), simulating the forward-compat
// fallback producing a concrete model definition at runtime.
resolveModel: vi.fn().mockReturnValue({
model: {
provider: "openai-codex",
id: "gpt-5.3-codex-spark",
name: "GPT-5.3 Codex Spark",
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
input: ["text"],
contextWindow: 272000,
maxTokens: 128000,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
},
error: undefined,
authStorage: {} as never,
modelRegistry: {} as never,
}),
};
});
vi.mock("../../config/config.js", () => ({
loadConfig: mocks.loadConfig,
}));
// Partial mock: keep the real module surface, override only the store loader
// and force an empty profile list for every provider.
vi.mock("../../agents/auth-profiles.js", async (importOriginal) => {
const actual = await importOriginal<typeof import("../../agents/auth-profiles.js")>();
return {
...actual,
ensureAuthProfileStore: mocks.ensureAuthProfileStore,
listProfilesForProvider: vi.fn().mockReturnValue([]),
};
});
// Partial mock: keep toModelRow etc., override only the registry loader.
vi.mock("./list.registry.js", async (importOriginal) => {
const actual = await importOriginal<typeof import("./list.registry.js")>();
return {
...actual,
loadModelRegistry: mocks.loadModelRegistry,
};
});
vi.mock("./list.configured.js", () => ({
resolveConfiguredEntries: mocks.resolveConfiguredEntries,
}));
vi.mock("./list.table.js", () => ({
printModelTable: mocks.printModelTable,
}));
vi.mock("../../agents/pi-embedded-runner/model.js", () => ({
resolveModel: mocks.resolveModel,
}));
// Imported after the vi.mock declarations so the command picks up the mocks.
import { modelsListCommand } from "./list.list-command.js";
describe("modelsListCommand forward-compat", () => {
it("does not mark configured codex spark as missing when resolveModel can build a fallback", async () => {
const runtime = { log: vi.fn(), error: vi.fn() };
await modelsListCommand({ json: true }, runtime as never);
expect(mocks.printModelTable).toHaveBeenCalled();
// First positional arg of the first printModelTable call is the row list.
const rows = mocks.printModelTable.mock.calls[0]?.[0] as Array<{
key: string;
tags: string[];
missing: boolean;
}>;
const spark = rows.find((r) => r.key === "openai-codex/gpt-5.3-codex-spark");
expect(spark).toBeTruthy();
// The fix: a registry miss that resolveModel can recover from must not
// surface as missing=true or a "missing" tag on the configured entry.
expect(spark?.missing).toBe(false);
expect(spark?.tags).not.toContain("missing");
});
});

View File

@ -3,6 +3,7 @@ import type { RuntimeEnv } from "../../runtime.js";
import type { ModelRow } from "./list.types.js";
import { ensureAuthProfileStore } from "../../agents/auth-profiles.js";
import { parseModelRef } from "../../agents/model-selection.js";
import { resolveModel } from "../../agents/pi-embedded-runner/model.js";
import { loadConfig } from "../../config/config.js";
import { resolveConfiguredEntries } from "./list.configured.js";
import { loadModelRegistry, toModelRow } from "./list.registry.js";
@ -99,7 +100,13 @@ export async function modelsListCommand(
if (providerFilter && entry.ref.provider.toLowerCase() !== providerFilter) {
continue;
}
const model = modelByKey.get(entry.key);
let model = modelByKey.get(entry.key);
if (!model) {
const resolved = resolveModel(entry.ref.provider, entry.ref.model, undefined, cfg);
if (resolved.model && !resolved.error) {
model = resolved.model;
}
}
if (opts.local && model && !isLocalBaseUrl(model.baseUrl)) {
continue;
}