feat(openai): add codex gpt-5.4-mini support

This commit is contained in:
Peter Steinberger 2026-04-04 11:51:57 +09:00
parent 7df763b04d
commit 6b3ff0dd4f
No known key found for this signature in database
12 changed files with 181 additions and 8 deletions

View File

@ -114,6 +114,7 @@ Docs: https://docs.openclaw.ai
- Cron: replay interrupted recurring jobs on the first gateway restart instead of clearing the stale running marker and skipping catch-up until a second restart. (#60583) Thanks @joelnishanth.
- Matrix/backup reset: recreate secret storage during backup reset when stale SSSS state blocks durable backup-key reload, including no-backup repair paths. (#60599) Thanks @emonty.
- Plugins/media understanding: enable bundled Groq and Deepgram providers by default so configured audio transcription models load without extra plugin activation config. (#59982) Thanks @yxjsxy.
- Providers/OpenAI Codex: add forward-compat `openai-codex/gpt-5.4-mini` synthesis across provider runtime, model catalog, and model listing so Codex mini works before bundled Pi catalog updates land.
## 2026.4.2

View File

@ -122,6 +122,42 @@ describe("openai codex provider", () => {
});
});
it("resolves gpt-5.4-mini from codex templates with codex-sized limits", () => {
const provider = buildOpenAICodexProviderPlugin();
const model = provider.resolveDynamicModel?.({
provider: "openai-codex",
modelId: "gpt-5.4-mini",
modelRegistry: {
find: (providerId: string, modelId: string) => {
if (providerId === "openai-codex" && modelId === "gpt-5.1-codex-mini") {
return {
id: "gpt-5.1-codex-mini",
name: "gpt-5.1-codex-mini",
provider: "openai-codex",
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
reasoning: true,
input: ["text", "image"],
cost: { input: 0.25, output: 2, cacheRead: 0.025, cacheWrite: 0 },
contextWindow: 272_000,
maxTokens: 128_000,
};
}
return null;
},
} as never,
} as never);
expect(model).toMatchObject({
id: "gpt-5.4-mini",
contextWindow: 272_000,
maxTokens: 128_000,
cost: { input: 0.75, output: 4.5, cacheRead: 0.075, cacheWrite: 0 },
});
expect(model).not.toHaveProperty("contextTokens");
});
it("augments catalog with gpt-5.4 native contextWindow and runtime cap", () => {
const provider = buildOpenAICodexProviderPlugin();
@ -146,5 +182,11 @@ describe("openai codex provider", () => {
contextTokens: 272_000,
}),
);
expect(entries).toContainEqual(
expect.objectContaining({
id: "gpt-5.4-mini",
contextWindow: 272_000,
}),
);
});
});

View File

@ -33,8 +33,10 @@ import { wrapOpenAICodexProviderStream } from "./stream-hooks.js";
const PROVIDER_ID = "openai-codex";
const OPENAI_CODEX_BASE_URL = "https://chatgpt.com/backend-api";
const OPENAI_CODEX_GPT_54_MODEL_ID = "gpt-5.4";
const OPENAI_CODEX_GPT_54_MINI_MODEL_ID = "gpt-5.4-mini";
const OPENAI_CODEX_GPT_54_NATIVE_CONTEXT_TOKENS = 1_050_000;
const OPENAI_CODEX_GPT_54_DEFAULT_CONTEXT_TOKENS = 272_000;
const OPENAI_CODEX_GPT_54_MINI_CONTEXT_TOKENS = 272_000;
const OPENAI_CODEX_GPT_54_MAX_TOKENS = 128_000;
const OPENAI_CODEX_GPT_54_COST = {
input: 2.5,
@ -42,7 +44,18 @@ const OPENAI_CODEX_GPT_54_COST = {
cacheRead: 0.25,
cacheWrite: 0,
} as const;
const OPENAI_CODEX_GPT_54_MINI_COST = {
input: 0.75,
output: 4.5,
cacheRead: 0.075,
cacheWrite: 0,
} as const;
const OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS = ["gpt-5.3-codex", "gpt-5.2-codex"] as const;
const OPENAI_CODEX_GPT_54_MINI_TEMPLATE_MODEL_IDS = [
OPENAI_CODEX_GPT_54_MODEL_ID,
"gpt-5.1-codex-mini",
...OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS,
] as const;
const OPENAI_CODEX_GPT_53_MODEL_ID = "gpt-5.3-codex";
const OPENAI_CODEX_GPT_53_SPARK_MODEL_ID = "gpt-5.3-codex-spark";
const OPENAI_CODEX_GPT_53_SPARK_CONTEXT_TOKENS = 128_000;
@ -50,6 +63,7 @@ const OPENAI_CODEX_GPT_53_SPARK_MAX_TOKENS = 128_000;
const OPENAI_CODEX_TEMPLATE_MODEL_IDS = ["gpt-5.2-codex"] as const;
const OPENAI_CODEX_XHIGH_MODEL_IDS = [
OPENAI_CODEX_GPT_54_MODEL_ID,
OPENAI_CODEX_GPT_54_MINI_MODEL_ID,
OPENAI_CODEX_GPT_53_MODEL_ID,
OPENAI_CODEX_GPT_53_SPARK_MODEL_ID,
"gpt-5.2-codex",
@ -57,6 +71,7 @@ const OPENAI_CODEX_XHIGH_MODEL_IDS = [
] as const;
const OPENAI_CODEX_MODERN_MODEL_IDS = [
OPENAI_CODEX_GPT_54_MODEL_ID,
OPENAI_CODEX_GPT_54_MINI_MODEL_ID,
"gpt-5.2",
"gpt-5.2-codex",
OPENAI_CODEX_GPT_53_MODEL_ID,
@ -106,6 +121,13 @@ function resolveCodexForwardCompatModel(
maxTokens: OPENAI_CODEX_GPT_54_MAX_TOKENS,
cost: OPENAI_CODEX_GPT_54_COST,
};
} else if (lower === OPENAI_CODEX_GPT_54_MINI_MODEL_ID) {
templateIds = OPENAI_CODEX_GPT_54_MINI_TEMPLATE_MODEL_IDS;
patch = {
contextWindow: OPENAI_CODEX_GPT_54_MINI_CONTEXT_TOKENS,
maxTokens: OPENAI_CODEX_GPT_54_MAX_TOKENS,
cost: OPENAI_CODEX_GPT_54_MINI_COST,
};
} else if (lower === OPENAI_CODEX_GPT_53_SPARK_MODEL_ID) {
templateIds = [OPENAI_CODEX_GPT_53_MODEL_ID, ...OPENAI_CODEX_TEMPLATE_MODEL_IDS];
patch = {
@ -307,6 +329,11 @@ export function buildOpenAICodexProviderPlugin(): ProviderPlugin {
providerId: PROVIDER_ID,
templateIds: OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS,
});
const gpt54MiniTemplate = findCatalogTemplate({
entries: ctx.entries,
providerId: PROVIDER_ID,
templateIds: OPENAI_CODEX_GPT_54_MINI_TEMPLATE_MODEL_IDS,
});
const sparkTemplate = findCatalogTemplate({
entries: ctx.entries,
providerId: PROVIDER_ID,
@ -320,6 +347,12 @@ export function buildOpenAICodexProviderPlugin(): ProviderPlugin {
contextWindow: OPENAI_CODEX_GPT_54_NATIVE_CONTEXT_TOKENS,
contextTokens: OPENAI_CODEX_GPT_54_DEFAULT_CONTEXT_TOKENS,
}),
buildSyntheticCatalogEntry(gpt54MiniTemplate, {
id: OPENAI_CODEX_GPT_54_MINI_MODEL_ID,
reasoning: true,
input: ["text", "image"],
contextWindow: OPENAI_CODEX_GPT_54_MINI_CONTEXT_TOKENS,
}),
buildSyntheticCatalogEntry(sparkTemplate, {
id: OPENAI_CODEX_GPT_53_SPARK_MODEL_ID,
reasoning: true,

View File

@ -247,6 +247,12 @@ describe("loadModelCatalog", () => {
id: "gpt-5.4",
}),
);
expect(result).toContainEqual(
expect.objectContaining({
provider: "openai-codex",
id: "gpt-5.4-mini",
}),
);
});
it("merges configured models for opted-in non-pi-native providers", async () => {

View File

@ -363,7 +363,7 @@ describe("isModernModelRef", () => {
provider === "openai" &&
["gpt-5.4", "gpt-5.4-pro", "gpt-5.4-mini", "gpt-5.4-nano"].includes(context.modelId)
? true
: provider === "openai-codex" && context.modelId === "gpt-5.4"
: provider === "openai-codex" && ["gpt-5.4", "gpt-5.4-mini"].includes(context.modelId)
? true
: provider === "opencode" && ["claude-opus-4-6", "gemini-3-pro"].includes(context.modelId)
? true
@ -377,6 +377,7 @@ describe("isModernModelRef", () => {
expect(isModernModelRef({ provider: "openai", id: "gpt-5.4-mini" })).toBe(true);
expect(isModernModelRef({ provider: "openai", id: "gpt-5.4-nano" })).toBe(true);
expect(isModernModelRef({ provider: "openai-codex", id: "gpt-5.4" })).toBe(true);
expect(isModernModelRef({ provider: "openai-codex", id: "gpt-5.4-mini" })).toBe(true);
expect(isModernModelRef({ provider: "opencode", id: "claude-opus-4-6" })).toBe(true);
expect(isModernModelRef({ provider: "opencode", id: "gemini-3-pro" })).toBe(true);
expect(isModernModelRef({ provider: "opencode-go", id: "kimi-k2.5" })).toBe(true);

View File

@ -183,9 +183,16 @@ function buildDynamicModel(
const template =
lower === "gpt-5.4"
? findTemplate(params, "openai-codex", ["gpt-5.4", "gpt-5.2-codex"])
: lower === "gpt-5.3-codex-spark"
? findTemplate(params, "openai-codex", ["gpt-5.4", "gpt-5.2-codex"])
: findTemplate(params, "openai-codex", ["gpt-5.2-codex"]);
: lower === "gpt-5.4-mini"
? findTemplate(params, "openai-codex", [
"gpt-5.4",
"gpt-5.1-codex-mini",
"gpt-5.3-codex",
"gpt-5.2-codex",
])
: lower === "gpt-5.3-codex-spark"
? findTemplate(params, "openai-codex", ["gpt-5.4", "gpt-5.2-codex"])
: findTemplate(params, "openai-codex", ["gpt-5.2-codex"]);
const fallback = {
provider: "openai-codex",
api: "openai-codex-responses",
@ -212,6 +219,21 @@ function buildDynamicModel(
fallback,
);
}
if (lower === "gpt-5.4-mini") {
return cloneTemplate(
template,
modelId,
{
provider: "openai-codex",
api: "openai-codex-responses",
baseUrl: OPENAI_CODEX_BASE_URL,
cost: { input: 0.75, output: 4.5, cacheRead: 0.075, cacheWrite: 0 },
contextWindow: 272_000,
maxTokens: 128_000,
},
fallback,
);
}
if (lower === "gpt-5.3-codex-spark") {
return cloneTemplate(
template,

View File

@ -57,6 +57,7 @@ export function buildOpenAICodexForwardCompatExpectation(
baseUrl: string;
} {
const isGpt54 = id === "gpt-5.4";
const isGpt54Mini = id === "gpt-5.4-mini";
const isSpark = id === "gpt-5.3-codex-spark";
return {
provider: "openai-codex",
@ -69,7 +70,9 @@ export function buildOpenAICodexForwardCompatExpectation(
? { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }
: isGpt54
? { input: 2.5, output: 15, cacheRead: 0.25, cacheWrite: 0 }
: OPENAI_CODEX_TEMPLATE_MODEL.cost,
: isGpt54Mini
? { input: 0.75, output: 4.5, cacheRead: 0.075, cacheWrite: 0 }
: OPENAI_CODEX_TEMPLATE_MODEL.cost,
contextWindow: isGpt54 ? 1_050_000 : isSpark ? 128_000 : 272000,
...(isGpt54 ? { contextTokens: 272_000 } : {}),
maxTokens: 128000,

View File

@ -847,13 +847,13 @@ describe("resolveModel", () => {
expect(result.model).toMatchObject(buildOpenAICodexForwardCompatExpectation("gpt-5.4"));
});
it("builds an openai-codex fallback for gpt-5.4", () => {
it("builds an openai-codex fallback for gpt-5.4-mini", () => {
mockOpenAICodexTemplateModel(discoverModels);
const result = resolveModelForTest("openai-codex", "gpt-5.4", "/tmp/agent");
const result = resolveModelForTest("openai-codex", "gpt-5.4-mini", "/tmp/agent");
expect(result.error).toBeUndefined();
expect(result.model).toMatchObject(buildOpenAICodexForwardCompatExpectation("gpt-5.4"));
expect(result.model).toMatchObject(buildOpenAICodexForwardCompatExpectation("gpt-5.4-mini"));
});
it("builds an openai-codex fallback for gpt-5.3-codex-spark", () => {

View File

@ -12,6 +12,13 @@ const OPENAI_CODEX_MODEL = {
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
};
const OPENAI_CODEX_MINI_MODEL = {
...OPENAI_CODEX_MODEL,
id: "gpt-5.4-mini",
name: "GPT-5.4 Mini",
contextWindow: 272_000,
};
const OPENAI_CODEX_53_MODEL = {
...OPENAI_CODEX_MODEL,
id: "gpt-5.4",
@ -170,6 +177,35 @@ describe("modelsListCommand forward-compat", () => {
expect(codex?.tags).not.toContain("missing");
});
it("does not mark configured codex mini as missing when forward-compat can build a fallback", async () => {
mocks.resolveConfiguredEntries.mockReturnValueOnce({
entries: [
{
key: "openai-codex/gpt-5.4-mini",
ref: { provider: "openai-codex", model: "gpt-5.4-mini" },
tags: new Set(["configured"]),
aliases: [],
},
],
});
mocks.resolveModelWithRegistry.mockReturnValueOnce({ ...OPENAI_CODEX_MINI_MODEL });
const runtime = createRuntime();
await modelsListCommand({ json: true }, runtime as never);
expect(mocks.printModelTable).toHaveBeenCalled();
const rows = lastPrintedRows<{
key: string;
tags: string[];
missing: boolean;
}>();
const codexMini = rows.find((row) => row.key === "openai-codex/gpt-5.4-mini");
expect(codexMini).toBeTruthy();
expect(codexMini?.missing).toBe(false);
expect(codexMini?.tags).not.toContain("missing");
});
it("passes source config to model registry loading for persistence safety", async () => {
const runtime = createRuntime();

View File

@ -14,6 +14,7 @@ export const expectedAugmentedOpenaiCodexCatalogEntries = [
{ provider: "openai", id: "gpt-5.4-mini", name: "gpt-5.4-mini" },
{ provider: "openai", id: "gpt-5.4-nano", name: "gpt-5.4-nano" },
{ provider: "openai-codex", id: "gpt-5.4", name: "gpt-5.4" },
{ provider: "openai-codex", id: "gpt-5.4-mini", name: "gpt-5.4-mini" },
{
provider: "openai-codex",
id: "gpt-5.3-codex-spark",

View File

@ -127,6 +127,7 @@ function createOpenAiCatalogProviderPlugin(
{ provider: "openai", id: "gpt-5.4-mini", name: "gpt-5.4-mini" },
{ provider: "openai", id: "gpt-5.4-nano", name: "gpt-5.4-nano" },
{ provider: "openai-codex", id: "gpt-5.4", name: "gpt-5.4" },
{ provider: "openai-codex", id: "gpt-5.4-mini", name: "gpt-5.4-mini" },
{
provider: "openai-codex",
id: "gpt-5.3-codex-spark",

View File

@ -604,6 +604,33 @@ export function describeOpenAIProviderRuntimeContract() {
});
});
it("owns forward-compat codex mini models", () => {
const provider = requireProviderContractProvider("openai-codex");
const model = provider.resolveDynamicModel?.({
provider: "openai-codex",
modelId: "gpt-5.4-mini",
modelRegistry: {
find: (_provider: string, id: string) =>
id === "gpt-5.1-codex-mini"
? createModel({
id,
api: "openai-codex-responses",
provider: "openai-codex",
baseUrl: "https://chatgpt.com/backend-api",
})
: null,
} as never,
});
expect(model).toMatchObject({
id: "gpt-5.4-mini",
provider: "openai-codex",
api: "openai-codex-responses",
contextWindow: 272_000,
maxTokens: 128_000,
});
});
it("owns codex transport defaults", () => {
const provider = requireProviderContractProvider("openai-codex");
expect(