fix(models): keep codex spark codex-only

This commit is contained in:
Peter Steinberger 2026-03-13 00:51:30 +00:00
parent d4f535b203
commit d5b3f2ed71
No known key found for this signature in database
16 changed files with 339 additions and 4 deletions

View File

@ -15,6 +15,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Models/OpenAI Codex Spark: keep `gpt-5.3-codex-spark` working on the `openai-codex/*` path via resolver fallbacks and clearer Codex-only handling, while continuing to suppress the stale direct `openai/*` Spark row that OpenAI rejects live.
- Ollama/Kimi Cloud: apply the Moonshot Kimi payload compatibility wrapper to Ollama-hosted Kimi models like `kimi-k2.5:cloud`, so tool routing no longer breaks when thinking is enabled. (#41519) Thanks @vincentkoc.
- Models/Kimi Coding: send the built-in `User-Agent: claude-code/0.1.0` header by default for `kimi-coding` while still allowing explicit provider headers to override it, so Kimi Code subscription auth can work without a local header-injection proxy. (#30099) Thanks @Amineelfarssi and @vincentkoc.
- Security/device pairing: switch `/pair` and `openclaw qr` setup codes to short-lived bootstrap tokens so the next release no longer embeds shared gateway credentials in chat or QR pairing payloads. Thanks @lintsinghua.

View File

@ -48,6 +48,7 @@ OpenClaw ships with the piai catalog. These providers require **no**
- OpenAI Responses WebSocket warm-up defaults to enabled via `params.openaiWsWarmup` (`true`/`false`)
- OpenAI priority processing can be enabled via `agents.defaults.models["openai/<model>"].params.serviceTier`
- OpenAI fast mode can be enabled per model via `agents.defaults.models["<provider>/<model>"].params.fastMode`
- `openai/gpt-5.3-codex-spark` is intentionally suppressed in OpenClaw because the live OpenAI API rejects it; Spark is treated as Codex-only
```json5
{
@ -81,6 +82,7 @@ OpenClaw ships with the piai catalog. These providers require **no**
- Default transport is `auto` (WebSocket-first, SSE fallback)
- Override per model via `agents.defaults.models["openai-codex/<model>"].params.transport` (`"sse"`, `"websocket"`, or `"auto"`)
- Shares the same `/fast` toggle and `params.fastMode` config as direct `openai/*`
- `openai-codex/gpt-5.3-codex-spark` remains available when the Codex OAuth catalog exposes it; entitlement-dependent
- Policy note: OpenAI Codex OAuth is explicitly supported for external tools/workflows like OpenClaw.
```json5

View File

@ -36,6 +36,12 @@ openclaw onboard --openai-api-key "$OPENAI_API_KEY"
OpenAI's current API model docs list `gpt-5.4` and `gpt-5.4-pro` for direct
OpenAI API usage. OpenClaw forwards both through the `openai/*` Responses path.
OpenClaw does **not** expose `openai/gpt-5.3-codex-spark` on the direct OpenAI
API path. `pi-ai` still ships a built-in row for that model, but live OpenAI API
requests currently reject it, so OpenClaw intentionally suppresses the stale row
and treats Spark as Codex-only.
## Option B: OpenAI Code (Codex) subscription
@ -63,6 +69,18 @@ openclaw models auth login --provider openai-codex
OpenAI's current Codex docs list `gpt-5.4` as the current Codex model. OpenClaw
maps that to `openai-codex/gpt-5.4` for ChatGPT/Codex OAuth usage.
If your Codex account is entitled to Codex Spark, OpenClaw also supports:
- `openai-codex/gpt-5.3-codex-spark`
OpenClaw treats Codex Spark as Codex-only and does not expose a direct
`openai/gpt-5.3-codex-spark` API-key path. The `openai-codex/gpt-5.3-codex-spark`
entry is preserved when `pi-ai` discovers it; treat it as entitlement-dependent
and experimental. Codex Spark is separate from GPT-5.4 `/fast`, and availability
depends on the signed-in Codex / ChatGPT account.
### Transport default
OpenClaw uses `pi-ai` for model streaming. For both `openai/*` and

View File

@ -114,6 +114,55 @@ describe("loadModelCatalog", () => {
expect(spark?.reasoning).toBe(true);
});
// Regression: pi-ai discovery still publishes Spark rows for direct API
// providers. loadModelCatalog must drop the `openai` and
// `azure-openai-responses` rows (live API rejects them) while keeping the
// Codex-only `openai-codex` row.
it("filters stale openai gpt-5.3-codex-spark built-ins from the catalog", async () => {
mockPiDiscoveryModels([
{
// Stale direct-OpenAI row — expected to be suppressed.
id: "gpt-5.3-codex-spark",
provider: "openai",
name: "GPT-5.3 Codex Spark",
reasoning: true,
contextWindow: 128000,
input: ["text", "image"],
},
{
// Azure mirrors the direct OpenAI path — also suppressed.
id: "gpt-5.3-codex-spark",
provider: "azure-openai-responses",
name: "GPT-5.3 Codex Spark",
reasoning: true,
contextWindow: 128000,
input: ["text", "image"],
},
{
// Codex OAuth row — the only path OpenClaw keeps for Spark.
id: "gpt-5.3-codex-spark",
provider: "openai-codex",
name: "GPT-5.3 Codex Spark",
reasoning: true,
contextWindow: 128000,
input: ["text"],
},
]);
const result = await loadModelCatalog({ config: {} as OpenClawConfig });
// Direct provider rows must not appear in the catalog.
expect(result).not.toContainEqual(
expect.objectContaining({
provider: "openai",
id: "gpt-5.3-codex-spark",
}),
);
expect(result).not.toContainEqual(
expect.objectContaining({
provider: "azure-openai-responses",
id: "gpt-5.3-codex-spark",
}),
);
// The Codex row survives filtering.
expect(result).toContainEqual(
expect.objectContaining({
provider: "openai-codex",
id: "gpt-5.3-codex-spark",
}),
);
});
it("adds gpt-5.4 forward-compat catalog entries when template models exist", async () => {
mockPiDiscoveryModels([
{

View File

@ -1,6 +1,7 @@
import { type OpenClawConfig, loadConfig } from "../config/config.js";
import { createSubsystemLogger } from "../logging/subsystem.js";
import { resolveOpenClawAgentDir } from "./agent-paths.js";
import { shouldSuppressBuiltInModel } from "./model-suppression.js";
import { ensureOpenClawModelsJson } from "./models-config.js";
const log = createSubsystemLogger("model-catalog");
@ -242,6 +243,9 @@ export async function loadModelCatalog(params?: {
if (!provider) {
continue;
}
if (shouldSuppressBuiltInModel({ provider, id })) {
continue;
}
const name = String(entry?.name ?? id).trim() || id;
const contextWindow =
typeof entry?.contextWindow === "number" && entry.contextWindow > 0

View File

@ -16,6 +16,9 @@ const OPENAI_CODEX_GPT_54_CONTEXT_TOKENS = 1_050_000;
const OPENAI_CODEX_GPT_54_MAX_TOKENS = 128_000;
const OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS = ["gpt-5.3-codex", "gpt-5.2-codex"] as const;
const OPENAI_CODEX_GPT_53_MODEL_ID = "gpt-5.3-codex";
const OPENAI_CODEX_GPT_53_SPARK_MODEL_ID = "gpt-5.3-codex-spark";
const OPENAI_CODEX_GPT_53_SPARK_CONTEXT_TOKENS = 128_000;
const OPENAI_CODEX_GPT_53_SPARK_MAX_TOKENS = 128_000;
const OPENAI_CODEX_TEMPLATE_MODEL_IDS = ["gpt-5.2-codex"] as const;
const ANTHROPIC_OPUS_46_MODEL_ID = "claude-opus-4-6";
@ -133,6 +136,19 @@ function resolveOpenAICodexForwardCompatModel(
contextWindow: OPENAI_CODEX_GPT_54_CONTEXT_TOKENS,
maxTokens: OPENAI_CODEX_GPT_54_MAX_TOKENS,
};
} else if (lower === OPENAI_CODEX_GPT_53_SPARK_MODEL_ID) {
templateIds = [OPENAI_CODEX_GPT_53_MODEL_ID, ...OPENAI_CODEX_TEMPLATE_MODEL_IDS];
eligibleProviders = CODEX_GPT54_ELIGIBLE_PROVIDERS;
patch = {
api: "openai-codex-responses",
provider: normalizedProvider,
baseUrl: "https://chatgpt.com/backend-api",
reasoning: true,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: OPENAI_CODEX_GPT_53_SPARK_CONTEXT_TOKENS,
maxTokens: OPENAI_CODEX_GPT_53_SPARK_MAX_TOKENS,
};
} else if (lower === OPENAI_CODEX_GPT_53_MODEL_ID) {
templateIds = OPENAI_CODEX_TEMPLATE_MODEL_IDS;
eligibleProviders = CODEX_GPT53_ELIGIBLE_PROVIDERS;

View File

@ -0,0 +1,27 @@
import { normalizeProviderId } from "./model-selection.js";
// Spark model id that pi-ai still ships as a built-in row on direct
// (non-Codex) providers even though live API calls reject it.
const OPENAI_DIRECT_SPARK_MODEL_ID = "gpt-5.3-codex-spark";
// Direct API providers whose built-in Spark rows OpenClaw hides; the model
// stays reachable only via the `openai-codex` OAuth provider.
const SUPPRESSED_SPARK_PROVIDERS = new Set(["openai", "azure-openai-responses"]);
export function shouldSuppressBuiltInModel(params: {
provider?: string | null;
id?: string | null;
}) {
const provider = normalizeProviderId(params.provider?.trim().toLowerCase() ?? "");
const id = params.id?.trim().toLowerCase() ?? "";
// pi-ai still ships non-Codex Spark rows, but OpenClaw treats Spark as
// Codex-only until upstream availability is proven on direct API paths.
return SUPPRESSED_SPARK_PROVIDERS.has(provider) && id === OPENAI_DIRECT_SPARK_MODEL_ID;
}
/**
 * Builds the user-facing "unknown model" message for a suppressed built-in
 * row, steering callers toward the `openai-codex/*` OAuth path.
 *
 * Returns undefined when the provider/id pair is not suppressed, so callers
 * can fall through to their generic unknown-model error.
 */
export function buildSuppressedBuiltInModelError(params: {
  provider?: string | null;
  id?: string | null;
}): string | undefined {
  if (!shouldSuppressBuiltInModel(params)) {
    return undefined;
  }
  const normalized = normalizeProviderId(params.provider?.trim().toLowerCase() ?? "");
  // Suppression only matches known providers, so this fallback is defensive.
  const provider = normalized === "" ? "openai" : normalized;
  const spark = OPENAI_DIRECT_SPARK_MODEL_ID;
  return (
    `Unknown model: ${provider}/${spark}. ` +
    `${spark} is only supported via openai-codex OAuth. Use openai-codex/${spark}.`
  );
}

View File

@ -58,6 +58,16 @@ describe("pi embedded model e2e smoke", () => {
expect(result.model).toMatchObject(buildOpenAICodexForwardCompatExpectation("gpt-5.4"));
});
// Even with no discovered Spark row, the forward-compat resolver should
// synthesize an openai-codex Spark model from the Codex template.
it("builds an openai-codex forward-compat fallback for gpt-5.3-codex-spark", () => {
mockOpenAICodexTemplateModel();
const result = resolveModel("openai-codex", "gpt-5.3-codex-spark", "/tmp/agent");
expect(result.error).toBeUndefined();
expect(result.model).toMatchObject(
buildOpenAICodexForwardCompatExpectation("gpt-5.3-codex-spark"),
);
});
it("keeps unknown-model errors for non-forward-compat IDs", () => {
const result = resolveModel("openai-codex", "gpt-4.1-mini", "/tmp/agent");
expect(result.model).toBeUndefined();

View File

@ -35,15 +35,25 @@ export function mockOpenAICodexTemplateModel(): void {
export function buildOpenAICodexForwardCompatExpectation(
id: string = "gpt-5.3-codex",
): Partial<typeof OPENAI_CODEX_TEMPLATE_MODEL> & { provider: string; id: string } {
): Partial<ModelDefinitionConfig> & {
provider: string;
id: string;
api: string;
baseUrl: string;
} {
const isGpt54 = id === "gpt-5.4";
const isSpark = id === "gpt-5.3-codex-spark";
return {
provider: "openai-codex",
id,
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
reasoning: true,
contextWindow: isGpt54 ? 1_050_000 : 272000,
input: isSpark ? ["text"] : ["text", "image"],
cost: isSpark
? { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }
: OPENAI_CODEX_TEMPLATE_MODEL.cost,
contextWindow: isGpt54 ? 1_050_000 : isSpark ? 128_000 : 272000,
maxTokens: 128000,
};
}

View File

@ -546,6 +546,60 @@ describe("resolveModel", () => {
expect(result.model).toMatchObject(buildOpenAICodexForwardCompatExpectation("gpt-5.4"));
});
// Fallback path: no discovery row, only the Codex template — the resolver
// should still produce a Spark model on the openai-codex provider.
it("builds an openai-codex fallback for gpt-5.3-codex-spark", () => {
mockOpenAICodexTemplateModel();
const result = resolveModel("openai-codex", "gpt-5.3-codex-spark", "/tmp/agent");
expect(result.error).toBeUndefined();
expect(result.model).toMatchObject(
buildOpenAICodexForwardCompatExpectation("gpt-5.3-codex-spark"),
);
});
// Discovery path: when the Codex OAuth catalog exposes Spark, the resolver
// keeps that row intact (entitlement-dependent availability).
it("keeps openai-codex gpt-5.3-codex-spark when discovery provides it", () => {
mockDiscoveredModel({
provider: "openai-codex",
modelId: "gpt-5.3-codex-spark",
templateModel: {
...buildOpenAICodexForwardCompatExpectation("gpt-5.3-codex-spark"),
name: "GPT-5.3 Codex Spark",
input: ["text"],
},
});
const result = resolveModel("openai-codex", "gpt-5.3-codex-spark", "/tmp/agent");
expect(result.error).toBeUndefined();
expect(result.model).toMatchObject({
provider: "openai-codex",
id: "gpt-5.3-codex-spark",
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
});
});
// Suppression path: a stale direct-openai discovery row must be rejected
// with the codex-only hint, not resolved.
it("rejects stale direct openai gpt-5.3-codex-spark discovery rows", () => {
mockDiscoveredModel({
provider: "openai",
modelId: "gpt-5.3-codex-spark",
templateModel: buildForwardCompatTemplate({
id: "gpt-5.3-codex-spark",
name: "GPT-5.3 Codex Spark",
provider: "openai",
api: "openai-responses",
baseUrl: "https://api.openai.com/v1",
}),
});
const result = resolveModel("openai", "gpt-5.3-codex-spark", "/tmp/agent");
expect(result.model).toBeUndefined();
// Exact message is asserted because it is user-facing guidance.
expect(result.error).toBe(
"Unknown model: openai/gpt-5.3-codex-spark. gpt-5.3-codex-spark is only supported via openai-codex OAuth. Use openai-codex/gpt-5.3-codex-spark.",
);
});
it("applies provider overrides to openai gpt-5.4 forward-compat models", () => {
mockDiscoveredModel({
provider: "openai",
@ -725,6 +779,24 @@ describe("resolveModel", () => {
expectUnknownModelError("openai-codex", "gpt-4.1-mini");
});
// With no discovery rows at all, direct-provider Spark lookups must fail
// with the codex-only hint rather than a generic unknown-model error.
it("rejects direct openai gpt-5.3-codex-spark with a codex-only hint", () => {
const result = resolveModel("openai", "gpt-5.3-codex-spark", "/tmp/agent");
expect(result.model).toBeUndefined();
expect(result.error).toBe(
"Unknown model: openai/gpt-5.3-codex-spark. gpt-5.3-codex-spark is only supported via openai-codex OAuth. Use openai-codex/gpt-5.3-codex-spark.",
);
});
// Same suppression applies to the Azure responses provider.
it("rejects azure openai gpt-5.3-codex-spark with a codex-only hint", () => {
const result = resolveModel("azure-openai-responses", "gpt-5.3-codex-spark", "/tmp/agent");
expect(result.model).toBeUndefined();
expect(result.error).toBe(
"Unknown model: azure-openai-responses/gpt-5.3-codex-spark. gpt-5.3-codex-spark is only supported via openai-codex OAuth. Use openai-codex/gpt-5.3-codex-spark.",
);
});
it("uses codex fallback even when openai-codex provider is configured", () => {
// This test verifies the ordering: codex fallback must fire BEFORE the generic providerCfg fallback.
// If ordering is wrong, the generic fallback would use api: "openai-responses" (the default)

View File

@ -8,6 +8,10 @@ import { buildModelAliasLines } from "../model-alias-lines.js";
import { isSecretRefHeaderValueMarker } from "../model-auth-markers.js";
import { resolveForwardCompatModel } from "../model-forward-compat.js";
import { findNormalizedProviderValue, normalizeProviderId } from "../model-selection.js";
import {
buildSuppressedBuiltInModelError,
shouldSuppressBuiltInModel,
} from "../model-suppression.js";
import { discoverAuthStorage, discoverModels } from "../pi-model-discovery.js";
import { normalizeResolvedProviderModel } from "./model.provider-normalization.js";
@ -159,6 +163,9 @@ export function resolveModelWithRegistry(params: {
cfg?: OpenClawConfig;
}): Model<Api> | undefined {
const { provider, modelId, modelRegistry, cfg } = params;
if (shouldSuppressBuiltInModel({ provider, id: modelId })) {
return undefined;
}
const providerConfig = resolveConfiguredProviderConfig(cfg, provider);
const model = modelRegistry.find(provider, modelId) as Model<Api> | null;
@ -303,6 +310,10 @@ const LOCAL_PROVIDER_HINTS: Record<string, string> = {
};
function buildUnknownModelError(provider: string, modelId: string): string {
const suppressed = buildSuppressedBuiltInModelError({ provider, id: modelId });
if (suppressed) {
return suppressed;
}
const base = `Unknown model: ${provider}/${modelId}`;
const hint = LOCAL_PROVIDER_HINTS[provider.toLowerCase()];
return hint ? `${base}. ${hint}` : base;

View File

@ -163,6 +163,30 @@ describe("models list/status", () => {
baseUrl: "https://api.openai.com/v1",
contextWindow: 128000,
};
// Fixture: stale direct-OpenAI Spark row — should be filtered from views.
const OPENAI_SPARK_MODEL = {
provider: "openai",
id: "gpt-5.3-codex-spark",
name: "GPT-5.3 Codex Spark",
input: ["text", "image"],
baseUrl: "https://api.openai.com/v1",
contextWindow: 128000,
};
// Fixture: Codex OAuth Spark row — the only one expected to survive.
const OPENAI_CODEX_SPARK_MODEL = {
provider: "openai-codex",
id: "gpt-5.3-codex-spark",
name: "GPT-5.3 Codex Spark",
input: ["text"],
baseUrl: "https://chatgpt.com/backend-api",
contextWindow: 128000,
};
// Fixture: Azure mirror of the direct Spark row — filtered like openai's.
const AZURE_OPENAI_SPARK_MODEL = {
provider: "azure-openai-responses",
id: "gpt-5.3-codex-spark",
name: "GPT-5.3 Codex Spark",
input: ["text", "image"],
baseUrl: "https://example.openai.azure.com/openai/v1",
contextWindow: 128000,
};
const GOOGLE_ANTIGRAVITY_TEMPLATE_BASE = {
provider: "google-antigravity",
api: "google-gemini-cli",
@ -363,6 +387,34 @@ describe("models list/status", () => {
expect(ensureOpenClawModelsJson).not.toHaveBeenCalled();
});
// End-to-end over the CLI list command and registry loader: when the
// registry contains all three Spark rows, only the openai-codex row should
// be visible in command output, loaded models, and availability keys.
it("filters stale direct OpenAI spark rows from models list and registry views", async () => {
setDefaultModel("openai-codex/gpt-5.3-codex-spark");
modelRegistryState.models = [
OPENAI_SPARK_MODEL,
AZURE_OPENAI_SPARK_MODEL,
OPENAI_CODEX_SPARK_MODEL,
];
modelRegistryState.available = [
OPENAI_SPARK_MODEL,
AZURE_OPENAI_SPARK_MODEL,
OPENAI_CODEX_SPARK_MODEL,
];
const runtime = makeRuntime();
await modelsListCommand({ all: true, json: true }, runtime);
const payload = parseJsonLog(runtime);
// CLI JSON output lists only the Codex row.
expect(payload.models.map((model: { key: string }) => model.key)).toEqual([
"openai-codex/gpt-5.3-codex-spark",
]);
const loaded = await loadModelRegistry({} as never);
// Registry loader applies the same suppression to models…
expect(loaded.models.map((model) => `${model.provider}/${model.id}`)).toEqual([
"openai-codex/gpt-5.3-codex-spark",
]);
// …and to the available-keys set.
expect(Array.from(loaded.availableKeys ?? [])).toEqual(["openai-codex/gpt-5.3-codex-spark"]);
});
it("modelsListCommand persists using the write snapshot config when provided", async () => {
modelRegistryState.models = [OPENAI_MODEL];
modelRegistryState.available = [OPENAI_MODEL];

View File

@ -347,5 +347,55 @@ describe("modelsListCommand forward-compat", () => {
}),
]);
});
// --all output must hide suppressed direct Spark rows even when the mocked
// registry explicitly lists them as available; only the non-Spark Codex
// model should reach the printed table.
it("suppresses direct openai gpt-5.3-codex-spark rows in --all output", async () => {
mocks.resolveConfiguredEntries.mockReturnValueOnce({ entries: [] });
mocks.loadModelRegistry.mockResolvedValueOnce({
models: [
{
// Stale direct-OpenAI Spark row — must not be printed.
provider: "openai",
id: "gpt-5.3-codex-spark",
name: "GPT-5.3 Codex Spark",
api: "openai-responses",
baseUrl: "https://api.openai.com/v1",
input: ["text", "image"],
contextWindow: 128000,
maxTokens: 32000,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
},
{
// Azure mirror of the stale row — suppressed the same way.
provider: "azure-openai-responses",
id: "gpt-5.3-codex-spark",
name: "GPT-5.3 Codex Spark",
api: "azure-openai-responses",
baseUrl: "https://example.openai.azure.com/openai/v1",
input: ["text", "image"],
contextWindow: 128000,
maxTokens: 32000,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
},
{ ...OPENAI_CODEX_53_MODEL },
],
// Availability marks the Spark rows on purpose — suppression must win.
availableKeys: new Set([
"openai/gpt-5.3-codex-spark",
"azure-openai-responses/gpt-5.3-codex-spark",
"openai-codex/gpt-5.3-codex",
]),
registry: {
getAll: () => [{ ...OPENAI_CODEX_53_MODEL }],
},
});
mocks.loadModelCatalog.mockResolvedValueOnce([]);
const runtime = createRuntime();
await modelsListCommand({ all: true, json: true }, runtime as never);
expect(mocks.printModelTable).toHaveBeenCalled();
// Only the non-Spark Codex model survives into the printed rows.
expect(lastPrintedRows<{ key: string }>()).toEqual([
expect.objectContaining({
key: "openai-codex/gpt-5.3-codex",
}),
]);
});
});
});

View File

@ -8,6 +8,7 @@ import {
resolveAwsSdkEnvVarName,
resolveEnvApiKey,
} from "../../agents/model-auth.js";
import { shouldSuppressBuiltInModel } from "../../agents/model-suppression.js";
import { discoverAuthStorage, discoverModels } from "../../agents/pi-model-discovery.js";
import type { OpenClawConfig } from "../../config/config.js";
import {
@ -87,7 +88,9 @@ function loadAvailableModels(registry: ModelRegistry): Model<Api>[] {
throw normalizeAvailabilityError(err);
}
try {
return validateAvailableModels(availableModels);
return validateAvailableModels(availableModels).filter(
(model) => !shouldSuppressBuiltInModel({ provider: model.provider, id: model.id }),
);
} catch (err) {
throw normalizeAvailabilityError(err);
}
@ -100,7 +103,9 @@ export async function loadModelRegistry(
const agentDir = resolveOpenClawAgentDir();
const authStorage = discoverAuthStorage(agentDir);
const registry = discoverModels(authStorage, agentDir);
const models = registry.getAll();
const models = registry
.getAll()
.filter((model) => !shouldSuppressBuiltInModel({ provider: model.provider, id: model.id }));
let availableKeys: Set<string> | undefined;
let availabilityErrorMessage: string | undefined;

View File

@ -2,6 +2,7 @@ import type { Api, Model } from "@mariozechner/pi-ai";
import type { ModelRegistry } from "@mariozechner/pi-coding-agent";
import type { AuthProfileStore } from "../../agents/auth-profiles.js";
import { loadModelCatalog } from "../../agents/model-catalog.js";
import { shouldSuppressBuiltInModel } from "../../agents/model-suppression.js";
import { resolveModelWithRegistry } from "../../agents/pi-embedded-runner/model.js";
import type { OpenClawConfig } from "../../config/config.js";
import { loadModelRegistry, toModelRow } from "./list.registry.js";
@ -79,6 +80,9 @@ export function appendDiscoveredRows(params: {
});
for (const model of sorted) {
if (shouldSuppressBuiltInModel({ provider: model.provider, id: model.id })) {
continue;
}
if (!matchesRowFilter(params.context.filter, model)) {
continue;
}

View File

@ -20,6 +20,7 @@ import {
} from "../agents/live-auth-keys.js";
import { isModernModelRef } from "../agents/live-model-filter.js";
import { getApiKeyForModel } from "../agents/model-auth.js";
import { shouldSuppressBuiltInModel } from "../agents/model-suppression.js";
import { ensureOpenClawModelsJson } from "../agents/models-config.js";
import { isRateLimitErrorMessage } from "../agents/pi-embedded-helpers/errors.js";
import { discoverAuthStorage, discoverModels } from "../agents/pi-model-discovery.js";
@ -1339,6 +1340,9 @@ describeLive("gateway live (dev agent, profile keys)", () => {
const providerProfileCache = new Map<string, boolean>();
const candidates: Array<Model<Api>> = [];
for (const model of wanted) {
if (shouldSuppressBuiltInModel({ provider: model.provider, id: model.id })) {
continue;
}
if (PROVIDERS && !PROVIDERS.has(model.provider)) {
continue;
}