fix: align native openai transport defaults

This commit is contained in:
Peter Steinberger 2026-04-04 01:19:00 +01:00
parent bc16b9dccf
commit 628c71103e
No known key found for this signature in database
12 changed files with 245 additions and 23 deletions

View File

@ -21,6 +21,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Providers/OpenAI: preserve native `reasoning.effort: "none"` and strict tool schemas on direct OpenAI-family endpoints, keep OpenAI-compatible proxies on the older compat shim path, and enable OpenAI WebSocket warm-up by default for native Responses routes.
- Skills/uv install: block workspace `.env` from overriding `UV_PYTHON` and strip related interpreter override keys from uv skill-install subprocesses so repository-controlled env files cannot steer the selected Python runtime. (#59178) Thanks @pgondhi987.
- Telegram/reactions: preserve `reactionNotifications: "own"` across gateway restarts by persisting sent-message ownership state instead of treating cold cache as a permissive fallback. (#59207) Thanks @samzong.
- Gateway/startup: detect PID recycling in gateway lock files on Windows and macOS, and add startup progress so stale lock conflicts no longer block healthy restarts. (#59843) Thanks @TonyDerek-dot.

View File

@ -239,6 +239,20 @@ Example:
Session overrides win over config. Clearing the session override in the Sessions UI
returns the session to the configured default.
### Native OpenAI versus OpenAI-compatible routes
OpenClaw treats direct OpenAI, Codex, and Azure OpenAI endpoints differently
from generic OpenAI-compatible `/v1` proxies:
- native `openai/*`, `openai-codex/*`, and Azure OpenAI routes keep
`reasoning: { effort: "none" }` intact when you explicitly disable reasoning
- native OpenAI-family routes default tool schemas to strict mode
- proxy-style OpenAI-compatible routes keep the looser compat behavior and do
not force strict tool schemas or native-only request shaping
This preserves current native OpenAI Responses behavior without forcing older
OpenAI-compatible shims onto third-party `/v1` backends.
### OpenAI Responses server-side compaction
For direct OpenAI Responses models (`openai/*` using `api: "openai-responses"` with

View File

@ -33,7 +33,7 @@ import { wrapOpenAICodexProviderStream } from "./stream-hooks.js";
const PROVIDER_ID = "openai-codex";
const OPENAI_CODEX_BASE_URL = "https://chatgpt.com/backend-api";
const OPENAI_CODEX_GPT_54_MODEL_ID = "gpt-5.4";
const OPENAI_CODEX_GPT_54_CONTEXT_TOKENS = 272_000;
const OPENAI_CODEX_GPT_54_CONTEXT_TOKENS = 400_000;
const OPENAI_CODEX_GPT_54_MAX_TOKENS = 128_000;
const OPENAI_CODEX_GPT_54_COST = {
input: 2.5,

View File

@ -163,6 +163,39 @@ describe("buildOpenAIProvider", () => {
);
});
// Pins the built-in GPT-5.4 metadata for both the native OpenAI provider and
// the Codex provider: 1,050,000-token context on api.openai.com versus
// 400,000 on the Codex backend, each capped at 128,000 output tokens.
it("keeps GPT-5.4 family metadata aligned with native OpenAI docs", () => {
const provider = buildOpenAIProvider();
const codexProvider = buildOpenAICodexProviderPlugin();
// Resolve "gpt-5.4" dynamically with a registry that finds nothing, so the
// assertions exercise the providers' hard-coded defaults only.
const openaiModel = provider.resolveDynamicModel?.({
provider: "openai",
modelId: "gpt-5.4",
modelRegistry: { find: () => null },
} as never);
const codexModel = codexProvider.resolveDynamicModel?.({
provider: "openai-codex",
modelId: "gpt-5.4",
modelRegistry: { find: () => null },
} as never);
// Native OpenAI route: Responses API against api.openai.com.
expect(openaiModel).toMatchObject({
provider: "openai",
id: "gpt-5.4",
api: "openai-responses",
baseUrl: "https://api.openai.com/v1",
contextWindow: 1_050_000,
maxTokens: 128_000,
});
// Codex route: same model id, but the ChatGPT backend with a smaller window.
expect(codexModel).toMatchObject({
provider: "openai-codex",
id: "gpt-5.4",
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
contextWindow: 400_000,
maxTokens: 128_000,
});
});
it("keeps modern live selection on OpenAI 5.2+ and Codex 5.2+", () => {
const provider = buildOpenAIProvider();
const codexProvider = buildOpenAICodexProviderPlugin();
@ -289,11 +322,11 @@ describe("buildOpenAIProvider", () => {
expect(extraParams).toMatchObject({
transport: "auto",
openaiWsWarmup: false,
openaiWsWarmup: true,
});
expect(result.payload.service_tier).toBe("priority");
expect(result.payload.text).toEqual({ verbosity: "low" });
expect(result.payload).not.toHaveProperty("reasoning");
expect(result.payload.reasoning).toEqual({ effort: "none" });
});
it("owns Azure OpenAI reasoning compatibility without forcing OpenAI transport defaults", () => {
@ -315,7 +348,7 @@ describe("buildOpenAIProvider", () => {
expect(result.options?.transport).toBeUndefined();
expect(result.options?.openaiWsWarmup).toBeUndefined();
expect(result.payload).not.toHaveProperty("reasoning");
expect(result.payload.reasoning).toEqual({ effort: "none" });
});
it("owns Codex wrapper composition for responses payloads", () => {

View File

@ -24,7 +24,7 @@ const OPENAI_GPT_54_MODEL_ID = "gpt-5.4";
const OPENAI_GPT_54_PRO_MODEL_ID = "gpt-5.4-pro";
const OPENAI_GPT_54_MINI_MODEL_ID = "gpt-5.4-mini";
const OPENAI_GPT_54_NANO_MODEL_ID = "gpt-5.4-nano";
const OPENAI_GPT_54_CONTEXT_TOKENS = 272_000;
const OPENAI_GPT_54_CONTEXT_TOKENS = 1_050_000;
const OPENAI_GPT_54_PRO_CONTEXT_TOKENS = 1_050_000;
const OPENAI_GPT_54_MINI_CONTEXT_TOKENS = 400_000;
const OPENAI_GPT_54_NANO_CONTEXT_TOKENS = 400_000;
@ -248,7 +248,7 @@ export function buildOpenAIProvider(): ProviderPlugin {
return {
...ctx.extraParams,
...(hasSupportedTransport ? {} : { transport: "auto" }),
...(hasExplicitWarmup ? {} : { openaiWsWarmup: false }),
...(hasExplicitWarmup ? {} : { openaiWsWarmup: true }),
};
},
wrapStreamFn: (ctx) =>

View File

@ -58,7 +58,7 @@ describe("openai responses payload policy", () => {
expect(payload).not.toHaveProperty("prompt_cache_retention");
});
it("strips disabled reasoning payloads through the shared helper", () => {
it("keeps disabled reasoning payloads on native OpenAI responses routes", () => {
const payload = {
reasoning: {
effort: "none",
@ -77,6 +77,32 @@ describe("openai responses payload policy", () => {
),
);
expect(payload).toEqual({
reasoning: {
effort: "none",
},
});
});
// Proxy-style /v1 endpoints keep the older compat behavior: a disabled
// reasoning payload ({ effort: "none" }) is stripped before the request is
// sent, instead of being forwarded as-is like on native OpenAI routes.
it("strips disabled reasoning payloads for proxy-like OpenAI responses routes", () => {
const payload = {
reasoning: {
effort: "none",
},
} satisfies Record<string, unknown>;
// baseUrl points at a non-OpenAI host, so the resolved policy treats this
// route as a generic OpenAI-compatible proxy rather than a native route.
applyOpenAIResponsesPayloadPolicy(
payload,
resolveOpenAIResponsesPayloadPolicy(
{
api: "openai-responses",
provider: "openai",
baseUrl: "https://proxy.example.com/v1",
},
{ storeMode: "disable" },
),
);
expect(payload).not.toHaveProperty("reasoning");
});
});

View File

@ -119,7 +119,8 @@ export function resolveOpenAIResponsesPayloadPolicy(
parsePositiveInteger(options.extraParams?.responsesCompactThreshold) ??
resolveOpenAIResponsesCompactThreshold(model),
explicitStore,
shouldStripDisabledReasoningPayload: capabilities.supportsOpenAIReasoningCompatPayload,
shouldStripDisabledReasoningPayload:
capabilities.supportsOpenAIReasoningCompatPayload && !capabilities.usesKnownNativeOpenAIRoute,
shouldStripPromptCache:
options.enablePromptCacheStripping === true && capabilities.shouldStripResponsesPromptCache,
shouldStripStore:

View File

@ -439,6 +439,68 @@ describe("openai transport stream", () => {
expect(params.input?.[0]).toMatchObject({ role: "developer" });
});
// On a native OpenAI route (api.openai.com), Responses tool schemas should
// default to strict mode without the caller opting in.
it("defaults responses tool schemas to strict on native OpenAI routes", () => {
const params = buildOpenAIResponsesParams(
{
id: "gpt-5.4",
name: "GPT-5.4",
api: "openai-responses",
provider: "openai",
// Native base URL — this is what triggers the strict default.
baseUrl: "https://api.openai.com/v1",
reasoning: true,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 200000,
maxTokens: 8192,
} satisfies Model<"openai-responses">,
{
systemPrompt: "system",
messages: [],
// Minimal single tool; only the emitted `strict` flag is asserted.
tools: [
{
name: "lookup_weather",
description: "Get forecast",
parameters: { type: "object", properties: {} },
},
],
} as never,
undefined,
) as { tools?: Array<{ strict?: boolean }> };
expect(params.tools?.[0]?.strict).toBe(true);
});
// On a proxy-like route (non-OpenAI host), the builder must not emit a
// `strict` property at all — neither true nor false — leaving the tool
// schema untouched for third-party /v1 backends.
it("omits responses strict tool shaping for proxy-like OpenAI routes", () => {
const params = buildOpenAIResponsesParams(
{
id: "custom-model",
name: "Custom Model",
api: "openai-responses",
provider: "openai",
// Non-OpenAI host — classified as a proxy, so no strict shaping.
baseUrl: "https://proxy.example.com/v1",
reasoning: true,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 200000,
maxTokens: 8192,
} satisfies Model<"openai-responses">,
{
systemPrompt: "system",
messages: [],
tools: [
{
name: "lookup_weather",
description: "Get forecast",
parameters: { type: "object", properties: {} },
},
],
} as never,
undefined,
) as { tools?: Array<{ strict?: boolean }> };
// Key assertion: the property is absent, not merely false.
expect(params.tools?.[0]).not.toHaveProperty("strict");
});
it("gates responses service_tier to native OpenAI endpoints", () => {
const nativeParams = buildOpenAIResponsesParams(
{
@ -695,6 +757,37 @@ describe("openai transport stream", () => {
expect(params.tools?.[0]?.function).not.toHaveProperty("strict");
});
// Completions variant of the strict-default check: on native OpenAI routes
// the flag is nested under each tool's `function` object
// (Chat Completions shape), unlike the flat Responses shape.
it("defaults completions tool schemas to strict on native OpenAI routes", () => {
const params = buildOpenAICompletionsParams(
{
id: "gpt-5",
name: "GPT-5",
api: "openai-completions",
provider: "openai",
// Native base URL — triggers the strict default for completions too.
baseUrl: "https://api.openai.com/v1",
reasoning: true,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 200000,
maxTokens: 8192,
} satisfies Model<"openai-completions">,
{
systemPrompt: "system",
messages: [],
tools: [
{
name: "lookup_weather",
description: "Get forecast",
parameters: { type: "object", properties: {} },
},
],
} as never,
undefined,
) as { tools?: Array<{ function?: { strict?: boolean } }> };
expect(params.tools?.[0]?.function?.strict).toBe(true);
});
it("uses Mistral compat defaults for direct Mistral completions providers", () => {
const params = buildOpenAICompletionsParams(
{

View File

@ -328,7 +328,15 @@ function convertResponsesTools(
tools: NonNullable<Context["tools"]>,
options?: { strict?: boolean | null },
): FunctionTool[] {
const strict = options?.strict === undefined ? false : options.strict;
const strict = options?.strict;
if (strict === undefined) {
return tools.map((tool) => ({
type: "function",
name: tool.name,
description: tool.description,
parameters: tool.parameters,
})) as unknown as FunctionTool[];
}
return tools.map((tool) => ({
type: "function",
name: tool.name,
@ -698,7 +706,9 @@ export function buildOpenAIResponsesParams(
params.service_tier = options.serviceTier;
}
if (context.tools) {
params.tools = convertResponsesTools(context.tools);
params.tools = convertResponsesTools(context.tools, {
strict: resolveOpenAIStrictToolSetting(model as OpenAIModeModel),
});
}
if (model.reasoning) {
if (options?.reasoningEffort || options?.reasoningSummary) {
@ -1156,14 +1166,56 @@ function mapReasoningEffort(effort: string, reasoningEffortMap: Record<string, s
return reasoningEffortMap[effort] ?? effort;
}
function convertTools(tools: NonNullable<Context["tools"]>, compat: ReturnType<typeof getCompat>) {
/**
 * Decides whether this model resolves to a known native OpenAI-family route
 * that should receive strict tool schemas by default.
 *
 * Returns true only when the resolved request capabilities report a known
 * native OpenAI route AND the effective provider is one of the OpenAI-family
 * providers (openai, openai-codex, azure-openai, azure-openai-responses).
 */
function resolvesToNativeOpenAIStrictTools(model: OpenAIModeModel): boolean {
  // Narrow `compat` to an object before forwarding it; anything else is
  // treated as absent.
  const modelCompat =
    model.compat && typeof model.compat === "object"
      ? (model.compat as { supportsStore?: boolean })
      : undefined;
  const caps = resolveProviderRequestCapabilities({
    provider: model.provider,
    api: model.api,
    baseUrl: model.baseUrl,
    capability: "llm",
    transport: "stream",
    modelId: model.id,
    compat: modelCompat,
  });
  // Providers whose native routes default tool schemas to strict mode.
  const strictToolProviders = [
    "openai",
    "openai-codex",
    "azure-openai",
    "azure-openai-responses",
  ];
  return caps.usesKnownNativeOpenAIRoute && strictToolProviders.includes(caps.provider);
}
/**
 * Resolves the tri-state strict-tool setting for an OpenAI-mode model:
 * - `true`      → native OpenAI-family route: force strict schemas
 * - `false`     → compat layer advertises strict support: explicitly opt out
 * - `undefined` → omit the field entirely and let the endpoint decide
 */
function resolveOpenAIStrictToolSetting(
  model: OpenAIModeModel,
  compat?: ReturnType<typeof getCompat>,
): boolean | undefined {
  if (resolvesToNativeOpenAIStrictTools(model)) {
    return true;
  }
  // Non-native route: mirror the legacy compat behavior (strict: false when
  // the compat profile supports strict mode, otherwise no flag at all).
  return compat?.supportsStrictMode ? false : undefined;
}
function convertTools(
tools: NonNullable<Context["tools"]>,
compat: ReturnType<typeof getCompat>,
model: OpenAIModeModel,
) {
const strict = resolveOpenAIStrictToolSetting(model, compat);
return tools.map((tool) => ({
type: "function",
function: {
name: tool.name,
description: tool.description,
parameters: tool.parameters,
...(compat.supportsStrictMode ? { strict: false } : {}),
...(strict === undefined ? {} : { strict }),
},
}));
}
@ -1196,7 +1248,7 @@ export function buildOpenAICompletionsParams(
params.temperature = options.temperature;
}
if (context.tools) {
params.tools = convertTools(context.tools, compat);
params.tools = convertTools(context.tools, compat, model);
} else if (hasToolHistory(context.messages)) {
params.tools = [];
}

View File

@ -707,7 +707,7 @@ describe("applyExtraParamsToAgent", () => {
expect(payloads[0]).not.toHaveProperty("reasoning_effort");
});
it("strips disabled reasoning payloads for native OpenAI responses routes", () => {
it("keeps disabled reasoning payloads for native OpenAI responses routes", () => {
const payloads: Record<string, unknown>[] = [];
const baseStreamFn: StreamFn = (_model, _context, options) => {
const payload: Record<string, unknown> = {
@ -731,7 +731,9 @@ describe("applyExtraParamsToAgent", () => {
void agent.streamFn?.(model, context, {});
expect(payloads).toHaveLength(1);
expect(payloads[0]).not.toHaveProperty("reasoning");
expect(payloads[0]).toEqual({
reasoning: { effort: "none", summary: "auto" },
});
});
it("keeps disabled reasoning payloads for proxied OpenAI responses routes", () => {
@ -1572,7 +1574,7 @@ describe("applyExtraParamsToAgent", () => {
expect(calls[0]?.transport).toBe("auto");
});
it("defaults OpenAI transport to auto without websocket warm-up", () => {
it("defaults OpenAI transport to auto with websocket warm-up", () => {
const { calls, agent } = createOptionsCaptureAgent();
applyExtraParamsToAgent(agent, undefined, "openai", "gpt-5");
@ -1587,7 +1589,7 @@ describe("applyExtraParamsToAgent", () => {
expect(calls).toHaveLength(1);
expect(calls[0]?.transport).toBe("auto");
expect(calls[0]?.openaiWsWarmup).toBe(false);
expect(calls[0]?.openaiWsWarmup).toBe(true);
});
it("injects native Codex web_search for direct openai-codex Responses models", () => {
@ -2165,7 +2167,7 @@ describe("applyExtraParamsToAgent", () => {
expect(payload.store).toBe(true);
});
it("strips disabled OpenAI reasoning payloads instead of sending effort:none", () => {
it("keeps disabled OpenAI reasoning payloads on native Responses routes", () => {
const payload = runResponsesPayloadMutationCase({
applyProvider: "openai",
applyModelId: "gpt-5-mini",
@ -2180,10 +2182,10 @@ describe("applyExtraParamsToAgent", () => {
reasoning: { effort: "none" },
},
});
expect(payload).not.toHaveProperty("reasoning");
expect(payload.reasoning).toEqual({ effort: "none" });
});
it("strips disabled Azure OpenAI Responses reasoning payloads", () => {
it("keeps disabled Azure OpenAI Responses reasoning payloads", () => {
const payload = runResponsesPayloadMutationCase({
applyProvider: "azure-openai-responses",
applyModelId: "gpt-5-mini",
@ -2198,7 +2200,7 @@ describe("applyExtraParamsToAgent", () => {
reasoning: { effort: "none" },
},
});
expect(payload).not.toHaveProperty("reasoning");
expect(payload.reasoning).toEqual({ effort: "none" });
});
it("injects configured OpenAI service_tier into Responses payloads", () => {

View File

@ -359,7 +359,7 @@ export function createOpenAIDefaultTransportWrapper(baseStreamFn: StreamFn | und
const mergedOptions = {
...options,
transport: options?.transport ?? "auto",
openaiWsWarmup: typedOptions?.openaiWsWarmup ?? false,
openaiWsWarmup: typedOptions?.openaiWsWarmup ?? true,
} as SimpleStreamOptions;
return underlying(model, context, mergedOptions);
};

View File

@ -268,7 +268,7 @@ describe("modelsListCommand forward-compat", () => {
id: "gpt-5.4",
name: "GPT-5.3 Codex",
input: ["text"],
contextWindow: 272000,
contextWindow: 400000,
},
]);
mocks.listProfilesForProvider.mockImplementation((_: unknown, provider: string) =>