feat(openai): forward text verbosity (#47106)

* feat(openai): forward text verbosity across responses transports

* fix(openai): remove stale verbosity rebase artifact

* chore(changelog): add openai text verbosity entry

---------

Co-authored-by: Ubuntu <ubuntu@vps-1c82b947.vps.ovh.net>
Co-authored-by: Vincent Koc <vincentkoc@ieee.org>
This commit is contained in:
Yauheni Shauchenka 2026-03-30 06:04:35 +03:00 committed by GitHub
parent 51e053d0e8
commit a6bc51f944
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
9 changed files with 525 additions and 0 deletions

View File

@ -19,6 +19,7 @@ Docs: https://docs.openclaw.ai
- Docs/zh-CN: add a Chinese Diffs tool page so Chinese readers can access the full Diffs viewer, file-rendering, security, and troubleshooting docs. (#40773) Thanks @elliotllliu.
- Docs/zh-CN: align the Chinese Diffs tool page with the current English source and generated translation metadata. Thanks @gumadeiras.
- Agents/LLM: add a configurable idle-stream timeout for embedded runner requests so stalled model streams abort cleanly instead of hanging until the broader run timeout fires. (#55072) Thanks @liuy.
- OpenAI/Responses: forward configured `text.verbosity` across Responses HTTP and WebSocket transports, surface it in `/status`, and keep per-agent verbosity precedence aligned with runtime behavior. (#47106) Thanks @merc1305 and @vincentkoc.
### Fixes

View File

@ -238,6 +238,7 @@ export interface ResponseCreateEvent {
top_p?: number;
metadata?: Record<string, string>;
reasoning?: { effort?: "low" | "medium" | "high"; summary?: "auto" | "concise" | "detailed" };
text?: { verbosity?: "low" | "medium" | "high"; [key: string]: unknown };
truncation?: "auto" | "disabled";
[key: string]: unknown;
}

View File

@ -21,6 +21,7 @@ import {
planTurnInput,
releaseWsSession,
} from "./openai-ws-stream.js";
import { log } from "./pi-embedded-runner/logger.js";
// ─────────────────────────────────────────────────────────────────────────────
// Mock OpenAIWebSocketManager
@ -1586,6 +1587,72 @@ describe("createOpenAIWebSocketStreamFn", () => {
expect(sent.max_output_tokens).toBe(0);
});
// Happy path: a valid `textVerbosity` stream option must surface as a
// `text: { verbosity }` block on the outgoing `response.create` event.
it("forwards text verbosity to response.create text block", async () => {
const streamFn = createOpenAIWebSocketStreamFn("sk-test", "sess-text-verbosity");
const opts = { textVerbosity: "low" };
const stream = streamFn(
modelStub as Parameters<typeof streamFn>[0],
contextStub as Parameters<typeof streamFn>[1],
opts as unknown as Parameters<typeof streamFn>[2],
);
// Drive the mocked websocket: yield once so the stream fn can register its
// listeners, then simulate completion so the iterator below can drain.
await new Promise<void>((resolve, reject) => {
queueMicrotask(async () => {
try {
// setImmediate lets pending socket setup run before we emit the event.
await new Promise((r) => setImmediate(r));
MockManager.lastInstance!.simulateEvent({
type: "response.completed",
response: makeResponseObject("resp-text-verbosity", "Done"),
});
for await (const _ of await resolveStream(stream)) {
/* consume */
}
resolve();
} catch (e) {
reject(e);
}
});
});
// The first event sent over the socket is the response.create under test.
const sent = MockManager.lastInstance!.sentEvents[0] as Record<string, unknown>;
expect(sent.type).toBe("response.create");
expect(sent.text).toEqual({ verbosity: "low" });
});
// Invalid values ("loud") must be dropped from the websocket payload with a
// single warning — the request still goes out, just without a text block.
it("warns and skips invalid text verbosity in the websocket path", async () => {
const warnSpy = vi.spyOn(log, "warn").mockImplementation(() => undefined);
try {
const streamFn = createOpenAIWebSocketStreamFn("sk-test", "sess-text-verbosity-invalid");
const opts = { textVerbosity: "loud" };
const stream = streamFn(
modelStub as Parameters<typeof streamFn>[0],
contextStub as Parameters<typeof streamFn>[1],
opts as unknown as Parameters<typeof streamFn>[2],
);
// Same mocked-socket choreography as the happy-path test above: let the
// stream start, complete the response, then drain the iterator.
await new Promise<void>((resolve, reject) => {
queueMicrotask(async () => {
try {
await new Promise((r) => setImmediate(r));
MockManager.lastInstance!.simulateEvent({
type: "response.completed",
response: makeResponseObject("resp-text-verbosity-invalid", "Done"),
});
for await (const _ of await resolveStream(stream)) {
/* consume */
}
resolve();
} catch (e) {
reject(e);
}
});
});
const sent = MockManager.lastInstance!.sentEvents[0] as Record<string, unknown>;
expect(sent.type).toBe("response.create");
// No text block was attached, and the warning names the rejected value.
expect(sent).not.toHaveProperty("text");
expect(warnSpy).toHaveBeenCalledWith("ignoring invalid OpenAI text verbosity param: loud");
} finally {
warnSpy.mockRestore();
}
});
it("forwards reasoningEffort/reasoningSummary to response.create reasoning block", async () => {
const streamFn = createOpenAIWebSocketStreamFn("sk-test", "sess-reason");
const opts = { reasoningEffort: "high", reasoningSummary: "auto" };

View File

@ -41,6 +41,7 @@ import {
planTurnInput,
} from "./openai-ws-message-conversion.js";
import { log } from "./pi-embedded-runner/logger.js";
import { resolveOpenAITextVerbosity } from "./pi-embedded-runner/openai-stream-wrappers.js";
import {
buildAssistantMessageWithZeroUsage,
buildStreamErrorAssistantMessage,
@ -441,6 +442,8 @@ export function createOpenAIWebSocketStreamFn(
maxTokens?: number;
topP?: number;
toolChoice?: unknown;
textVerbosity?: string;
text_verbosity?: string;
})
| undefined;
const extraParams: Record<string, unknown> = {};
@ -466,6 +469,16 @@ export function createOpenAIWebSocketStreamFn(
}
extraParams.reasoning = reasoning;
}
const textVerbosity = resolveOpenAITextVerbosity(
streamOpts as Record<string, unknown> | undefined,
);
if (textVerbosity !== undefined) {
const existingText =
extraParams.text && typeof extraParams.text === "object"
? (extraParams.text as Record<string, unknown>)
: {};
extraParams.text = { ...existingText, verbosity: textVerbosity };
}
// Respect compat.supportsStore — providers like Gemini reject unknown
// fields such as `store` with a 400 error. Fixes #39086.

View File

@ -280,6 +280,39 @@ describe("resolveExtraParams", () => {
});
});
// snake_case default at the model level vs camelCase override on the agent:
// the per-agent value must win and the result must use the canonical key.
it("canonicalizes text verbosity alias styles with agent override precedence", () => {
const cfg = {
agents: {
defaults: {
models: {
"openai/gpt-5.4": {
params: { text_verbosity: "high" },
},
},
},
list: [{ id: "main", params: { textVerbosity: "low" } }],
},
};
const result = resolveExtraParams({
cfg,
provider: "openai",
modelId: "gpt-5.4",
agentId: "main",
});
expect(result).toEqual({ text_verbosity: "low" });
});
it("ignores per-agent params when agentId does not match", () => {
const result = resolveExtraParams({
cfg: {
@ -1880,6 +1913,135 @@ describe("applyExtraParamsToAgent", () => {
expect(payload.service_tier).toBe("priority");
});
// camelCase alias on the default model params should end up as
// `text.verbosity` on a direct OpenAI Responses payload.
it("injects configured OpenAI text verbosity into Responses payloads", () => {
const model = {
api: "openai-responses",
provider: "openai",
id: "gpt-5.4",
baseUrl: "https://api.openai.com/v1",
} as unknown as Model<"openai-responses">;
const payload = runResponsesPayloadMutationCase({
applyProvider: "openai",
applyModelId: "gpt-5.4",
cfg: {
agents: {
defaults: {
models: {
"openai/gpt-5.4": { params: { textVerbosity: "low" } },
},
},
},
},
model,
});
expect(payload.text).toEqual({ verbosity: "low" });
});
// Codex transport: the configured value overrides a caller-provided
// text.verbosity already present on the payload.
it("injects configured text verbosity into Codex Responses payloads", () => {
const model = {
api: "openai-codex-responses",
provider: "openai-codex",
id: "gpt-5.4",
baseUrl: "https://chatgpt.com/backend-api/codex/responses",
} as unknown as Model<"openai-codex-responses">;
const payload = runResponsesPayloadMutationCase({
applyProvider: "openai-codex",
applyModelId: "gpt-5.4",
cfg: {
agents: {
defaults: {
models: {
"openai-codex/gpt-5.4": { params: { text_verbosity: "high" } },
},
},
},
},
model,
payload: { store: false, text: { verbosity: "medium" } },
});
expect(payload.text).toEqual({ verbosity: "high" });
});
// Injection must merge into an existing text block, not replace it: caller
// keys such as `format` survive alongside the injected verbosity.
it("preserves caller-provided payload.text keys when injecting text verbosity", () => {
const model = {
api: "openai-responses",
provider: "openai",
id: "gpt-5.4",
baseUrl: "https://api.openai.com/v1",
} as unknown as Model<"openai-responses">;
const payload = runResponsesPayloadMutationCase({
applyProvider: "openai",
applyModelId: "gpt-5.4",
cfg: {
agents: {
defaults: {
models: {
"openai/gpt-5.4": { params: { text_verbosity: "medium" } },
},
},
},
},
model,
payload: { store: false, text: { format: { type: "text" } } },
});
expect(payload.text).toEqual({
format: { type: "text" },
verbosity: "medium",
});
});
// Direct OpenAI transport defers to an explicit caller verbosity; the
// configured "low" must not clobber the payload's own "high".
it("preserves caller-provided payload.text.verbosity for OpenAI Responses", () => {
const model = {
api: "openai-responses",
provider: "openai",
id: "gpt-5.4",
baseUrl: "https://api.openai.com/v1",
} as unknown as Model<"openai-responses">;
const payload = runResponsesPayloadMutationCase({
applyProvider: "openai",
applyModelId: "gpt-5.4",
cfg: {
agents: {
defaults: {
models: {
"openai/gpt-5.4": { params: { textVerbosity: "low" } },
},
},
},
},
model,
payload: { store: false, text: { verbosity: "high" } },
});
expect(payload.text).toEqual({ verbosity: "high" });
});
it("injects configured OpenAI service_tier into Codex Responses payloads", () => {
const payload = runResponsesPayloadMutationCase({
applyProvider: "openai-codex",
@ -1938,6 +2100,103 @@ describe("applyExtraParamsToAgent", () => {
expect(payload.service_tier).toBe("default");
});
// An unrecognized value ("loud") is dropped with a single warning and no
// text block is injected into the payload.
it("warns and skips invalid OpenAI text verbosity values", () => {
const warnSpy = vi.spyOn(log, "warn").mockImplementation(() => undefined);
try {
const model = {
api: "openai-responses",
provider: "openai",
id: "gpt-5.4",
baseUrl: "https://api.openai.com/v1",
} as unknown as Model<"openai-responses">;
const payload = runResponsesPayloadMutationCase({
applyProvider: "openai",
applyModelId: "gpt-5.4",
cfg: {
agents: {
defaults: {
models: {
"openai/gpt-5.4": { params: { textVerbosity: "loud" } },
},
},
},
},
model,
});
expect(payload).not.toHaveProperty("text");
expect(warnSpy).toHaveBeenCalledWith("ignoring invalid OpenAI text verbosity param: loud");
} finally {
warnSpy.mockRestore();
}
});
// An explicit null extra-params override is the documented opt-out:
// inherited model-level config must not be injected into the payload.
it("lets null runtime override suppress inherited text verbosity injection", () => {
const model = {
api: "openai-responses",
provider: "openai",
id: "gpt-5.4",
baseUrl: "https://api.openai.com/v1",
} as unknown as Model<"openai-responses">;
const payload = runResponsesPayloadMutationCase({
applyProvider: "openai",
applyModelId: "gpt-5.4",
cfg: {
agents: {
defaults: {
models: {
"openai/gpt-5.4": { params: { textVerbosity: "high" } },
},
},
},
},
extraParamsOverride: { text_verbosity: null },
model,
});
expect(payload).not.toHaveProperty("text");
});
// Anthropic models never get a text block injected, and — unlike an invalid
// value on an OpenAI model — no warning is emitted either.
it("ignores OpenAI text verbosity params for non-OpenAI providers without warning", () => {
const warnSpy = vi.spyOn(log, "warn").mockImplementation(() => undefined);
try {
const model = {
api: "anthropic-messages",
provider: "anthropic",
id: "claude-sonnet-4-5",
baseUrl: "https://api.anthropic.com",
} as unknown as Model<"anthropic-messages">;
const payload = runResponsesPayloadMutationCase({
applyProvider: "anthropic",
applyModelId: "claude-sonnet-4-5",
cfg: {
agents: {
defaults: {
models: {
"anthropic/claude-sonnet-4-5": { params: { textVerbosity: "high" } },
},
},
},
},
model,
payload: {},
});
expect(payload).not.toHaveProperty("text");
expect(warnSpy).not.toHaveBeenCalled();
} finally {
warnSpy.mockRestore();
}
});
it("maps fast mode to priority service_tier for direct OpenAI Responses", () => {
const payload = runResponsesPayloadMutationCase({
applyProvider: "openai",

View File

@ -36,8 +36,10 @@ import {
createOpenAIFastModeWrapper,
createOpenAIResponsesContextManagementWrapper,
createOpenAIServiceTierWrapper,
createOpenAITextVerbosityWrapper,
resolveOpenAIFastMode,
resolveOpenAIServiceTier,
resolveOpenAITextVerbosity,
} from "./openai-stream-wrappers.js";
import { streamWithPayloadPatch } from "./stream-payload-utils.js";
@ -101,6 +103,16 @@ export function resolveExtraParams(params: {
delete merged.parallelToolCalls;
}
const resolvedTextVerbosity = resolveAliasedParamValue(
[globalParams, agentParams],
"text_verbosity",
"textVerbosity",
);
if (resolvedTextVerbosity !== undefined) {
merged.text_verbosity = resolvedTextVerbosity;
delete merged.textVerbosity;
}
return merged;
}
@ -411,6 +423,25 @@ function applyPostPluginStreamWrappers(
);
ctx.agent.streamFn = createOpenAIServiceTierWrapper(ctx.agent.streamFn, openAIServiceTier);
}
const rawTextVerbosity = resolveAliasedParamValue(
[ctx.resolvedExtraParams, ctx.override],
"text_verbosity",
"textVerbosity",
);
if (rawTextVerbosity === null) {
log.debug("text verbosity suppressed by null override, skipping injection");
} else if (rawTextVerbosity !== undefined) {
const openAITextVerbosity = resolveOpenAITextVerbosity({
text_verbosity: rawTextVerbosity,
});
if (openAITextVerbosity) {
log.debug(
`applying OpenAI text verbosity=${openAITextVerbosity} for ${ctx.provider}/${ctx.modelId}`,
);
ctx.agent.streamFn = createOpenAITextVerbosityWrapper(ctx.agent.streamFn, openAITextVerbosity);
}
}
}
// Work around upstream pi-ai hardcoding `store: false` for Responses API.

View File

@ -6,6 +6,7 @@ import { log } from "./logger.js";
import { streamWithPayloadPatch } from "./stream-payload-utils.js";
type OpenAIServiceTier = "auto" | "default" | "flex" | "priority";
type OpenAITextVerbosity = "low" | "medium" | "high";
const OPENAI_RESPONSES_APIS = new Set(["openai-responses"]);
const OPENAI_RESPONSES_PROVIDERS = new Set(["openai", "azure-openai", "azure-openai-responses"]);
@ -243,6 +244,29 @@ export function resolveOpenAIServiceTier(
return normalized;
}
/**
 * Normalize a raw config value to a supported OpenAI text verbosity.
 * Non-strings and unrecognized strings yield undefined; matching is
 * case-insensitive and ignores surrounding whitespace.
 */
function normalizeOpenAITextVerbosity(value: unknown): OpenAITextVerbosity | undefined {
  if (typeof value !== "string") {
    return undefined;
  }
  const candidate = value.trim().toLowerCase();
  switch (candidate) {
    case "low":
    case "medium":
    case "high":
      return candidate;
    default:
      return undefined;
  }
}
/**
 * Resolve the effective OpenAI text verbosity from extra params.
 *
 * Accepts both alias styles (`textVerbosity` wins over `text_verbosity`
 * when both are present). Returns undefined when no usable value is set;
 * warns once when a value is present but not one of low/medium/high.
 */
export function resolveOpenAITextVerbosity(
  extraParams: Record<string, unknown> | undefined,
): OpenAITextVerbosity | undefined {
  const raw = extraParams?.textVerbosity ?? extraParams?.text_verbosity;
  // An explicit null is the documented "suppress inherited verbosity"
  // override (see the null handling in applyPostPluginStreamWrappers), so
  // skip it silently instead of warning about an "invalid" object value.
  if (raw === undefined || raw === null) {
    return undefined;
  }
  const normalized = normalizeOpenAITextVerbosity(raw);
  if (normalized === undefined) {
    // Summarize non-string values by their typeof so the log stays short.
    const rawSummary = typeof raw === "string" ? raw : typeof raw;
    log.warn(`ignoring invalid OpenAI text verbosity param: ${rawSummary}`);
  }
  return normalized;
}
function normalizeOpenAIFastMode(value: unknown): boolean | undefined {
if (typeof value === "boolean") {
return value;
@ -372,6 +396,36 @@ export function createOpenAIServiceTierWrapper(
};
}
/**
 * Wrap a stream function so outgoing Responses payloads carry the configured
 * `text.verbosity`. Non-Responses APIs pass through untouched. Codex payloads
 * always take the configured value; direct OpenAI payloads keep a
 * caller-provided text.verbosity when one is already set.
 */
export function createOpenAITextVerbosityWrapper(
  baseStreamFn: StreamFn | undefined,
  verbosity: OpenAITextVerbosity,
): StreamFn {
  const underlying = baseStreamFn ?? streamSimple;
  return (model, context, options) => {
    const isResponsesApi =
      model.api === "openai-responses" || model.api === "openai-codex-responses";
    if (!isResponsesApi) {
      return underlying(model, context, options);
    }
    const forceVerbosity = model.api === "openai-codex-responses";
    const callerOnPayload = options?.onPayload;
    return underlying(model, context, {
      ...options,
      onPayload: (payload) => {
        if (payload && typeof payload === "object") {
          const body = payload as Record<string, unknown>;
          // Merge into any existing text block rather than replacing it so
          // caller-provided keys (e.g. text.format) survive.
          const textBlock =
            body.text && typeof body.text === "object"
              ? (body.text as Record<string, unknown>)
              : {};
          if (forceVerbosity || textBlock.verbosity === undefined) {
            body.text = { ...textBlock, verbosity };
          }
        }
        return callerOnPayload?.(payload, model);
      },
    });
  };
}
export function createCodexDefaultTransportWrapper(baseStreamFn: StreamFn | undefined): StreamFn {
const underlying = baseStreamFn ?? streamSimple;
return (model, context, options) =>

View File

@ -132,6 +132,75 @@ describe("buildStatusMessage", () => {
expect(normalizeTestText(text)).toContain("Fast: on");
});
// Default-model params carry textVerbosity: "low"; /status should render a
// "Text: low" segment for the active openai-codex model.
it("shows configured text verbosity for the active model", () => {
const config = {
agents: {
defaults: {
model: "openai-codex/gpt-5.4",
models: {
"openai-codex/gpt-5.4": { params: { textVerbosity: "low" } },
},
},
},
} as unknown as OpenClawConfig;
const text = buildStatusMessage({
config,
agent: { model: "openai-codex/gpt-5.4" },
sessionEntry: { sessionId: "abc", updatedAt: 0 },
sessionKey: "agent:main:main",
queue: { mode: "collect", depth: 0 },
});
expect(normalizeTestText(text)).toContain("Text: low");
});
// A per-agent snake_case override must beat the default-model camelCase
// value in the rendered /status output.
it("shows per-agent text verbosity overrides for the active model", () => {
const config = {
agents: {
defaults: {
model: "openai-codex/gpt-5.4",
models: {
"openai-codex/gpt-5.4": { params: { textVerbosity: "high" } },
},
},
list: [{ id: "main", params: { text_verbosity: "low" } }],
},
} as unknown as OpenClawConfig;
const text = buildStatusMessage({
config,
agentId: "main",
agent: { model: "openai-codex/gpt-5.4" },
sessionEntry: { sessionId: "abc", updatedAt: 0 },
sessionKey: "agent:main:main",
queue: { mode: "collect", depth: 0 },
});
expect(normalizeTestText(text)).toContain("Text: low");
});
it("notes channel model overrides in status output", () => {
const text = buildStatusMessage({
config: {

View File

@ -7,6 +7,8 @@ import {
resolveConfiguredModelRef,
resolveModelRefFromString,
} from "../agents/model-selection.js";
import { resolveExtraParams } from "../agents/pi-embedded-runner/extra-params.js";
import { resolveOpenAITextVerbosity } from "../agents/pi-embedded-runner/openai-stream-wrappers.js";
import { resolveSandboxRuntimeStatus } from "../agents/sandbox.js";
import type { SkillCommandSpec } from "../agents/skills.js";
import { describeToolForVerbose } from "../agents/tool-description-summary.js";
@ -118,6 +120,27 @@ function normalizeAuthMode(value?: string): NormalizedAuthMode | undefined {
return undefined;
}
/**
 * Look up the effective OpenAI text verbosity for the status display.
 * Only direct OpenAI providers ("openai", "openai-codex") are consulted;
 * anything else yields undefined so no "Text:" segment is rendered.
 */
function resolveConfiguredTextVerbosity(params: {
  config?: OpenClawConfig;
  agentId?: string;
  provider?: string | null;
  model?: string | null;
}): "low" | "medium" | "high" | undefined {
  const provider = params.provider?.trim();
  const model = params.model?.trim();
  if (!provider || !model) {
    return undefined;
  }
  const isOpenAIProvider = provider === "openai" || provider === "openai-codex";
  if (!isOpenAIProvider) {
    return undefined;
  }
  // Reuse the runner's merge logic so precedence (agent override over
  // model defaults) matches runtime behavior exactly.
  const extraParams = resolveExtraParams({
    cfg: params.config,
    provider,
    modelId: model,
    agentId: params.agentId,
  });
  return resolveOpenAITextVerbosity(extraParams);
}
function resolveRuntimeLabel(
args: Pick<StatusArgs, "config" | "agent" | "sessionKey" | "sessionScope">,
): string {
@ -664,10 +687,17 @@ export function buildStatusMessage(args: StatusArgs): string {
? "elevated"
: `elevated:${elevatedLevel}`
: null;
const textVerbosity = resolveConfiguredTextVerbosity({
config: args.config,
agentId: args.agentId,
provider: activeProvider,
model: activeModel,
});
const optionParts = [
`Runtime: ${runtime.label}`,
`Think: ${thinkLevel}`,
fastMode ? "Fast: on" : null,
textVerbosity ? `Text: ${textVerbosity}` : null,
verboseLabel,
reasoningLevel !== "off" ? `Reasoning: ${reasoningLevel}` : null,
elevatedLabel,