From a6bc51f94493fcd09df73b398608cd63fcd0b6a6 Mon Sep 17 00:00:00 2001
From: Yauheni Shauchenka
Date: Mon, 30 Mar 2026 06:04:35 +0300
Subject: [PATCH] feat(openai): forward text verbosity (#47106)

* feat(openai): forward text verbosity across responses transports

* fix(openai): remove stale verbosity rebase artifact

* chore(changelog): add openai text verbosity entry

---------

Co-authored-by: Ubuntu
Co-authored-by: Vincent Koc
---
 CHANGELOG.md                                  |   1 +
 src/agents/openai-ws-connection.ts            |   1 +
 src/agents/openai-ws-stream.test.ts           |  67 +++++
 src/agents/openai-ws-stream.ts                |  13 +
 .../pi-embedded-runner-extraparams.test.ts    | 259 ++++++++++++++++++
 src/agents/pi-embedded-runner/extra-params.ts |  31 +++
 .../openai-stream-wrappers.ts                 |  54 ++++
 src/auto-reply/status.test.ts                 |  69 +++++
 src/auto-reply/status.ts                      |  30 ++
 9 files changed, 525 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 64ce270915b..19efcb4b4a1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -19,6 +19,7 @@ Docs: https://docs.openclaw.ai
 - Docs/zh-CN: add a Chinese Diffs tool page so Chinese readers can access the full Diffs viewer, file-rendering, security, and troubleshooting docs. (#40773) Thanks @elliotllliu.
 - Docs/zh-CN: align the Chinese Diffs tool page with the current English source and generated translation metadata. Thanks @gumadeiras.
 - Agents/LLM: add a configurable idle-stream timeout for embedded runner requests so stalled model streams abort cleanly instead of hanging until the broader run timeout fires. (#55072) Thanks @liuy.
+- OpenAI/Responses: forward configured `text.verbosity` across Responses HTTP and WebSocket transports, surface it in `/status`, and keep per-agent verbosity precedence aligned with runtime behavior. (#47106) Thanks @merc1305 and @vincentkoc.
 
 ### Fixes
 
diff --git a/src/agents/openai-ws-connection.ts b/src/agents/openai-ws-connection.ts
index a44e3afc832..339b2a25fe7 100644
--- a/src/agents/openai-ws-connection.ts
+++ b/src/agents/openai-ws-connection.ts
@@ -238,6 +238,7 @@ export interface ResponseCreateEvent {
   top_p?: number;
   metadata?: Record<string, string>;
   reasoning?: { effort?: "low" | "medium" | "high"; summary?: "auto" | "concise" | "detailed" };
+  text?: { verbosity?: "low" | "medium" | "high"; [key: string]: unknown };
   truncation?: "auto" | "disabled";
   [key: string]: unknown;
 }
diff --git a/src/agents/openai-ws-stream.test.ts b/src/agents/openai-ws-stream.test.ts
index 5f7edd8a6b9..e66104142e6 100644
--- a/src/agents/openai-ws-stream.test.ts
+++ b/src/agents/openai-ws-stream.test.ts
@@ -21,6 +21,7 @@ import {
   planTurnInput,
   releaseWsSession,
 } from "./openai-ws-stream.js";
+import { log } from "./pi-embedded-runner/logger.js";
 
 // ─────────────────────────────────────────────────────────────────────────────
 // Mock OpenAIWebSocketManager
@@ -1586,6 +1587,72 @@ describe("createOpenAIWebSocketStreamFn", () => {
     expect(sent.max_output_tokens).toBe(0);
   });
 
+  it("forwards text verbosity to response.create text block", async () => {
+    const streamFn = createOpenAIWebSocketStreamFn("sk-test", "sess-text-verbosity");
+    const opts = { textVerbosity: "low" };
+    const stream = streamFn(
+      modelStub as Parameters<typeof streamFn>[0],
+      contextStub as Parameters<typeof streamFn>[1],
+      opts as unknown as Parameters<typeof streamFn>[2],
+    );
+    await new Promise<void>((resolve, reject) => {
+      queueMicrotask(async () => {
+        try {
+          await new Promise<void>((r) => setImmediate(r));
+          MockManager.lastInstance!.simulateEvent({
+            type: "response.completed",
+            response: makeResponseObject("resp-text-verbosity", "Done"),
+          });
+          for await (const _ of await resolveStream(stream)) {
+            /* consume */
+          }
+          resolve();
+        } catch (e) {
+          reject(e);
+        }
+      });
+    });
+    const sent = MockManager.lastInstance!.sentEvents[0] as Record<string, unknown>;
+    expect(sent.type).toBe("response.create");
+    expect(sent.text).toEqual({ verbosity: "low" });
+  });
+
+  it("warns and skips invalid text verbosity in the websocket path", async () => {
+    const warnSpy = vi.spyOn(log, "warn").mockImplementation(() => undefined);
+    try {
+      const streamFn = createOpenAIWebSocketStreamFn("sk-test", "sess-text-verbosity-invalid");
+      const opts = { textVerbosity: "loud" };
+      const stream = streamFn(
+        modelStub as Parameters<typeof streamFn>[0],
+        contextStub as Parameters<typeof streamFn>[1],
+        opts as unknown as Parameters<typeof streamFn>[2],
+      );
+      await new Promise<void>((resolve, reject) => {
+        queueMicrotask(async () => {
+          try {
+            await new Promise<void>((r) => setImmediate(r));
+            MockManager.lastInstance!.simulateEvent({
+              type: "response.completed",
+              response: makeResponseObject("resp-text-verbosity-invalid", "Done"),
+            });
+            for await (const _ of await resolveStream(stream)) {
+              /* consume */
+            }
+            resolve();
+          } catch (e) {
+            reject(e);
+          }
+        });
+      });
+      const sent = MockManager.lastInstance!.sentEvents[0] as Record<string, unknown>;
+      expect(sent.type).toBe("response.create");
+      expect(sent).not.toHaveProperty("text");
+      expect(warnSpy).toHaveBeenCalledWith("ignoring invalid OpenAI text verbosity param: loud");
+    } finally {
+      warnSpy.mockRestore();
+    }
+  });
+
   it("forwards reasoningEffort/reasoningSummary to response.create reasoning block", async () => {
     const streamFn = createOpenAIWebSocketStreamFn("sk-test", "sess-reason");
     const opts = { reasoningEffort: "high", reasoningSummary: "auto" };
diff --git a/src/agents/openai-ws-stream.ts b/src/agents/openai-ws-stream.ts
index 6cd39cf093c..a70733a3c58 100644
--- a/src/agents/openai-ws-stream.ts
+++ b/src/agents/openai-ws-stream.ts
@@ -41,6 +41,7 @@ import {
   planTurnInput,
 } from "./openai-ws-message-conversion.js";
 import { log } from "./pi-embedded-runner/logger.js";
+import { resolveOpenAITextVerbosity } from "./pi-embedded-runner/openai-stream-wrappers.js";
 import {
   buildAssistantMessageWithZeroUsage,
   buildStreamErrorAssistantMessage,
@@ -441,6 +442,8 @@ export function createOpenAIWebSocketStreamFn(
           maxTokens?: number;
           topP?: number;
           toolChoice?: unknown;
+          textVerbosity?: string;
+          text_verbosity?: string;
         })
       | undefined;
     const extraParams: Record<string, unknown> = {};
@@ -466,6 +469,16 @@ export function createOpenAIWebSocketStreamFn(
       }
       extraParams.reasoning = reasoning;
     }
+    const textVerbosity = resolveOpenAITextVerbosity(
+      streamOpts as Record<string, unknown> | undefined,
+    );
+    if (textVerbosity !== undefined) {
+      const existingText =
+        extraParams.text && typeof extraParams.text === "object"
+          ? (extraParams.text as Record<string, unknown>)
+          : {};
+      extraParams.text = { ...existingText, verbosity: textVerbosity };
+    }
 
     // Respect compat.supportsStore — providers like Gemini reject unknown
     // fields such as `store` with a 400 error. Fixes #39086.
diff --git a/src/agents/pi-embedded-runner-extraparams.test.ts b/src/agents/pi-embedded-runner-extraparams.test.ts
index 12bcd71c8ee..a431bf01180 100644
--- a/src/agents/pi-embedded-runner-extraparams.test.ts
+++ b/src/agents/pi-embedded-runner-extraparams.test.ts
@@ -280,6 +280,39 @@ describe("resolveExtraParams", () => {
     });
   });
 
+  it("canonicalizes text verbosity alias styles with agent override precedence", () => {
+    const result = resolveExtraParams({
+      cfg: {
+        agents: {
+          defaults: {
+            models: {
+              "openai/gpt-5.4": {
+                params: {
+                  text_verbosity: "high",
+                },
+              },
+            },
+          },
+          list: [
+            {
+              id: "main",
+              params: {
+                textVerbosity: "low",
+              },
+            },
+          ],
+        },
+      },
+      provider: "openai",
+      modelId: "gpt-5.4",
+      agentId: "main",
+    });
+
+    expect(result).toEqual({
+      text_verbosity: "low",
+    });
+  });
+
   it("ignores per-agent params when agentId does not match", () => {
     const result = resolveExtraParams({
       cfg: {
@@ -1880,6 +1913,135 @@ describe("applyExtraParamsToAgent", () => {
     expect(payload.service_tier).toBe("priority");
   });
 
+  it("injects configured OpenAI text verbosity into Responses payloads", () => {
+    const payload = runResponsesPayloadMutationCase({
+      applyProvider: "openai",
+      applyModelId: "gpt-5.4",
+      cfg: {
+        agents: {
+          defaults: {
+            models: {
+              "openai/gpt-5.4": {
+                params: {
+                  textVerbosity: "low",
+                },
+              },
+            },
+          },
+        },
+      },
+      model: {
+        api: "openai-responses",
+        provider: "openai",
+        id: "gpt-5.4",
+        baseUrl: "https://api.openai.com/v1",
+      } as unknown as Model<"openai-responses">,
+    });
+    expect(payload.text).toEqual({ verbosity: "low" });
+  });
+
+  it("injects configured text verbosity into Codex Responses payloads", () => {
+    const payload = runResponsesPayloadMutationCase({
+      applyProvider: "openai-codex",
+      applyModelId: "gpt-5.4",
+      cfg: {
+        agents: {
+          defaults: {
+            models: {
+              "openai-codex/gpt-5.4": {
+                params: {
+                  text_verbosity: "high",
+                },
+              },
+            },
+          },
+        },
+      },
+      model: {
+        api: "openai-codex-responses",
+        provider: "openai-codex",
+        id: "gpt-5.4",
+        baseUrl: "https://chatgpt.com/backend-api/codex/responses",
+      } as unknown as Model<"openai-codex-responses">,
+      payload: {
+        store: false,
+        text: {
+          verbosity: "medium",
+        },
+      },
+    });
+    expect(payload.text).toEqual({ verbosity: "high" });
+  });
+
+  it("preserves caller-provided payload.text keys when injecting text verbosity", () => {
+    const payload = runResponsesPayloadMutationCase({
+      applyProvider: "openai",
+      applyModelId: "gpt-5.4",
+      cfg: {
+        agents: {
+          defaults: {
+            models: {
+              "openai/gpt-5.4": {
+                params: {
+                  text_verbosity: "medium",
+                },
+              },
+            },
+          },
+        },
+      },
+      model: {
+        api: "openai-responses",
+        provider: "openai",
+        id: "gpt-5.4",
+        baseUrl: "https://api.openai.com/v1",
+      } as unknown as Model<"openai-responses">,
+      payload: {
+        store: false,
+        text: {
+          format: { type: "text" },
+        },
+      },
+    });
+    expect(payload.text).toEqual({
+      format: { type: "text" },
+      verbosity: "medium",
+    });
+  });
+
+  it("preserves caller-provided payload.text.verbosity for OpenAI Responses", () => {
+    const payload = runResponsesPayloadMutationCase({
+      applyProvider: "openai",
+      applyModelId: "gpt-5.4",
+      cfg: {
+        agents: {
+          defaults: {
+            models: {
+              "openai/gpt-5.4": {
+                params: {
+                  textVerbosity: "low",
+                },
+              },
+            },
+          },
+        },
+      },
+      model: {
+        api: "openai-responses",
+        provider: "openai",
+        id: "gpt-5.4",
+        baseUrl: "https://api.openai.com/v1",
+      } as unknown as Model<"openai-responses">,
+      payload: {
+        store: false,
+        text: {
+          verbosity: "high",
+        },
+      },
+    });
verbosity: "high" }); + }); + it("injects configured OpenAI service_tier into Codex Responses payloads", () => { const payload = runResponsesPayloadMutationCase({ applyProvider: "openai-codex", @@ -1938,6 +2100,103 @@ describe("applyExtraParamsToAgent", () => { expect(payload.service_tier).toBe("default"); }); + it("warns and skips invalid OpenAI text verbosity values", () => { + const warnSpy = vi.spyOn(log, "warn").mockImplementation(() => undefined); + try { + const payload = runResponsesPayloadMutationCase({ + applyProvider: "openai", + applyModelId: "gpt-5.4", + cfg: { + agents: { + defaults: { + models: { + "openai/gpt-5.4": { + params: { + textVerbosity: "loud", + }, + }, + }, + }, + }, + }, + model: { + api: "openai-responses", + provider: "openai", + id: "gpt-5.4", + baseUrl: "https://api.openai.com/v1", + } as unknown as Model<"openai-responses">, + }); + expect(payload).not.toHaveProperty("text"); + expect(warnSpy).toHaveBeenCalledWith("ignoring invalid OpenAI text verbosity param: loud"); + } finally { + warnSpy.mockRestore(); + } + }); + + it("lets null runtime override suppress inherited text verbosity injection", () => { + const payload = runResponsesPayloadMutationCase({ + applyProvider: "openai", + applyModelId: "gpt-5.4", + cfg: { + agents: { + defaults: { + models: { + "openai/gpt-5.4": { + params: { + textVerbosity: "high", + }, + }, + }, + }, + }, + }, + extraParamsOverride: { + text_verbosity: null, + }, + model: { + api: "openai-responses", + provider: "openai", + id: "gpt-5.4", + baseUrl: "https://api.openai.com/v1", + } as unknown as Model<"openai-responses">, + }); + expect(payload).not.toHaveProperty("text"); + }); + + it("ignores OpenAI text verbosity params for non-OpenAI providers without warning", () => { + const warnSpy = vi.spyOn(log, "warn").mockImplementation(() => undefined); + try { + const payload = runResponsesPayloadMutationCase({ + applyProvider: "anthropic", + applyModelId: "claude-sonnet-4-5", + cfg: { + agents: { + defaults: { + models: { + "anthropic/claude-sonnet-4-5": { + params: { + textVerbosity: "high", + }, + }, + }, + }, + }, + }, + model: { + api: "anthropic-messages", + provider: "anthropic", + id: "claude-sonnet-4-5", + baseUrl: "https://api.anthropic.com", + } as unknown as Model<"anthropic-messages">, + payload: {}, + }); + expect(payload).not.toHaveProperty("text"); + expect(warnSpy).not.toHaveBeenCalled(); + } finally { + warnSpy.mockRestore(); + } + }); + it("maps fast mode to priority service_tier for direct OpenAI Responses", () => { const payload = runResponsesPayloadMutationCase({ applyProvider: "openai", diff --git a/src/agents/pi-embedded-runner/extra-params.ts b/src/agents/pi-embedded-runner/extra-params.ts index 0894e2dacc5..b84bcf3423b 100644 --- a/src/agents/pi-embedded-runner/extra-params.ts +++ b/src/agents/pi-embedded-runner/extra-params.ts @@ -36,8 +36,10 @@ import { createOpenAIFastModeWrapper, createOpenAIResponsesContextManagementWrapper, createOpenAIServiceTierWrapper, + createOpenAITextVerbosityWrapper, resolveOpenAIFastMode, resolveOpenAIServiceTier, + resolveOpenAITextVerbosity, } from "./openai-stream-wrappers.js"; import { streamWithPayloadPatch } from "./stream-payload-utils.js"; @@ -101,6 +103,16 @@ export function resolveExtraParams(params: { delete merged.parallelToolCalls; } + const resolvedTextVerbosity = resolveAliasedParamValue( + [globalParams, agentParams], + "text_verbosity", + "textVerbosity", + ); + if (resolvedTextVerbosity !== undefined) { + merged.text_verbosity = resolvedTextVerbosity; + 
+    delete merged.textVerbosity;
+  }
+
   return merged;
 }
 
@@ -411,6 +423,25 @@ function applyPostPluginStreamWrappers(
     );
     ctx.agent.streamFn = createOpenAIServiceTierWrapper(ctx.agent.streamFn, openAIServiceTier);
   }
+
+  const rawTextVerbosity = resolveAliasedParamValue(
+    [ctx.resolvedExtraParams, ctx.override],
+    "text_verbosity",
+    "textVerbosity",
+  );
+  if (rawTextVerbosity === null) {
+    log.debug("text verbosity suppressed by null override, skipping injection");
+  } else if (rawTextVerbosity !== undefined) {
+    const openAITextVerbosity = resolveOpenAITextVerbosity({
+      text_verbosity: rawTextVerbosity,
+    });
+    if (openAITextVerbosity) {
+      log.debug(
+        `applying OpenAI text verbosity=${openAITextVerbosity} for ${ctx.provider}/${ctx.modelId}`,
+      );
+      ctx.agent.streamFn = createOpenAITextVerbosityWrapper(ctx.agent.streamFn, openAITextVerbosity);
+    }
+  }
 }
 
 // Work around upstream pi-ai hardcoding `store: false` for Responses API.
diff --git a/src/agents/pi-embedded-runner/openai-stream-wrappers.ts b/src/agents/pi-embedded-runner/openai-stream-wrappers.ts
index ceae6c672b9..e0f9560e4e4 100644
--- a/src/agents/pi-embedded-runner/openai-stream-wrappers.ts
+++ b/src/agents/pi-embedded-runner/openai-stream-wrappers.ts
@@ -6,6 +6,7 @@ import { log } from "./logger.js";
 import { streamWithPayloadPatch } from "./stream-payload-utils.js";
 
 type OpenAIServiceTier = "auto" | "default" | "flex" | "priority";
+type OpenAITextVerbosity = "low" | "medium" | "high";
 
 const OPENAI_RESPONSES_APIS = new Set(["openai-responses"]);
 const OPENAI_RESPONSES_PROVIDERS = new Set(["openai", "azure-openai", "azure-openai-responses"]);
@@ -243,6 +244,29 @@ export function resolveOpenAIServiceTier(
   return normalized;
 }
 
+function normalizeOpenAITextVerbosity(value: unknown): OpenAITextVerbosity | undefined {
+  if (typeof value !== "string") {
+    return undefined;
+  }
+  const normalized = value.trim().toLowerCase();
+  if (normalized === "low" || normalized === "medium" || normalized === "high") {
+    return normalized;
+  }
+  return undefined;
+}
+
+export function resolveOpenAITextVerbosity(
+  extraParams: Record<string, unknown> | undefined,
+): OpenAITextVerbosity | undefined {
+  const raw = extraParams?.textVerbosity ?? extraParams?.text_verbosity;
+  const normalized = normalizeOpenAITextVerbosity(raw);
+  if (raw !== undefined && normalized === undefined) {
+    const rawSummary = typeof raw === "string" ? raw : typeof raw;
+    log.warn(`ignoring invalid OpenAI text verbosity param: ${rawSummary}`);
+  }
+  return normalized;
+}
+
 function normalizeOpenAIFastMode(value: unknown): boolean | undefined {
   if (typeof value === "boolean") {
     return value;
@@ -372,6 +396,36 @@ export function createOpenAIServiceTierWrapper(
   };
 }
 
+export function createOpenAITextVerbosityWrapper(
+  baseStreamFn: StreamFn | undefined,
+  verbosity: OpenAITextVerbosity,
+): StreamFn {
+  const underlying = baseStreamFn ?? streamSimple;
+  return (model, context, options) => {
+    if (model.api !== "openai-responses" && model.api !== "openai-codex-responses") {
+      return underlying(model, context, options);
+    }
+    const shouldOverrideExistingVerbosity = model.api === "openai-codex-responses";
+    const originalOnPayload = options?.onPayload;
+    return underlying(model, context, {
+      ...options,
+      onPayload: (payload) => {
+        if (payload && typeof payload === "object") {
+          const payloadObj = payload as Record<string, unknown>;
+          const existingText =
+            payloadObj.text && typeof payloadObj.text === "object"
+              ? (payloadObj.text as Record<string, unknown>)
+              : {};
+          if (shouldOverrideExistingVerbosity || existingText.verbosity === undefined) {
+            payloadObj.text = { ...existingText, verbosity };
+          }
+        }
+        return originalOnPayload?.(payload, model);
+      },
+    });
+  };
+}
+
 export function createCodexDefaultTransportWrapper(baseStreamFn: StreamFn | undefined): StreamFn {
   const underlying = baseStreamFn ?? streamSimple;
   return (model, context, options) =>
diff --git a/src/auto-reply/status.test.ts b/src/auto-reply/status.test.ts
index bc36fc1b8a4..48979ceba6d 100644
--- a/src/auto-reply/status.test.ts
+++ b/src/auto-reply/status.test.ts
@@ -132,6 +132,75 @@ describe("buildStatusMessage", () => {
     expect(normalizeTestText(text)).toContain("Fast: on");
   });
 
+  it("shows configured text verbosity for the active model", () => {
+    const text = buildStatusMessage({
+      config: {
+        agents: {
+          defaults: {
+            model: "openai-codex/gpt-5.4",
+            models: {
+              "openai-codex/gpt-5.4": {
+                params: {
+                  textVerbosity: "low",
+                },
+              },
+            },
+          },
+        },
+      } as unknown as OpenClawConfig,
+      agent: {
+        model: "openai-codex/gpt-5.4",
+      },
+      sessionEntry: {
+        sessionId: "abc",
+        updatedAt: 0,
+      },
+      sessionKey: "agent:main:main",
+      queue: { mode: "collect", depth: 0 },
+    });
+
+    expect(normalizeTestText(text)).toContain("Text: low");
+  });
+
+  it("shows per-agent text verbosity overrides for the active model", () => {
+    const text = buildStatusMessage({
+      config: {
+        agents: {
+          defaults: {
+            model: "openai-codex/gpt-5.4",
+            models: {
+              "openai-codex/gpt-5.4": {
+                params: {
+                  textVerbosity: "high",
+                },
+              },
+            },
+          },
+          list: [
+            {
+              id: "main",
+              params: {
+                text_verbosity: "low",
+              },
+            },
+          ],
+        },
+      } as unknown as OpenClawConfig,
+      agentId: "main",
+      agent: {
+        model: "openai-codex/gpt-5.4",
+      },
+      sessionEntry: {
+        sessionId: "abc",
+        updatedAt: 0,
+      },
+      sessionKey: "agent:main:main",
+      queue: { mode: "collect", depth: 0 },
+    });
+
+    expect(normalizeTestText(text)).toContain("Text: low");
+  });
+
   it("notes channel model overrides in status output", () => {
     const text = buildStatusMessage({
       config: {
diff --git a/src/auto-reply/status.ts b/src/auto-reply/status.ts
index d611a45dfd9..10d5d6e09ea 100644
--- a/src/auto-reply/status.ts
+++ b/src/auto-reply/status.ts
@@ -7,6 +7,8 @@ import {
   resolveConfiguredModelRef,
   resolveModelRefFromString,
 } from "../agents/model-selection.js";
+import { resolveExtraParams } from "../agents/pi-embedded-runner/extra-params.js";
+import { resolveOpenAITextVerbosity } from "../agents/pi-embedded-runner/openai-stream-wrappers.js";
 import { resolveSandboxRuntimeStatus } from "../agents/sandbox.js";
 import type { SkillCommandSpec } from "../agents/skills.js";
 import { describeToolForVerbose } from "../agents/tool-description-summary.js";
@@ -118,6 +120,27 @@ function normalizeAuthMode(value?: string): NormalizedAuthMode | undefined {
   return undefined;
 }
 
+function resolveConfiguredTextVerbosity(params: {
+  config?: OpenClawConfig;
+  agentId?: string;
+  provider?: string | null;
+  model?: string | null;
+}): "low" | "medium" | "high" | undefined {
+  const provider = params.provider?.trim();
+  const model = params.model?.trim();
+  if (!provider || !model || (provider !== "openai" && provider !== "openai-codex")) {
+    return undefined;
+  }
+  return resolveOpenAITextVerbosity(
+    resolveExtraParams({
+      cfg: params.config,
+      provider,
+      modelId: model,
+      agentId: params.agentId,
+    }),
+  );
+}
+
 function resolveRuntimeLabel(
   args: Pick,
 ): string {
@@ -664,10 +687,17 @@ export function buildStatusMessage(args: StatusArgs): string {
{ ? "elevated" : `elevated:${elevatedLevel}` : null; + const textVerbosity = resolveConfiguredTextVerbosity({ + config: args.config, + agentId: args.agentId, + provider: activeProvider, + model: activeModel, + }); const optionParts = [ `Runtime: ${runtime.label}`, `Think: ${thinkLevel}`, fastMode ? "Fast: on" : null, + textVerbosity ? `Text: ${textVerbosity}` : null, verboseLabel, reasoningLevel !== "off" ? `Reasoning: ${reasoningLevel}` : null, elevatedLabel,