mirror of https://github.com/openclaw/openclaw.git
fix: use stream-json output for Claude CLI backend to prevent watchdog timeouts
The Claude CLI backend uses `--output-format json`, which produces no
stdout until the entire request completes. When session context is large
(100K+ tokens) or API response is slow, the no-output watchdog timer
(max 180s for resume sessions) kills the process before it finishes,
resulting in "CLI produced no output for 180s and was terminated" errors.
Switch to `--output-format stream-json --verbose` so Claude CLI emits
NDJSON events throughout processing (init, assistant, rate_limit, result).
Each event resets the watchdog timer, which is the intended behavior —
the watchdog detects truly stuck processes, not slow-but-progressing ones.
Changes:
- cli-backends.ts: `json` → `stream-json --verbose`, `output: "jsonl"`
- helpers.ts: teach parseCliJsonl to extract text from Claude's
`{"type":"result","result":"..."}` NDJSON line
Note: `--verbose` is required for stream-json in `-p` (print) mode.
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
4ad7d51c01
commit
623f4d3056
|
|
@@ -38,6 +38,7 @@ Docs: https://docs.openclaw.ai
|
|||
- CLI/onboarding: show the Kimi Code API key option again in the Moonshot setup menu so the interactive picker includes all Kimi setup paths together. Fixes #54412 Thanks @sparkyrider
|
||||
- Agents/status: use provider-aware context window lookup for fresh Anthropic 4.6 model overrides so `/status` shows the correct 1.0m window instead of an underreported shared-cache minimum. (#54796) Thanks @neeravmakwana.
|
||||
- Agents/errors: surface provider quota/reset details when available, but keep HTML/Cloudflare rate-limit pages on the generic fallback so raw error pages are not shown to users. (#54512) Thanks @bugkill3r.
|
||||
- Claude CLI: switch the bundled Claude CLI backend to `stream-json` output so watchdogs see progress on long runs, and keep session/usage metadata even when Claude finishes with an empty result line. (#49698) Thanks @felear2022.
|
||||
- Agents/embedded replies: surface mid-turn 429 and overload failures when embedded runs end without a user-visible reply, while preserving successful media-only replies that still use legacy `mediaUrl`. (#50930) Thanks @infichen.
|
||||
- WhatsApp/allowFrom: show a specific allowFrom policy error for valid blocked targets instead of the misleading `<E.164|group JID>` format hint. Thanks @mcaxtr.
|
||||
- Agents/cooldowns: scope rate-limit cooldowns per model so one 429 no longer blocks every model on the same auth profile, replace the exponential 1 min -> 1 h escalation with a stepped 30 s / 1 min / 5 min ladder, and surface a user-facing countdown message when all models are rate-limited. (#49834) Thanks @kiranvk-2011.
|
||||
|
|
|
|||
|
|
@@ -78,17 +78,25 @@ export function buildAnthropicCliBackend(): CliBackendPlugin {
|
|||
bundleMcp: true,
|
||||
config: {
|
||||
command: "claude",
|
||||
args: ["-p", "--output-format", "json", "--permission-mode", "bypassPermissions"],
|
||||
args: [
|
||||
"-p",
|
||||
"--output-format",
|
||||
"stream-json",
|
||||
"--verbose",
|
||||
"--permission-mode",
|
||||
"bypassPermissions",
|
||||
],
|
||||
resumeArgs: [
|
||||
"-p",
|
||||
"--output-format",
|
||||
"json",
|
||||
"stream-json",
|
||||
"--verbose",
|
||||
"--permission-mode",
|
||||
"bypassPermissions",
|
||||
"--resume",
|
||||
"{sessionId}",
|
||||
],
|
||||
output: "json",
|
||||
output: "jsonl",
|
||||
input: "arg",
|
||||
modelArg: "--model",
|
||||
modelAliases: CLAUDE_MODEL_ALIASES,
|
||||
|
|
|
|||
|
|
@@ -92,9 +92,14 @@ describe("resolveCliBackendConfig claude-cli defaults", () => {
|
|||
const resolved = resolveCliBackendConfig("claude-cli");
|
||||
|
||||
expect(resolved).not.toBeNull();
|
||||
expect(resolved?.config.output).toBe("jsonl");
|
||||
expect(resolved?.config.args).toContain("stream-json");
|
||||
expect(resolved?.config.args).toContain("--verbose");
|
||||
expect(resolved?.config.args).toContain("--permission-mode");
|
||||
expect(resolved?.config.args).toContain("bypassPermissions");
|
||||
expect(resolved?.config.args).not.toContain("--dangerously-skip-permissions");
|
||||
expect(resolved?.config.resumeArgs).toContain("stream-json");
|
||||
expect(resolved?.config.resumeArgs).toContain("--verbose");
|
||||
expect(resolved?.config.resumeArgs).toContain("--permission-mode");
|
||||
expect(resolved?.config.resumeArgs).toContain("bypassPermissions");
|
||||
expect(resolved?.config.resumeArgs).not.toContain("--dangerously-skip-permissions");
|
||||
|
|
|
|||
|
|
@@ -1,7 +1,7 @@
|
|||
import type { ImageContent } from "@mariozechner/pi-ai";
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { MAX_IMAGE_BYTES } from "../media/constants.js";
|
||||
import { buildCliArgs, loadPromptRefImages } from "./cli-runner/helpers.js";
|
||||
import { buildCliArgs, loadPromptRefImages, parseCliJsonl } from "./cli-runner/helpers.js";
|
||||
import * as promptImageUtils from "./pi-embedded-runner/run/images.js";
|
||||
import type { SandboxFsBridge } from "./sandbox/fs-bridge.js";
|
||||
import * as toolImages from "./tool-images.js";
|
||||
|
|
@@ -118,3 +118,74 @@ describe("buildCliArgs", () => {
|
|||
).toEqual(["exec", "resume", "thread-123", "--model", "gpt-5.4"]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("parseCliJsonl", () => {
|
||||
it("parses Claude stream-json result events", () => {
|
||||
const result = parseCliJsonl(
|
||||
[
|
||||
JSON.stringify({ type: "init", session_id: "session-123" }),
|
||||
JSON.stringify({
|
||||
type: "result",
|
||||
session_id: "session-123",
|
||||
result: "Claude says hello",
|
||||
usage: {
|
||||
input_tokens: 12,
|
||||
output_tokens: 3,
|
||||
cache_read_input_tokens: 4,
|
||||
},
|
||||
}),
|
||||
].join("\n"),
|
||||
{
|
||||
command: "claude",
|
||||
output: "jsonl",
|
||||
sessionIdFields: ["session_id"],
|
||||
},
|
||||
);
|
||||
|
||||
expect(result).toEqual({
|
||||
text: "Claude says hello",
|
||||
sessionId: "session-123",
|
||||
usage: {
|
||||
input: 12,
|
||||
output: 3,
|
||||
cacheRead: 4,
|
||||
cacheWrite: undefined,
|
||||
total: undefined,
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
it("preserves Claude session metadata even when the final result text is empty", () => {
|
||||
const result = parseCliJsonl(
|
||||
[
|
||||
JSON.stringify({ type: "init", session_id: "session-456" }),
|
||||
JSON.stringify({
|
||||
type: "result",
|
||||
session_id: "session-456",
|
||||
result: " ",
|
||||
usage: {
|
||||
input_tokens: 18,
|
||||
output_tokens: 0,
|
||||
},
|
||||
}),
|
||||
].join("\n"),
|
||||
{
|
||||
command: "claude",
|
||||
output: "jsonl",
|
||||
sessionIdFields: ["session_id"],
|
||||
},
|
||||
);
|
||||
|
||||
expect(result).toEqual({
|
||||
text: "",
|
||||
sessionId: "session-456",
|
||||
usage: {
|
||||
input: 18,
|
||||
output: undefined,
|
||||
cacheRead: undefined,
|
||||
cacheWrite: undefined,
|
||||
total: undefined,
|
||||
},
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
|
|||
|
|
@@ -233,6 +233,22 @@ export function parseCliJsonl(raw: string, backend: CliBackendConfig): CliOutput
|
|||
if (isRecord(parsed.usage)) {
|
||||
usage = toUsage(parsed.usage) ?? usage;
|
||||
}
|
||||
|
||||
// Claude stream-json: {"type":"result","result":"...","session_id":"...","usage":{...}}
|
||||
if (
|
||||
typeof parsed.type === "string" &&
|
||||
parsed.type === "result" &&
|
||||
typeof parsed.result === "string"
|
||||
) {
|
||||
const resultText = parsed.result.trim();
|
||||
if (resultText) {
|
||||
return { text: resultText, sessionId, usage };
|
||||
}
|
||||
// Claude may finish with an empty result after tool-only work. Keep the
|
||||
// resolved session handle and usage instead of dropping them.
|
||||
return { text: "", sessionId, usage };
|
||||
}
|
||||
|
||||
const item = isRecord(parsed.item) ? parsed.item : null;
|
||||
if (item && typeof item.text === "string") {
|
||||
const type = typeof item.type === "string" ? item.type.toLowerCase() : "";
|
||||
|
|
|
|||
Loading…
Reference in New Issue