fix(agents): keep large read tool results visible

This commit is contained in:
Vincent Koc 2026-04-06 02:19:28 +01:00
parent a2cbeefd5f
commit 7d2dc7a9fb
3 changed files with 20 additions and 4 deletions

View File

@ -129,6 +129,7 @@ Docs: https://docs.openclaw.ai
- Discord: keep REST, webhook, and monitor traffic on the configured proxy, preserve component-only media sends, honor `@everyone` and `@here` mention gates, keep ACK reactions on the active account, and split voice connect/playback timeouts so auto-join is more reliable. (#57465, #60361, #60345) Thanks @geekhuashan.
- WhatsApp: restore `channels.whatsapp.blockStreaming` and reset watchdog timeouts after reconnect so quiet chats stop falling into reconnect loops. (#60007, #60069) Thanks @MonkeyLeeT and @mcaxtr.
- Memory: keep `memory-core` builtin embedding registration on the already-registered path so selecting `memory-core` no longer recurses through plugin discovery and crashes during startup. (#61402) Thanks @ngutman.
- Agents/tool results: keep larger `read` outputs visible on big-window models by raising the live tool-result ceiling instead of compacting normal file reads right after the first section. Thanks @vincentkoc.
- Memory/QMD: prefer modern `qmd collection add --glob`, accept newer single-line JSON hit metadata while keeping legacy line fields, refresh QMD docs/doctor install guidance and model-override guidance, and keep older QMD releases working. Thanks @vincentkoc.
- MS Teams: download inline DM images via Graph API and preserve channel reply threading in proactive fallback. (#52212, #55198) Thanks @Ted-developer and @hyojin.
- MS Teams: replace the deprecated Teams SDK HttpPlugin stub with `httpServerAdapter` so recurring gateway deprecation warnings stop firing and the Express 5 compatibility workaround stays on the supported SDK path. (#60939) Thanks @coolramukaka-sys.

View File

@ -199,7 +199,7 @@ describe("calculateMaxToolResultChars", () => {
});
it("exports the live cap through both constant names", () => {
  // The live ceiling was raised from 40_000 to 120_000 so large `read`
  // outputs stay visible on big-window models; only the new value holds.
  expect(DEFAULT_MAX_LIVE_TOOL_RESULT_CHARS).toBe(120_000);
  // The hard cap is a backwards-compatible alias and must track the live cap.
  expect(HARD_MAX_TOOL_RESULT_CHARS).toBe(DEFAULT_MAX_LIVE_TOOL_RESULT_CHARS);
});
@ -212,6 +212,20 @@ describe("calculateMaxToolResultChars", () => {
const result = calculateMaxToolResultChars(128_000);
expect(result).toBe(DEFAULT_MAX_LIVE_TOOL_RESULT_CHARS);
});
it("keeps moderately large reads intact on 128K contexts", () => {
  // A 60K-char tool result is well under the 120K live ceiling, so a
  // 128K-context window must pass it through completely untouched.
  const history: AgentMessage[] = [
    makeUserMessage("hello"),
    makeAssistantMessage("reading changelog"),
    makeToolResult("x".repeat(60_000)),
  ];
  const outcome = truncateOversizedToolResultsInMessages(history, 128_000);
  expect(outcome.truncatedCount).toBe(0);
  expect(outcome.messages).toEqual(history);
});
});
describe("isOversizedToolResult", () => {

View File

@ -18,10 +18,11 @@ const MAX_TOOL_RESULT_CONTEXT_SHARE = 0.3;
*
* Pi already truncates tool results aggressively when serializing old history
* for compaction summaries. For the live request path we keep a larger slice so
 * the model can still act on recent tool output, especially large read results
 * on modern 128K+ context models, while still keeping a bounded request-local
 * ceiling that cannot dominate the next turn.
*/
// Raised from 40_000 so large `read` outputs on 128K+ context models stay
// intact on the live request path instead of being compacted early.
export const DEFAULT_MAX_LIVE_TOOL_RESULT_CHARS = 120_000;
/**
* Backwards-compatible alias for older call sites/tests.