mirror of https://github.com/openclaw/openclaw.git
fix(status): hydrate cache usage in transcript fallback (#59247)
* fix(status): hydrate cache usage in transcript fallback
* docs(changelog): note status cache fallback fix

Co-authored-by: Vincent Koc <vincentkoc@ieee.org>
This commit is contained in:
parent
3100984a33
commit
3f457cabf7
|
|
@ -96,6 +96,7 @@ Docs: https://docs.openclaw.ai
|
|||
- Agents/cache: preserve the full 3-turn prompt-cache image window across tool loops, keep colliding bundled MCP tool definitions deterministic, and reapply Anthropic Vertex cache shaping after payload hook replacements so KV/cache reuse stays stable. Thanks @vincentkoc.
|
||||
- Device pairing: reject rotating device tokens into roles that were never approved during pairing, and keep reconnect role checks bounded to the paired device's approved role set. (#60462) Thanks @eleqtrizit.
|
||||
- Mobile pairing/security: fail closed for internal `/pair` setup-code issuance, cleanup, and approval paths when gateway pairing scopes are missing, and keep approval-time requested-scope enforcement on the internal command path. (#55996) Thanks @coygeek.
|
||||
- Status/cache: restore `cacheRead` and `cacheWrite` in transcript fallback so `/status` keeps showing cache hit percentages when session logs are the only complete usage source. (#59247) Thanks @stuartsy.
|
||||
- Exec approvals/node host: forward prepared `system.run` approval plans on the async node invoke path so mutable script operands keep their approval-time binding and drift revalidation instead of dropping back to unbound execution.
|
||||
- Synology Chat/security: default low-level HTTPS helper TLS verification to on so helper/API defaults match the shipped safe account default, and only explicit `allowInsecureSsl: true` opts out.
|
||||
- Android/canvas security: require exact normalized A2UI URL matches before forwarding canvas bridge actions, rejecting query mismatches and descendant paths while still allowing fragment-only A2UI navigation.
|
||||
|
|
|
|||
|
|
@ -1061,6 +1061,63 @@ describe("buildStatusMessage", () => {
|
|||
);
|
||||
});
|
||||
|
||||
it("hydrates cache usage from transcript fallback", async () => {
|
||||
await withTempHome(
|
||||
async (dir) => {
|
||||
const sessionId = "sess-cache-hydration";
|
||||
writeBaselineTranscriptUsageLog({
|
||||
dir,
|
||||
agentId: "main",
|
||||
sessionId,
|
||||
});
|
||||
|
||||
const text = buildTranscriptStatusText({
|
||||
sessionId,
|
||||
sessionKey: "agent:main:main",
|
||||
});
|
||||
|
||||
expect(normalizeTestText(text)).toContain("Cache: 100% hit · 1.0k cached, 0 new");
|
||||
},
|
||||
{ prefix: "openclaw-status-" },
|
||||
);
|
||||
});
|
||||
|
||||
it("preserves existing nonzero cache usage over transcript fallback values", async () => {
|
||||
await withTempHome(
|
||||
async (dir) => {
|
||||
const sessionId = "sess-cache-preserve";
|
||||
writeBaselineTranscriptUsageLog({
|
||||
dir,
|
||||
agentId: "main",
|
||||
sessionId,
|
||||
});
|
||||
|
||||
const text = buildStatusMessage({
|
||||
agent: {
|
||||
model: "anthropic/claude-opus-4-6",
|
||||
contextTokens: 32_000,
|
||||
},
|
||||
sessionEntry: {
|
||||
sessionId,
|
||||
updatedAt: 0,
|
||||
totalTokens: 3,
|
||||
contextTokens: 32_000,
|
||||
cacheRead: 12,
|
||||
cacheWrite: 34,
|
||||
},
|
||||
sessionKey: "agent:main:main",
|
||||
sessionScope: "per-sender",
|
||||
queue: { mode: "collect", depth: 0 },
|
||||
includeTranscriptUsage: true,
|
||||
modelAuth: "api-key",
|
||||
});
|
||||
|
||||
expect(normalizeTestText(text)).toContain("Cache: 26% hit · 12 cached, 34 new");
|
||||
},
|
||||
{ prefix: "openclaw-status-" },
|
||||
);
|
||||
});
|
||||
|
||||
it("keeps transcript-derived slash model ids on model-only context lookup", async () => {
|
||||
await withTempHome(
|
||||
async (dir) => {
|
||||
|
|
|
|||
|
|
@ -241,6 +241,8 @@ const readUsageFromSessionLog = (
|
|||
| {
|
||||
input: number;
|
||||
output: number;
|
||||
cacheRead: number;
|
||||
cacheWrite: number;
|
||||
promptTokens: number;
|
||||
total: number;
|
||||
model?: string;
|
||||
|
|
@ -321,7 +323,15 @@ const readUsageFromSessionLog = (
|
|||
if (promptTokens === 0 && total === 0) {
|
||||
return undefined;
|
||||
}
|
||||
return { input, output, promptTokens, total, model };
|
||||
return {
|
||||
input,
|
||||
output,
|
||||
cacheRead: lastUsage.cacheRead ?? 0,
|
||||
cacheWrite: lastUsage.cacheWrite ?? 0,
|
||||
promptTokens,
|
||||
total,
|
||||
model,
|
||||
};
|
||||
} catch {
|
||||
return undefined;
|
||||
}
|
||||
|
|
@ -554,6 +564,12 @@ export function buildStatusMessage(args: StatusArgs): string {
|
|||
if (!outputTokens || outputTokens === 0) {
|
||||
outputTokens = logUsage.output;
|
||||
}
|
||||
if (typeof cacheRead !== "number" || cacheRead <= 0) {
|
||||
cacheRead = logUsage.cacheRead;
|
||||
}
|
||||
if (typeof cacheWrite !== "number" || cacheWrite <= 0) {
|
||||
cacheWrite = logUsage.cacheWrite;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
Loading…
Reference in New Issue