fix: cover status transcript fallback (#55041) (thanks @jjjojoj)

This commit is contained in:
Peter Steinberger 2026-04-04 09:35:49 +01:00
parent 97a8ba89fd
commit 238fac6636
4 changed files with 105 additions and 2 deletions

View File

@ -41,6 +41,7 @@ Docs: https://docs.openclaw.ai
- Providers/OpenAI Codex: split native `contextWindow` from runtime `contextTokens`, keep the default effective cap at `272000`, and expose a per-model `contextTokens` override on `models.providers.*.models[]`.
- Providers/compat: stop forcing OpenAI-only defaults on proxy and custom OpenAI-compatible routes, preserve native vendor-specific reasoning/tool/streaming behavior across Anthropic-compatible, Moonshot, Mistral, ModelStudio, OpenRouter, xAI, and Z.ai endpoints, and route GitHub Copilot Claude models through Anthropic Messages instead of OpenAI Responses.
- Providers/Model Studio: preserve native streaming usage reporting for DashScope-compatible endpoints even when they are configured under a generic provider key, so streamed token totals stop sticking at zero. (#52395) Thanks @IVY-AI-gif.
- Status/usage: let `/status` and `session_status` fall back to transcript token totals when the session meta store has stayed at zero, so LM Studio, Ollama, DashScope, and similar OpenAI-compatible providers stop showing `Context: 0/...`. (#55041) Thanks @jjjojoj.
- Plugins/OpenAI: enable `gpt-image-1` reference-image edits through `/images/edits` multipart uploads, and stop inferring unsupported resolution overrides when no explicit `size` or `resolution` is provided.
- Gateway/startup: default `gateway.mode` to `local` when unset, detect PID recycling in gateway lock files on Windows and macOS, and show startup progress so healthy restarts stop getting blocked by stale locks. (#54801, #60085, #59843)
- Mobile pairing/Android: tighten secure endpoint handling so Tailscale and public remote setup reject cleartext endpoints, private LAN pairing still works, merged-role approvals mint both node and operator device tokens, and bootstrap tokens survive node auto-pair until operator approval finishes. (#60128, #60208, #60221)

View File

@ -335,6 +335,25 @@ describe("session_status tool", () => {
expect(details.statusText).not.toContain("OAuth/token status");
});
it("enables transcript usage fallback for session_status", async () => {
  // Seed a single "main" session entry so the tool can resolve a session.
  resetSessionStore({ main: { sessionId: "s1", updatedAt: 10 } });
  const statusTool = getSessionStatusTool();
  await statusTool.execute("call-transcript-usage", {});
  // The status builder must be asked to fall back to transcript-derived usage.
  expect(buildStatusMessageMock).toHaveBeenCalledWith(
    expect.objectContaining({ includeTranscriptUsage: true }),
  );
});
it("errors for unknown session keys", async () => {
resetSessionStore({
main: { sessionId: "s1", updatedAt: 10 },

View File

@ -1,4 +1,8 @@
import fs from "node:fs";
import path from "node:path";
import { afterEach, beforeEach, describe, expect, it } from "vitest";
import { normalizeTestText } from "../../../test/helpers/normalize-text.js";
import { withTempHome } from "../../../test/helpers/temp-home.js";
import {
addSubagentRunForTests,
resetSubagentRegistryForTests,
@ -11,7 +15,7 @@ import {
failTaskRunByRunId,
} from "../../tasks/task-executor.js";
import { resetTaskRegistryForTests } from "../../tasks/task-registry.js";
import { buildStatusReply } from "./commands-status.js";
import { buildStatusReply, buildStatusText } from "./commands-status.js";
import { buildCommandTestParams } from "./commands.test-harness.js";
const baseCfg = {
@ -45,6 +49,41 @@ async function buildStatusReplyForTest(params: { sessionKey?: string; verbose?:
});
}
/**
 * Test helper: write a one-record session transcript (JSONL) containing an
 * assistant message with the given token usage, at the path the status code
 * reads transcripts from: <dir>/.openclaw/agents/<agentId>/sessions/<sessionId>.jsonl.
 */
function writeTranscriptUsageLog(params: {
  dir: string;
  agentId: string;
  sessionId: string;
  usage: {
    input: number;
    output: number;
    cacheRead: number;
    cacheWrite: number;
    totalTokens: number;
  };
}) {
  const { dir, agentId, sessionId, usage } = params;
  const sessionsDir = path.join(dir, ".openclaw", "agents", agentId, "sessions");
  // Parent directories may not exist in a fresh temp home.
  fs.mkdirSync(sessionsDir, { recursive: true });
  const record = {
    type: "message",
    message: {
      role: "assistant",
      model: "claude-opus-4-5",
      usage,
    },
  };
  fs.writeFileSync(path.join(sessionsDir, `${sessionId}.jsonl`), JSON.stringify(record), "utf-8");
}
describe("buildStatusReply subagent summary", () => {
beforeEach(() => {
resetSubagentRegistryForTests();
@ -375,4 +414,47 @@ describe("buildStatusReply subagent summary", () => {
expect(reply?.text).not.toContain("subagent");
expect(reply?.text).not.toContain("cron");
});
it("uses transcript usage fallback in /status output", async () => {
await withTempHome(async (dir) => {
const sessionId = "sess-status-transcript";
writeTranscriptUsageLog({
dir,
agentId: "main",
sessionId,
usage: {
input: 1,
output: 2,
cacheRead: 1000,
cacheWrite: 0,
totalTokens: 1003,
},
});
const text = await buildStatusText({
cfg: baseCfg,
sessionEntry: {
sessionId,
updatedAt: 0,
totalTokens: 3,
contextTokens: 32_000,
},
sessionKey: "agent:main:main",
parentSessionKey: "agent:main:main",
sessionScope: "per-sender",
statusChannel: "whatsapp",
provider: "anthropic",
model: "claude-opus-4-5",
contextTokens: 32_000,
resolvedFastMode: false,
resolvedVerboseLevel: "off",
resolvedReasoningLevel: "off",
resolveDefaultThinkingLevel: async () => undefined,
isGroup: false,
defaultGroupActivation: () => "mention",
});
expect(normalizeTestText(text)).toContain("Context: 1.0k/32k");
});
});
});

View File

@ -149,6 +149,7 @@ export async function buildStatusText(params: {
primaryModelLabelOverride?: string;
modelAuthOverride?: string;
activeModelAuthOverride?: string;
includeTranscriptUsage?: boolean;
}): Promise<string> {
const {
cfg,
@ -345,7 +346,7 @@ export async function buildStatusText(params: {
subagentsLine,
taskLine,
mediaDecisions: params.mediaDecisions,
includeTranscriptUsage: true,
includeTranscriptUsage: params.includeTranscriptUsage ?? true,
});
return statusText;