fix(status): recompute fallback context window (#51795)

* fix(status): recompute fallback context window

* fix(status): keep live context token caps on fallback

* fix(status): preserve fallback runtime context windows

* fix(status): preserve configured fallback context caps

* fix(status): keep provider-aware transcript context lookups

* fix(status): preserve explicit fallback context caps

* fix(status): clamp fallback configured context caps

* fix(status): keep raw runtime slash ids

* fix(status): refresh plugin-sdk api baseline

* fix(status): preserve fallback context lookup

* test(status): refresh plugin-sdk api baseline

* fix(status): keep runtime slash-id context lookup

---------

Co-authored-by: create <create@createdeMacBook-Pro.local>
Co-authored-by: Frank Yang <frank.ekn@gmail.com>
Co-authored-by: RichardCao <RichardCao@users.noreply.github.com>
This commit is contained in:
RichardCao 2026-03-23 17:08:48 +08:00 committed by GitHub
parent b186d9847c
commit a835c200f3
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 704 additions and 24 deletions

View File

@ -2359,7 +2359,7 @@
"exportName": "buildCommandsMessage",
"kind": "function",
"source": {
"line": 847,
"line": 955,
"path": "src/auto-reply/status.ts"
}
},
@ -2368,7 +2368,7 @@
"exportName": "buildCommandsMessagePaginated",
"kind": "function",
"source": {
"line": 856,
"line": 964,
"path": "src/auto-reply/status.ts"
}
},
@ -2404,7 +2404,7 @@
"exportName": "buildHelpMessage",
"kind": "function",
"source": {
"line": 727,
"line": 835,
"path": "src/auto-reply/status.ts"
}
},

View File

@ -258,12 +258,12 @@
{"declaration":"export type ChannelSetupWizard = ChannelSetupWizard;","entrypoint":"channel-setup","exportName":"ChannelSetupWizard","importSpecifier":"openclaw/plugin-sdk/channel-setup","kind":"type","recordType":"export","sourceLine":247,"sourcePath":"src/channels/plugins/setup-wizard.ts"}
{"declaration":"export type OptionalChannelSetupSurface = OptionalChannelSetupSurface;","entrypoint":"channel-setup","exportName":"OptionalChannelSetupSurface","importSpecifier":"openclaw/plugin-sdk/channel-setup","kind":"type","recordType":"export","sourceLine":29,"sourcePath":"src/plugin-sdk/channel-setup.ts"}
{"category":"channel","entrypoint":"command-auth","importSpecifier":"openclaw/plugin-sdk/command-auth","recordType":"module","sourceLine":1,"sourcePath":"src/plugin-sdk/command-auth.ts"}
{"declaration":"export function buildCommandsMessage(cfg?: OpenClawConfig | undefined, skillCommands?: SkillCommandSpec[] | undefined, options?: CommandsMessageOptions | undefined): string;","entrypoint":"command-auth","exportName":"buildCommandsMessage","importSpecifier":"openclaw/plugin-sdk/command-auth","kind":"function","recordType":"export","sourceLine":847,"sourcePath":"src/auto-reply/status.ts"}
{"declaration":"export function buildCommandsMessagePaginated(cfg?: OpenClawConfig | undefined, skillCommands?: SkillCommandSpec[] | undefined, options?: CommandsMessageOptions | undefined): CommandsMessageResult;","entrypoint":"command-auth","exportName":"buildCommandsMessagePaginated","importSpecifier":"openclaw/plugin-sdk/command-auth","kind":"function","recordType":"export","sourceLine":856,"sourcePath":"src/auto-reply/status.ts"}
{"declaration":"export function buildCommandsMessage(cfg?: OpenClawConfig | undefined, skillCommands?: SkillCommandSpec[] | undefined, options?: CommandsMessageOptions | undefined): string;","entrypoint":"command-auth","exportName":"buildCommandsMessage","importSpecifier":"openclaw/plugin-sdk/command-auth","kind":"function","recordType":"export","sourceLine":955,"sourcePath":"src/auto-reply/status.ts"}
{"declaration":"export function buildCommandsMessagePaginated(cfg?: OpenClawConfig | undefined, skillCommands?: SkillCommandSpec[] | undefined, options?: CommandsMessageOptions | undefined): CommandsMessageResult;","entrypoint":"command-auth","exportName":"buildCommandsMessagePaginated","importSpecifier":"openclaw/plugin-sdk/command-auth","kind":"function","recordType":"export","sourceLine":964,"sourcePath":"src/auto-reply/status.ts"}
{"declaration":"export function buildCommandsPaginationKeyboard(currentPage: number, totalPages: number, agentId?: string | undefined): { text: string; callback_data: string; }[][];","entrypoint":"command-auth","exportName":"buildCommandsPaginationKeyboard","importSpecifier":"openclaw/plugin-sdk/command-auth","kind":"function","recordType":"export","sourceLine":89,"sourcePath":"src/auto-reply/reply/commands-info.ts"}
{"declaration":"export function buildCommandText(commandName: string, args?: string | undefined): string;","entrypoint":"command-auth","exportName":"buildCommandText","importSpecifier":"openclaw/plugin-sdk/command-auth","kind":"function","recordType":"export","sourceLine":199,"sourcePath":"src/auto-reply/commands-registry.ts"}
{"declaration":"export function buildCommandTextFromArgs(command: ChatCommandDefinition, args?: CommandArgs | undefined): string;","entrypoint":"command-auth","exportName":"buildCommandTextFromArgs","importSpecifier":"openclaw/plugin-sdk/command-auth","kind":"function","recordType":"export","sourceLine":291,"sourcePath":"src/auto-reply/commands-registry.ts"}
{"declaration":"export function buildHelpMessage(cfg?: OpenClawConfig | undefined): string;","entrypoint":"command-auth","exportName":"buildHelpMessage","importSpecifier":"openclaw/plugin-sdk/command-auth","kind":"function","recordType":"export","sourceLine":727,"sourcePath":"src/auto-reply/status.ts"}
{"declaration":"export function buildHelpMessage(cfg?: OpenClawConfig | undefined): string;","entrypoint":"command-auth","exportName":"buildHelpMessage","importSpecifier":"openclaw/plugin-sdk/command-auth","kind":"function","recordType":"export","sourceLine":835,"sourcePath":"src/auto-reply/status.ts"}
{"declaration":"export function buildModelsProviderData(cfg: OpenClawConfig, agentId?: string | undefined): Promise<ModelsProviderData>;","entrypoint":"command-auth","exportName":"buildModelsProviderData","importSpecifier":"openclaw/plugin-sdk/command-auth","kind":"function","recordType":"export","sourceLine":37,"sourcePath":"src/auto-reply/reply/commands-models.ts"}
{"declaration":"export function createPreCryptoDirectDmAuthorizer(params: { resolveAccess: (senderId: string) => Promise<ResolvedInboundDirectDmAccess | Pick<ResolvedInboundDirectDmAccess, \"access\">>; issuePairingChallenge?: ((params: { ...; }) => Promise<...>) | undefined; onBlocked?: ((params: { ...; }) => void) | undefined; }): (input: { ...; }) => Promise<...>;","entrypoint":"command-auth","exportName":"createPreCryptoDirectDmAuthorizer","importSpecifier":"openclaw/plugin-sdk/command-auth","kind":"function","recordType":"export","sourceLine":105,"sourcePath":"src/plugin-sdk/direct-dm.ts"}
{"declaration":"export function findCommandByNativeName(name: string, provider?: string | undefined): ChatCommandDefinition | undefined;","entrypoint":"command-auth","exportName":"findCommandByNativeName","importSpecifier":"openclaw/plugin-sdk/command-auth","kind":"function","recordType":"export","sourceLine":187,"sourcePath":"src/auto-reply/commands-registry.ts"}

View File

@ -484,6 +484,10 @@ export function createSessionStatusTool(opts?: {
model: agentModel,
},
agentId,
explicitConfiguredContextTokens:
typeof agentDefaults.contextTokens === "number" && agentDefaults.contextTokens > 0
? agentDefaults.contextTokens
: undefined,
sessionEntry: resolved.entry,
sessionKey: resolved.key,
sessionStorePath: storePath,

View File

@ -186,6 +186,10 @@ export async function buildStatusReply(params: {
elevatedDefault: agentDefaults.elevatedDefault,
},
agentId: statusAgentId,
explicitConfiguredContextTokens:
typeof agentDefaults.contextTokens === "number" && agentDefaults.contextTokens > 0
? agentDefaults.contextTokens
: undefined,
sessionEntry,
sessionKey,
parentSessionKey,

View File

@ -3,6 +3,7 @@ import path from "node:path";
import { afterEach, describe, expect, it, vi } from "vitest";
import { normalizeTestText } from "../../test/helpers/normalize-text.js";
import { withTempHome } from "../../test/helpers/temp-home.js";
import { MODEL_CONTEXT_TOKEN_CACHE } from "../agents/context-cache.js";
import type { OpenClawConfig } from "../config/config.js";
import { applyModelOverrideToSessionEntry } from "../sessions/model-overrides.js";
import { createSuccessfulImageMediaDecision } from "./media-understanding.test-fixtures.js";
@ -25,6 +26,7 @@ vi.mock("../plugins/commands.js", () => ({
// Reset all vi mocks and the shared model-context-window cache between tests
// so a cached window lookup seeded by one test cannot leak into another.
afterEach(() => {
vi.restoreAllMocks();
MODEL_CONTEXT_TOKEN_CACHE.clear();
});
describe("buildStatusMessage", () => {
@ -223,6 +225,313 @@ describe("buildStatusMessage", () => {
expect(normalizeTestText(text)).toContain("Context: 1.0k/66k");
});
// Fallback sessions can persist a contextTokens snapshot taken from the
// originally selected model. When the active fallback model has a smaller
// window (200k here vs the selected model's 1.0m), status must recompute
// against the fallback model instead of trusting the stale snapshot.
it("recomputes context window from the active fallback model when session contextTokens are stale", () => {
const text = buildStatusMessage({
config: {
models: {
providers: {
"minimax-portal": {
models: [{ id: "MiniMax-M2.5", contextWindow: 200_000 }],
},
xiaomi: {
models: [{ id: "mimo-v2-flash", contextWindow: 1_048_576 }],
},
},
},
} as unknown as OpenClawConfig,
agent: {
model: "xiaomi/mimo-v2-flash",
},
sessionEntry: {
sessionId: "fallback-context-window",
updatedAt: 0,
providerOverride: "xiaomi",
modelOverride: "mimo-v2-flash",
modelProvider: "minimax-portal",
model: "MiniMax-M2.5",
fallbackNoticeSelectedModel: "xiaomi/mimo-v2-flash",
fallbackNoticeActiveModel: "minimax-portal/MiniMax-M2.5",
fallbackNoticeReason: "model not allowed",
totalTokens: 49_000,
contextTokens: 1_048_576, // stale: equals the selected model's window, not the active fallback's
},
sessionKey: "agent:main:main",
sessionScope: "per-sender",
queue: { mode: "collect", depth: 0 },
modelAuth: "api-key",
activeModelAuth: "api-key",
});
const normalized = normalizeTestText(text);
expect(normalized).toContain("Fallback: minimax-portal/MiniMax-M2.5");
expect(normalized).toContain("Context: 49k/200k");
expect(normalized).not.toContain("Context: 49k/1.0m");
});
// When the caller passes a live runtimeContextTokens snapshot, it must win
// over both the stale persisted contextTokens (1.0m) and the active fallback
// model's configured window (200k): 123_456 formats as "123k".
it("keeps an explicit runtime context limit when fallback status already computed one", () => {
const text = buildStatusMessage({
config: {
models: {
providers: {
"minimax-portal": {
models: [{ id: "MiniMax-M2.5", contextWindow: 200_000 }],
},
xiaomi: {
models: [{ id: "mimo-v2-flash", contextWindow: 1_048_576 }],
},
},
},
} as unknown as OpenClawConfig,
agent: {
model: "xiaomi/mimo-v2-flash",
},
runtimeContextTokens: 123_456, // live override supplied by the caller
sessionEntry: {
sessionId: "fallback-context-window-live-limit",
updatedAt: 0,
providerOverride: "xiaomi",
modelOverride: "mimo-v2-flash",
modelProvider: "minimax-portal",
model: "MiniMax-M2.5",
fallbackNoticeSelectedModel: "xiaomi/mimo-v2-flash",
fallbackNoticeActiveModel: "minimax-portal/MiniMax-M2.5",
fallbackNoticeReason: "model not allowed",
totalTokens: 49_000,
contextTokens: 1_048_576, // stale selected-model snapshot; must be ignored
},
sessionKey: "agent:main:main",
sessionScope: "per-sender",
queue: { mode: "collect", depth: 0 },
modelAuth: "api-key",
activeModelAuth: "api-key",
});
const normalized = normalizeTestText(text);
expect(normalized).toContain("Fallback: minimax-portal/MiniMax-M2.5");
expect(normalized).toContain("Context: 49k/123k");
expect(normalized).not.toContain("Context: 49k/1.0m");
expect(normalized).not.toContain("Context: 49k/200k");
});
// A persisted contextTokens value that does NOT match the selected model's
// window (123_456 vs 1.0m) is treated as a real runtime snapshot, not a stale
// copy, and is preserved even though the active fallback window is 200k.
it("keeps the persisted runtime context limit for fallback sessions when no live override is passed", () => {
const text = buildStatusMessage({
config: {
models: {
providers: {
"minimax-portal": {
models: [{ id: "MiniMax-M2.5", contextWindow: 200_000 }],
},
xiaomi: {
models: [{ id: "mimo-v2-flash", contextWindow: 1_048_576 }],
},
},
},
} as unknown as OpenClawConfig,
agent: {
model: "xiaomi/mimo-v2-flash",
},
sessionEntry: {
sessionId: "fallback-context-window-persisted-limit",
updatedAt: 0,
providerOverride: "xiaomi",
modelOverride: "mimo-v2-flash",
modelProvider: "minimax-portal",
model: "MiniMax-M2.5",
fallbackNoticeSelectedModel: "xiaomi/mimo-v2-flash",
fallbackNoticeActiveModel: "minimax-portal/MiniMax-M2.5",
fallbackNoticeReason: "model not allowed",
totalTokens: 49_000,
contextTokens: 123_456, // persisted runtime limit — distinct from either model's window
},
sessionKey: "agent:main:main",
sessionScope: "per-sender",
queue: { mode: "collect", depth: 0 },
modelAuth: "api-key",
activeModelAuth: "api-key",
});
const normalized = normalizeTestText(text);
expect(normalized).toContain("Fallback: minimax-portal/MiniMax-M2.5");
expect(normalized).toContain("Context: 49k/123k");
expect(normalized).not.toContain("Context: 49k/1.0m");
expect(normalized).not.toContain("Context: 49k/200k");
});
// Before any runtime snapshot is persisted (no sessionEntry.contextTokens),
// an explicitly configured cap (120k) must apply on the fallback path rather
// than the active model's window (200k) or the selected model's (1.0m).
it("keeps an explicit configured context cap for fallback status before runtime snapshot persists", () => {
const text = buildStatusMessage({
config: {
models: {
providers: {
"minimax-portal": {
models: [{ id: "MiniMax-M2.5", contextWindow: 200_000 }],
},
xiaomi: {
models: [{ id: "mimo-v2-flash", contextWindow: 1_048_576 }],
},
},
},
} as unknown as OpenClawConfig,
agent: {
model: "xiaomi/mimo-v2-flash",
contextTokens: 120_000,
},
explicitConfiguredContextTokens: 120_000, // caller-declared cap, below the active 200k window
sessionEntry: {
sessionId: "fallback-context-window-configured-cap",
updatedAt: 0,
providerOverride: "xiaomi",
modelOverride: "mimo-v2-flash",
modelProvider: "minimax-portal",
model: "MiniMax-M2.5",
fallbackNoticeSelectedModel: "xiaomi/mimo-v2-flash",
fallbackNoticeActiveModel: "minimax-portal/MiniMax-M2.5",
fallbackNoticeReason: "model not allowed",
totalTokens: 49_000,
},
sessionKey: "agent:main:main",
sessionScope: "per-sender",
queue: { mode: "collect", depth: 0 },
modelAuth: "api-key",
activeModelAuth: "api-key",
});
const normalized = normalizeTestText(text);
expect(normalized).toContain("Fallback: minimax-portal/MiniMax-M2.5");
expect(normalized).toContain("Context: 49k/120k");
expect(normalized).not.toContain("Context: 49k/200k");
expect(normalized).not.toContain("Context: 49k/1.0m");
});
// Edge case: the configured cap (128k) happens to equal the selected model's
// window. It must still be honored as an explicit cap — not misclassified as
// a stale selected-model snapshot and replaced by the active 200k window.
it("keeps an explicit configured context cap even when it matches the selected model window", () => {
const text = buildStatusMessage({
config: {
models: {
providers: {
"minimax-portal": {
models: [{ id: "MiniMax-M2.5", contextWindow: 200_000 }],
},
xiaomi: {
models: [{ id: "mimo-v2-flash", contextWindow: 128_000 }],
},
},
},
} as unknown as OpenClawConfig,
agent: {
model: "xiaomi/mimo-v2-flash",
contextTokens: 128_000,
},
explicitConfiguredContextTokens: 128_000, // equals the selected model's 128k window
sessionEntry: {
sessionId: "fallback-context-window-configured-cap-equals-selected",
updatedAt: 0,
providerOverride: "xiaomi",
modelOverride: "mimo-v2-flash",
modelProvider: "minimax-portal",
model: "MiniMax-M2.5",
fallbackNoticeSelectedModel: "xiaomi/mimo-v2-flash",
fallbackNoticeActiveModel: "minimax-portal/MiniMax-M2.5",
fallbackNoticeReason: "model not allowed",
totalTokens: 49_000,
},
sessionKey: "agent:main:main",
sessionScope: "per-sender",
queue: { mode: "collect", depth: 0 },
modelAuth: "api-key",
activeModelAuth: "api-key",
});
const normalized = normalizeTestText(text);
expect(normalized).toContain("Fallback: minimax-portal/MiniMax-M2.5");
expect(normalized).toContain("Context: 49k/128k");
expect(normalized).not.toContain("Context: 49k/200k");
});
// A configured cap may not exceed the active fallback model's known window:
// a 1.0m cap must be clamped down to the fallback model's 200k.
it("clamps an explicit configured context cap to the active fallback window", () => {
const text = buildStatusMessage({
config: {
models: {
providers: {
"minimax-portal": {
models: [{ id: "MiniMax-M2.5", contextWindow: 200_000 }],
},
xiaomi: {
models: [{ id: "mimo-v2-flash", contextWindow: 1_048_576 }],
},
},
},
} as unknown as OpenClawConfig,
agent: {
model: "xiaomi/mimo-v2-flash",
contextTokens: 1_048_576,
},
explicitConfiguredContextTokens: 1_048_576, // larger than the active 200k window — must clamp
sessionEntry: {
sessionId: "fallback-context-window-configured-cap-clamped",
updatedAt: 0,
providerOverride: "xiaomi",
modelOverride: "mimo-v2-flash",
modelProvider: "minimax-portal",
model: "MiniMax-M2.5",
fallbackNoticeSelectedModel: "xiaomi/mimo-v2-flash",
fallbackNoticeActiveModel: "minimax-portal/MiniMax-M2.5",
fallbackNoticeReason: "model not allowed",
totalTokens: 49_000,
},
sessionKey: "agent:main:main",
sessionScope: "per-sender",
queue: { mode: "collect", depth: 0 },
modelAuth: "api-key",
activeModelAuth: "api-key",
});
const normalized = normalizeTestText(text);
expect(normalized).toContain("Fallback: minimax-portal/MiniMax-M2.5");
expect(normalized).toContain("Context: 49k/200k");
expect(normalized).not.toContain("Context: 49k/1.0m");
});
// The active runtime model ("custom-runtime/unknown-fallback-model") has no
// config entry, so its window cannot be resolved. The persisted 128k runtime
// snapshot must be kept rather than falling back to the configured 1.0m cap.
it("keeps a persisted fallback limit when the active runtime model lookup is unavailable", () => {
const text = buildStatusMessage({
config: {
models: {
providers: {
xiaomi: {
models: [{ id: "mimo-v2-flash", contextWindow: 1_048_576 }],
},
},
},
} as unknown as OpenClawConfig,
agent: {
model: "xiaomi/mimo-v2-flash",
contextTokens: 1_048_576,
},
explicitConfiguredContextTokens: 1_048_576,
sessionEntry: {
sessionId: "fallback-context-window-persisted-unknown-active",
updatedAt: 0,
providerOverride: "xiaomi",
modelOverride: "mimo-v2-flash",
modelProvider: "custom-runtime", // provider absent from config — window unresolvable
model: "unknown-fallback-model",
fallbackNoticeSelectedModel: "xiaomi/mimo-v2-flash",
fallbackNoticeActiveModel: "custom-runtime/unknown-fallback-model",
fallbackNoticeReason: "model not allowed",
totalTokens: 49_000,
contextTokens: 128_000, // last known live limit for this fallback session
},
sessionKey: "agent:main:main",
sessionScope: "per-sender",
queue: { mode: "collect", depth: 0 },
modelAuth: "api-key",
activeModelAuth: "api-key",
});
const normalized = normalizeTestText(text);
expect(normalized).toContain("Fallback: custom-runtime/unknown-fallback-model");
expect(normalized).toContain("Context: 49k/128k");
expect(normalized).not.toContain("Context: 49k/1.0m");
});
it("uses per-agent sandbox config when config and session key are provided", () => {
const text = buildStatusMessage({
config: {
@ -526,6 +835,7 @@ describe("buildStatusMessage", () => {
dir: string;
agentId: string;
sessionId: string;
model?: string;
usage: {
input: number;
output: number;
@ -550,7 +860,7 @@ describe("buildStatusMessage", () => {
type: "message",
message: {
role: "assistant",
model: "claude-opus-4-5",
model: params.model ?? "claude-opus-4-5",
usage: params.usage,
},
}),
@ -681,6 +991,254 @@ describe("buildStatusMessage", () => {
{ prefix: "openclaw-status-" },
);
});
// Transcript-derived ids like "google/gemini-2.5-pro" may come via a different
// upstream (e.g. OpenRouter), so the lookup must stay model-only: the cached
// 999k entry for the full slash id wins over config's google provider (2.0m).
it("keeps transcript-derived slash model ids on model-only context lookup", async () => {
await withTempHome(
async (dir) => {
MODEL_CONTEXT_TOKEN_CACHE.set("google/gemini-2.5-pro", 999_000);
const sessionId = "sess-openrouter-google";
writeTranscriptUsageLog({
dir,
agentId: "main",
sessionId,
model: "google/gemini-2.5-pro",
usage: {
input: 2,
output: 3,
cacheRead: 1200,
cacheWrite: 0,
totalTokens: 1205,
},
});
const text = buildStatusMessage({
config: {
models: {
providers: {
google: {
models: [{ id: "gemini-2.5-pro", contextWindow: 2_000_000 }],
},
},
},
} as unknown as OpenClawConfig,
agent: {
model: "openrouter/google/gemini-2.5-pro",
},
sessionEntry: {
sessionId,
updatedAt: 0,
totalTokens: 5,
},
sessionKey: "agent:main:main",
sessionScope: "per-sender",
queue: { mode: "collect", depth: 0 },
includeTranscriptUsage: true, // pull the model id from the transcript log above
modelAuth: "api-key",
});
const normalized = normalizeTestText(text);
expect(normalized).toContain("Context: 1.2k/999k");
expect(normalized).not.toContain("Context: 1.2k/2.0m");
},
{ prefix: "openclaw-status-" },
);
});
// Same model-only lookup rule, but for a slash id stored directly on the
// session entry with no separate modelProvider field: the cached 999k entry
// for "google/gemini-2.5-pro" must beat the config provider's 2.0m window.
it("keeps runtime slash model ids on model-only context lookup when modelProvider is missing", () => {
MODEL_CONTEXT_TOKEN_CACHE.set("google/gemini-2.5-pro", 999_000);
const text = buildStatusMessage({
config: {
models: {
providers: {
google: {
models: [{ id: "gemini-2.5-pro", contextWindow: 2_000_000 }],
},
},
},
} as unknown as OpenClawConfig,
agent: {
model: "openrouter/google/gemini-2.5-pro",
},
sessionEntry: {
sessionId: "sess-runtime-slash-id",
updatedAt: 0,
totalTokens: 1205,
model: "google/gemini-2.5-pro", // slash id without a modelProvider field
},
sessionKey: "agent:main:main",
sessionScope: "per-sender",
queue: { mode: "collect", depth: 0 },
modelAuth: "api-key",
});
const normalized = normalizeTestText(text);
expect(normalized).toContain("Context: 1.2k/999k");
expect(normalized).not.toContain("Context: 1.2k/2.0m");
});
// Legacy fallback sessions persisted a provider-qualified runtime id
// ("fake-minimax/FakeMiniMax-M2.5") without a modelProvider field. Because it
// matches fallbackNoticeActiveModel, the lookup must stay provider-aware and
// resolve the fallback provider's 777k window — not the selected model's 1.0m.
it("keeps provider-aware lookup for legacy fallback runtime slash ids", () => {
MODEL_CONTEXT_TOKEN_CACHE.clear(); // ensure no cached window short-circuits the config lookup
const text = buildStatusMessage({
config: {
models: {
providers: {
"fake-minimax": {
models: [{ id: "FakeMiniMax-M2.5", contextWindow: 777_000 }],
},
xiaomi: {
models: [{ id: "mimo-v2-flash", contextWindow: 1_048_576 }],
},
},
},
} as unknown as OpenClawConfig,
agent: {
model: "xiaomi/mimo-v2-flash",
},
sessionEntry: {
sessionId: "sess-runtime-slash-id-fallback",
updatedAt: 0,
providerOverride: "xiaomi",
modelOverride: "mimo-v2-flash",
model: "fake-minimax/FakeMiniMax-M2.5", // slash id, no modelProvider field
fallbackNoticeSelectedModel: "xiaomi/mimo-v2-flash",
fallbackNoticeActiveModel: "fake-minimax/FakeMiniMax-M2.5",
fallbackNoticeReason: "model not allowed",
totalTokens: 49_000,
},
sessionKey: "agent:main:main",
sessionScope: "per-sender",
queue: { mode: "collect", depth: 0 },
modelAuth: "api-key",
activeModelAuth: "api-key",
});
const normalized = normalizeTestText(text);
expect(normalized).toContain("Fallback: fake-minimax/FakeMiniMax-M2.5");
expect(normalized).toContain("Context: 49k/777k");
// The regression value here is the selected model's 1_048_576 window
// ("1.0m"); the previous "200k" check was copy-paste residue from sibling
// fixtures and could never appear with this config.
expect(normalized).not.toContain("Context: 49k/1.0m");
});
// Non-fallback path: the runtime slash id equals the selected model, so the
// provider-aware lookup must resolve openai's configured 777k window.
it("keeps provider-aware lookup for non-fallback runtime slash ids", () => {
MODEL_CONTEXT_TOKEN_CACHE.clear(); // ensure no cached window short-circuits the config lookup
const text = buildStatusMessage({
config: {
models: {
providers: {
openai: {
models: [{ id: "gpt-4o", contextWindow: 777_000 }],
},
},
},
} as unknown as OpenClawConfig,
agent: {
model: "openai/gpt-4o",
},
sessionEntry: {
sessionId: "sess-runtime-slash-id-direct",
updatedAt: 0,
model: "openai/gpt-4o", // matches the selected model — no fallback involved
totalTokens: 49_000,
},
sessionKey: "agent:main:main",
sessionScope: "per-sender",
queue: { mode: "collect", depth: 0 },
modelAuth: "api-key",
activeModelAuth: "api-key",
});
const normalized = normalizeTestText(text);
expect(normalized).toContain("Context: 49k/777k");
// This is a non-fallback session, so no fallback notice may be rendered.
// (The previous `not.toContain("Context: 49k/200k")` was vacuous: no 200k
// window exists anywhere in this fixture's config.)
expect(normalized).not.toContain("Fallback:");
});
// Bare transcript ids ("gemini-2.5-pro", no provider prefix) must keep
// provider-aware lookup when the agent's provider is known: the
// provider-qualified cache entry (1.0m) wins over the bare-id entry (128k).
it("keeps provider-aware lookup for bare transcript model ids", async () => {
await withTempHome(
async (dir) => {
MODEL_CONTEXT_TOKEN_CACHE.set("gemini-2.5-pro", 128_000);
MODEL_CONTEXT_TOKEN_CACHE.set("google-gemini-cli/gemini-2.5-pro", 1_000_000);
const sessionId = "sess-google-bare-model";
writeTranscriptUsageLog({
dir,
agentId: "main",
sessionId,
model: "gemini-2.5-pro",
usage: {
input: 2,
output: 3,
cacheRead: 1200,
cacheWrite: 0,
totalTokens: 1205,
},
});
const text = buildStatusMessage({
agent: {
model: "google-gemini-cli/gemini-2.5-pro",
},
sessionEntry: {
sessionId,
updatedAt: 0,
totalTokens: 5,
},
sessionKey: "agent:main:main",
sessionScope: "per-sender",
queue: { mode: "collect", depth: 0 },
includeTranscriptUsage: true,
modelAuth: "api-key",
});
const normalized = normalizeTestText(text);
expect(normalized).toContain("Context: 1.2k/1.0m");
expect(normalized).not.toContain("Context: 1.2k/128k");
},
{ prefix: "openclaw-status-" },
);
});
// When the active fallback model cannot be resolved at all, status must not
// invent the built-in default window (32k); the persisted 128k limit stays.
it("does not synthesize a 32k fallback window when the active runtime model is unknown", () => {
const text = buildStatusMessage({
config: {
models: {
providers: {
xiaomi: {
models: [{ id: "mimo-v2-flash", contextWindow: 128_000 }],
},
},
},
} as unknown as OpenClawConfig,
agent: {
model: "xiaomi/mimo-v2-flash",
},
sessionEntry: {
sessionId: "fallback-context-window-unknown-active-model",
updatedAt: 0,
providerOverride: "xiaomi",
modelOverride: "mimo-v2-flash",
modelProvider: "custom-runtime", // not present in config — unresolvable
model: "unknown-fallback-model",
fallbackNoticeSelectedModel: "xiaomi/mimo-v2-flash",
fallbackNoticeActiveModel: "custom-runtime/unknown-fallback-model",
fallbackNoticeReason: "model not allowed",
totalTokens: 49_000,
contextTokens: 128_000, // persisted limit that must survive the failed lookup
},
sessionKey: "agent:main:main",
sessionScope: "per-sender",
queue: { mode: "collect", depth: 0 },
modelAuth: "api-key",
activeModelAuth: "api-key",
});
const normalized = normalizeTestText(text);
expect(normalized).toContain("Fallback: custom-runtime/unknown-fallback-model");
expect(normalized).toContain("Context: 49k/128k");
expect(normalized).not.toContain("Context: 49k/32k");
});
});
describe("buildCommandsMessage", () => {

View File

@ -70,6 +70,8 @@ type StatusArgs = {
config?: OpenClawConfig;
agent: AgentConfig;
agentId?: string;
runtimeContextTokens?: number;
explicitConfiguredContextTokens?: number;
sessionEntry?: SessionEntry;
sessionKey?: string;
parentSessionKey?: string;
@ -445,16 +447,46 @@ export function buildStatusMessage(args: StatusArgs): string {
selectedModel,
sessionEntry: entry,
});
const initialFallbackState = resolveActiveFallbackState({
selectedModelRef: modelRefs.selected.label || "unknown",
activeModelRef: modelRefs.active.label || "unknown",
state: entry,
});
let activeProvider = modelRefs.active.provider;
let activeModel = modelRefs.active.model;
let contextTokens =
resolveContextTokensForModel({
cfg: contextConfig,
provider: activeProvider,
model: activeModel,
contextTokensOverride: entry?.contextTokens ?? args.agent?.contextTokens,
fallbackContextTokens: DEFAULT_CONTEXT_TOKENS,
}) ?? DEFAULT_CONTEXT_TOKENS;
let contextLookupProvider: string | undefined = activeProvider;
let contextLookupModel = activeModel;
const runtimeModelRaw = typeof entry?.model === "string" ? entry.model.trim() : "";
const runtimeProviderRaw =
typeof entry?.modelProvider === "string" ? entry.modelProvider.trim() : "";
if (runtimeModelRaw && !runtimeProviderRaw && runtimeModelRaw.includes("/")) {
const slashIndex = runtimeModelRaw.indexOf("/");
const embeddedProvider = runtimeModelRaw.slice(0, slashIndex).trim().toLowerCase();
const fallbackMatchesRuntimeModel =
initialFallbackState.active &&
runtimeModelRaw.toLowerCase() ===
String(entry?.fallbackNoticeActiveModel ?? "")
.trim()
.toLowerCase();
const runtimeMatchesSelectedModel =
runtimeModelRaw.toLowerCase() === (modelRefs.selected.label || "unknown").toLowerCase();
// Legacy fallback sessions can persist provider-qualified runtime ids
// without a separate modelProvider field. Preserve provider-aware lookup
// when the stored slash id is the selected model or the active fallback
// target; otherwise keep the raw model-only lookup for OpenRouter-style
// slash ids.
if (
(fallbackMatchesRuntimeModel || runtimeMatchesSelectedModel) &&
embeddedProvider === activeProvider.toLowerCase()
) {
contextLookupProvider = activeProvider;
contextLookupModel = activeModel;
} else {
contextLookupProvider = undefined;
contextLookupModel = runtimeModelRaw;
}
}
let inputTokens = entry?.inputTokens;
let outputTokens = entry?.outputTokens;
@ -485,19 +517,21 @@ export function buildStatusMessage(args: StatusArgs): string {
if (provider && model) {
activeProvider = provider;
activeModel = model;
// Preserve model-only lookup for transcript-derived provider/model IDs
// like "google/gemini-2.5-pro" that may come from a different upstream
// provider (for example OpenRouter).
contextLookupProvider = undefined;
contextLookupModel = logUsage.model;
}
} else {
activeModel = logUsage.model;
// Bare transcript model IDs should keep provider-aware lookup when the
// active provider is already known so shared model names still resolve
// to the correct provider-specific window.
contextLookupProvider = activeProvider;
contextLookupModel = logUsage.model;
}
}
if (!contextTokens && logUsage.model) {
contextTokens =
resolveContextTokensForModel({
cfg: contextConfig,
model: logUsage.model,
fallbackContextTokens: contextTokens ?? undefined,
}) ?? contextTokens;
}
if (!inputTokens || inputTokens === 0) {
inputTokens = logUsage.input;
}
@ -507,6 +541,87 @@ export function buildStatusMessage(args: StatusArgs): string {
}
}
const activeModelLabel = formatProviderModelRef(activeProvider, activeModel) || "unknown";
const runtimeDiffersFromSelected = activeModelLabel !== (modelRefs.selected.label || "unknown");
const selectedContextTokens = resolveContextTokensForModel({
cfg: contextConfig,
provider: selectedProvider,
model: selectedModel,
});
const activeContextTokens = resolveContextTokensForModel({
cfg: contextConfig,
...(contextLookupProvider ? { provider: contextLookupProvider } : {}),
model: contextLookupModel,
});
const persistedContextTokens =
typeof entry?.contextTokens === "number" && entry.contextTokens > 0
? entry.contextTokens
: undefined;
const explicitRuntimeContextTokens =
typeof args.runtimeContextTokens === "number" && args.runtimeContextTokens > 0
? args.runtimeContextTokens
: undefined;
const explicitConfiguredContextTokens =
typeof args.explicitConfiguredContextTokens === "number" &&
args.explicitConfiguredContextTokens > 0
? args.explicitConfiguredContextTokens
: undefined;
const cappedConfiguredContextTokens =
typeof explicitConfiguredContextTokens === "number"
? typeof activeContextTokens === "number"
? Math.min(explicitConfiguredContextTokens, activeContextTokens)
: explicitConfiguredContextTokens
: undefined;
// When a fallback model is active, the selected-model context limit that
// callers keep on the agent config is often stale. Prefer an explicit runtime
// snapshot when available. Separately, callers can pass an explicit configured
// cap that should still apply on fallback paths, but it cannot exceed the
// active runtime window when that window is known. Persisted runtime snapshots
// still take precedence over configured caps so historical fallback sessions
// keep their last known live limit even if the active model later becomes
// unresolvable.
const contextTokens = runtimeDiffersFromSelected
? (explicitRuntimeContextTokens ??
(() => {
if (persistedContextTokens !== undefined) {
const persistedLooksSelectedWindow =
typeof selectedContextTokens === "number" &&
persistedContextTokens === selectedContextTokens;
const activeWindowDiffersFromSelected =
typeof selectedContextTokens === "number" &&
typeof activeContextTokens === "number" &&
activeContextTokens !== selectedContextTokens;
const explicitConfiguredMatchesPersisted =
typeof explicitConfiguredContextTokens === "number" &&
explicitConfiguredContextTokens === persistedContextTokens;
if (
persistedLooksSelectedWindow &&
activeWindowDiffersFromSelected &&
!explicitConfiguredMatchesPersisted
) {
return activeContextTokens;
}
if (typeof activeContextTokens === "number") {
return Math.min(persistedContextTokens, activeContextTokens);
}
return persistedContextTokens;
}
if (cappedConfiguredContextTokens !== undefined) {
return cappedConfiguredContextTokens;
}
if (typeof activeContextTokens === "number") {
return activeContextTokens;
}
return DEFAULT_CONTEXT_TOKENS;
})())
: (resolveContextTokensForModel({
cfg: contextConfig,
...(contextLookupProvider ? { provider: contextLookupProvider } : {}),
model: contextLookupModel,
contextTokensOverride: persistedContextTokens ?? args.agent?.contextTokens,
fallbackContextTokens: DEFAULT_CONTEXT_TOKENS,
}) ?? DEFAULT_CONTEXT_TOKENS);
const thinkLevel =
args.resolvedThink ?? args.sessionEntry?.thinkingLevel ?? args.agent?.thinkingDefault ?? "off";
const verboseLevel =
@ -581,7 +696,6 @@ export function buildStatusMessage(args: StatusArgs): string {
args.activeModelAuth ??
(activeAuthMode && activeAuthMode !== "unknown" ? activeAuthMode : undefined);
const selectedModelLabel = modelRefs.selected.label || "unknown";
const activeModelLabel = formatProviderModelRef(activeProvider, activeModel) || "unknown";
const fallbackState = resolveActiveFallbackState({
selectedModelRef: selectedModelLabel,
activeModelRef: activeModelLabel,